/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/vm_statistics.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <machine/db_machdep.h>
/*
 *	Internal prototypes.
 */
extern boolean_t	vm_map_range_check(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_map_entry_t	*entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header	*map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry);

extern void		vm_map_pmap_enter(
				vm_map_t		map,
				vm_offset_t		addr,
				vm_offset_t		end_addr,
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_prot_t		protection);

extern void		_vm_map_clip_end(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		end);

extern void		vm_map_entry_delete(
				vm_map_t	map,
				vm_map_entry_t	entry);

extern kern_return_t	vm_map_delete(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				int		flags);

extern void		vm_map_copy_steal_pages(
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_t	dst_map,
				vm_map_entry_t	entry,
				vm_map_copy_t	copy,
				vm_offset_t	start);

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_t	dst_map,
				vm_map_entry_t	tmp_entry,
				vm_map_copy_t	copy,
				vm_offset_t	start,
				pmap_t		pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_map_t	src_map,
				vm_offset_t	src_addr,
				vm_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_map_t	map,
				vm_offset_t	*addr,	/* IN/OUT */
				vm_map_copy_t	copy,
				boolean_t	overwrite);

extern void		vm_map_fork_share(
				vm_map_t	old_map,
				vm_map_entry_t	old_entry,
				vm_map_t	new_map);

extern boolean_t	vm_map_fork_copy(
				vm_map_t	old_map,
				vm_map_entry_t	*old_entry_p,
				vm_map_t	new_map);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t	map,
				vm_offset_t	*address,	/* IN/OUT */
				vm_size_t	size,
				vm_offset_t	mask,
				boolean_t	anywhere,
				vm_map_entry_t	*map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		start);

void			vm_region_top_walk(
				vm_map_entry_t		entry,
				vm_region_top_info_t	top);

void			vm_region_walk(
				vm_map_entry_t			entry,
				vm_region_extended_info_t	extended,
				vm_object_offset_t		offset,
				vm_offset_t			range,
				vm_map_t			map,
				vm_offset_t			va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN					\
	*(NEW) = *(OLD);			\
	(NEW)->is_shared = FALSE;		\
	(NEW)->needs_wakeup = FALSE;		\
	(NEW)->in_transition = FALSE;		\
	(NEW)->wired_count = 0;			\
	(NEW)->user_wired_count = 0;		\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)	(*(NEW) = *(OLD))
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a VM object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
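/*
 * Illustrative sketch (compiled out): how the clipping convention above
 * is typically applied by a range operation.  The lookup is followed by
 * vm_map_clip_start()/vm_map_clip_end() (defined later in this file) so
 * that [start, end) is covered by whole entries.  The function name and
 * parameters below are placeholders, not part of this module.
 */
#if 0
static void
example_clip_range(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_entry_t	entry;

	vm_map_lock(map);
	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);	/* split at "start" */
		vm_map_clip_end(map, entry, end);	/* split at "end" */
		/* "entry" now begins at "start" and ends at or before "end" */
	}
	vm_map_unlock(map);
}
#endif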
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */


/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */

vm_object_t	vm_submap_object;
/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */

vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;		/* to init kentry_data_size */

#define	NO_COALESCE_LIMIT	(1024 * 128)

/*
 *	Threshold for aggressive (eager) page map entering for vm copyout
 *	operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t vm_map_aggressive_enter_max;		/* set by bootstrap */

/* Skip acquiring locks if we're in the midst of a kernel core dump */
extern unsigned int not_in_kdp;
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
					PAGE_SIZE, "maps");

	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					1024*1024, PAGE_SIZE*5,
					"non-kernel map entries");

	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					kentry_data_size, kentry_data_size,
					"kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
					16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}
void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page_32(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world. This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page_32(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t			pmap,
	vm_offset_t		min,
	vm_offset_t		max,
	boolean_t		pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->size = 0;
	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
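/*
 * Usage sketch (compiled out): creating a fresh map over a new physical
 * map.  The address bounds shown are purely illustrative; real callers
 * (e.g. task creation code) supply their own, and the pmap_create()
 * argument follows the usage seen later in this file.
 */
#if 0
static vm_map_t
example_make_map(void)
{
	pmap_t		pmap = pmap_create((vm_size_t) 0);

	/* pageable entries, covering an illustrative 4KB..1GB range */
	return vm_map_create(pmap, 0x1000, 0x40000000, TRUE);
}
#endif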
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	    _vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	    _vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(map, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}

boolean_t first_free_is_valid(vm_map_t map);	/* forward */
boolean_t first_free_check = FALSE;
/*
 *	Debugging check (enabled via first_free_check) that the map's
 *	first_free pointer agrees with the entry list.
 */
boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
	       (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
			map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free)				\
MACRO_BEGIN								\
	vm_map_t	UFF_map;					\
	vm_map_entry_t	UFF_first_free;					\
	vm_map_entry_t	UFF_next_entry;					\
	UFF_map = (map);						\
	UFF_first_free = (new_first_free);				\
	UFF_next_entry = UFF_first_free->vme_next;			\
	while (trunc_page_32(UFF_next_entry->vme_start) ==		\
	       trunc_page_32(UFF_first_free->vme_end) ||		\
	       (trunc_page_32(UFF_next_entry->vme_start) ==		\
		trunc_page_32(UFF_first_free->vme_start) &&		\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {		\
		UFF_first_free = UFF_next_entry;			\
		UFF_next_entry = UFF_first_free->vme_next;		\
		if (UFF_first_free == vm_map_to_entry(UFF_map))		\
			break;						\
	}								\
	UFF_map->first_free = UFF_first_free;				\
	assert(first_free_is_valid(UFF_map));				\
MACRO_END
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)			\
MACRO_BEGIN								\
	vm_map_t VMEL_map;						\
	vm_map_entry_t VMEL_entry;					\
	VMEL_map = (map);						\
	VMEL_entry = (entry);						\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);		\
MACRO_END


#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)			\
	MACRO_BEGIN							\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END

#define vm_map_entry_unlink(map, entry)					\
MACRO_BEGIN								\
	vm_map_t VMEU_map;						\
	vm_map_entry_t VMEU_entry;					\
	vm_map_entry_t VMEU_first_free;					\
	VMEU_map = (map);						\
	VMEU_entry = (entry);						\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;			\
	else								\
		VMEU_first_free = VMEU_map->first_free;			\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);		\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);			\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)				\
	MACRO_BEGIN							\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
	MACRO_END
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 *
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}

/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 *
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}

/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 *
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
#if	TASK_SWAPPER
/*
 *	vm_map_swapin/vm_map_swapout
 *
 *	Swap a map in and out, either referencing or releasing its resources.
 *	These functions are internal use only; however, they must be exported
 *	because they may be called from macros, which are exported.
 *
 *	In the case of swapout, there could be races on the residence count,
 *	so if the residence count is up, we return, assuming that a
 *	vm_map_deallocate() call in the near future will bring us back.
 *
 *	Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 *	Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 *	Because vm_map_swapin() is potentially an expensive operation, it
 *	should be used with caution.
 *
 *	Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped out.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
MACRO_BEGIN \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock); \
MACRO_END
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
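/*
 * Usage sketch (compiled out): the common caller pattern for
 * vm_map_lookup_entry().  With the map locked, a hit returns the entry
 * containing the address; a miss returns the entry immediately before
 * the hole.  The helper name below is hypothetical.
 */
#if 0
static boolean_t
example_address_is_mapped(
	vm_map_t	map,
	vm_offset_t	address)
{
	vm_map_entry_t	entry;
	boolean_t	mapped;

	vm_map_lock_read(map);
	mapped = vm_map_lookup_entry(map, address, &entry);
	/* on TRUE: entry->vme_start <= address < entry->vme_end */
	vm_map_unlock_read(map);
	return mapped;
}
#endif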
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must be NOT be locked. It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	map->size += size;

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page is not found in the object, the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	unsigned int		cache_attr;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
				map, addr, object, offset);
		}
		m->busy = TRUE;

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_page);
			m->no_isync = FALSE;
		}

		cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, cache_attr, FALSE);

		vm_object_lock(object);

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);

		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							    THREAD_ABORTSAFE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}

 BailOut: ;
		return(result);
	}

#undef	RETURN
}
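/*
 * Usage sketch (compiled out): a zero-fill "allocate anywhere" call to
 * vm_map_enter(), following the parameter order declared above.  Passing
 * VM_OBJECT_NULL requests anonymous memory and lets the entry-coalescing
 * path apply; the protections shown are illustrative defaults.
 */
#if 0
static kern_return_t
example_allocate_anywhere(
	vm_map_t	map,
	vm_size_t	size,
	vm_offset_t	*addrp)
{
	*addrp = 0;
	return vm_map_enter(map, addrp, size,
			    (vm_offset_t) 0,		/* mask: no alignment constraint */
			    VM_FLAGS_ANYWHERE,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0,
			    FALSE,			/* needs_copy */
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
}
#endif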
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr) \
	MACRO_BEGIN \
	if ((startaddr) > (entry)->vme_start) \
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				(addr64_t)(entry->vme_start), \
				(addr64_t)(entry->vme_end)); \
		} \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr) \
	MACRO_BEGIN \
	if ((endaddr) < (entry)->vme_end) \
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses are wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_object_offset_t	offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	submap->mapped = TRUE;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		entry->object.sub_map = submap;
		vm_map_reference(submap);
#ifndef i386
		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			if(submap->pmap == NULL) {
				submap->pmap = pmap_create((vm_size_t) 0);
				if(submap->pmap == PMAP_NULL) {
					return(KERN_NO_SPACE);
				}
			}
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					(addr64_t)start, (addr64_t)start,
					(uint64_t)(end - start));
			if(result)
				panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
			entry->use_pmap = TRUE;
		}
#endif
#ifdef i386
		pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
#endif
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t		current;
	register vm_offset_t		prev;
	vm_map_entry_t			entry;
	vm_prot_t			new_max;
	boolean_t			clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 * If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	current = entry;
	if (clip) {
		vm_map_clip_start(map, entry, start);
	}
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
				 new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap). This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;
				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						current->vme_start,
						current->vme_end,
						current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						current->vme_end,
						current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
1962 * range in the target map. Inheritance
1963 * affects how the map will be shared with
1964 * child maps at the time of vm_map_fork.
1968 register vm_map_t map
,
1969 register vm_offset_t start
,
1970 register vm_offset_t end
,
1971 register vm_inherit_t new_inheritance
)
1973 register vm_map_entry_t entry
;
1974 vm_map_entry_t temp_entry
;
1978 VM_MAP_RANGE_CHECK(map
, start
, end
);
1980 if (vm_map_lookup_entry(map
, start
, &temp_entry
)) {
1982 vm_map_clip_start(map
, entry
, start
);
1985 temp_entry
= temp_entry
->vme_next
;
1989 /* first check entire range for submaps which can't support the */
1990 /* given inheritance. */
1991 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
1992 if(entry
->is_sub_map
) {
1993 if(new_inheritance
== VM_INHERIT_COPY
)
1994 return(KERN_INVALID_ARGUMENT
);
1997 entry
= entry
->vme_next
;
2002 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2003 vm_map_clip_end(map
, entry
, end
);
2005 entry
->inheritance
= new_inheritance
;
2007 entry
= entry
->vme_next
;
2011 return(KERN_SUCCESS
);
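/*
 * Usage sketch (compiled out): marking a range as shared across fork
 * with vm_map_inherit().  VM_INHERIT_SHARE is the usual choice for
 * memory that parent and child should continue to see in common.
 */
#if 0
static kern_return_t
example_share_across_fork(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	return vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
}
#endif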
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	vm_map_t		pmap_map;
	register vm_offset_t	s,e;
	kern_return_t		rc;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	wait_interrupt_t	interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;
	vm_size_t		size;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
	if (start == end) {
		/* We wired what the caller asked for, zero pages */
		vm_map_unlock(map);
		return KERN_SUCCESS;
	}

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	s = start;
	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start? start: entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}
			/*
			 * User wiring is interruptible
			 */
			wait_result = vm_map_entry_wait(map,
					  (user_wire) ? THREAD_ABORTSAFE :
							THREAD_UNINT);
			if (user_wire && wait_result == THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			/*
			 * Cannot avoid a lookup here. reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * Worst that can happen is, it may not exist anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything up to the previous
				 * entry.  let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}
		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_start;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;

			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					/* ppc implementation requires that */
					/* submaps pmap address ranges line */
					/* up with parent map */
					pmap_addr = sub_start;
				} else {
					pmap = map->pmap;
					pmap_addr = start;
				}
				if (entry->wired_count) {
					if (entry->wired_count
					    >= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					if(user_wire &&
					   entry->user_wired_count
					   >= MAX_WIRE_COUNT) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					   (entry->user_wired_count == 0))
						entry->wired_count++;
					entry = entry->vme_next;
					continue;

				} else {
					vm_object_t		object;
					vm_object_offset_t	offset_hi;
					vm_object_offset_t	offset_lo;
					vm_object_offset_t	offset;
					vm_prot_t		prot;
					boolean_t		wired;
					vm_behavior_t		behavior;
					vm_map_entry_t		local_entry;
					vm_map_version_t	version;
					vm_map_t		lookup_map;

					/* call vm_map_lookup_locked to */
					/* cause any needs copy to be   */
					/* evaluated */
					local_start = entry->vme_start;
					lookup_map = map;
					vm_map_lock_write_to_read(map);
					if(vm_map_lookup_locked(
						&lookup_map, local_start,
						access_type,
						&version, &object,
						&offset, &prot, &wired,
						&behavior, &offset_lo,
						&offset_hi, &pmap_map)) {

						vm_map_unlock(lookup_map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(pmap_map != lookup_map)
						vm_map_unlock(pmap_map);
					vm_map_unlock_read(lookup_map);
					vm_map_lock(map);
					vm_object_unlock(object);

					if (!vm_map_lookup_entry(map,
						local_start, &local_entry)) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					/* did we have a change of type? */
					if (!local_entry->is_sub_map) {
						last_timestamp = map->timestamp;
						continue;
					}
					entry = local_entry;
					if (user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					   (entry->user_wired_count == 1))
						entry->wired_count++;

					entry->in_transition = TRUE;

					vm_map_unlock(map);
					rc = vm_map_wire_nested(
						entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
					vm_map_lock(map);
				}
			} else {
				local_start = entry->vme_start;
				if (user_wire)
					entry->user_wired_count++;
				if((!user_wire) ||
				   (entry->user_wired_count == 1))
					entry->wired_count++;
				vm_map_unlock(map);
				rc = vm_map_wire_nested(entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, map_pmap, pmap_addr);
				vm_map_lock(map);
			}
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, local_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");
			entry = first_entry;

			last_timestamp = map->timestamp;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < e)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				if (rc != KERN_SUCCESS) {/* from vm_*_wire */
					if (user_wire)
						entry->user_wired_count--;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count--;
				}
				entry = entry->vme_next;
			}
			if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
				vm_map_unlock(map);
				if (need_wakeup)
					vm_map_entry_wakeup(map);
				/*
				 * undo everything up to the previous entry.
				 */
				(void)vm_map_unwire(map, start, s, user_wire);
				return rc;
			}
			continue;
		}
2318 * If this entry is already wired then increment
2319 * the appropriate wire reference count.
2321 if (entry
->wired_count
) {
2322 /* sanity check: wired_count is a short */
2323 if (entry
->wired_count
>= MAX_WIRE_COUNT
)
2324 panic("vm_map_wire: too many wirings");
2327 entry
->user_wired_count
>= MAX_WIRE_COUNT
) {
2329 vm_map_unwire(map
, start
,
2330 entry
->vme_start
, user_wire
);
2331 return(KERN_FAILURE
);
2334 * entry is already wired down, get our reference
2335 * after clipping to our range.
2337 vm_map_clip_start(map
, entry
, start
);
2338 vm_map_clip_end(map
, entry
, end
);
2340 entry
->user_wired_count
++;
2341 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2342 entry
->wired_count
++;
2344 entry
= entry
->vme_next
;
2349 * Unwired entry or wire request transmitted via submap
2354 * Perform actions of vm_map_lookup that need the write
2355 * lock on the map: create a shadow object for a
2356 * copy-on-write region, or an object for a zero-fill
2359 size
= entry
->vme_end
- entry
->vme_start
;
2361 * If wiring a copy-on-write page, we need to copy it now
2362 * even if we're only (currently) requesting read access.
2363 * This is aggressive, but once it's wired we can't move it.
2365 if (entry
->needs_copy
) {
2366 vm_object_shadow(&entry
->object
.vm_object
,
2367 &entry
->offset
, size
);
2368 entry
->needs_copy
= FALSE
;
2369 } else if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2370 entry
->object
.vm_object
= vm_object_allocate(size
);
2371 entry
->offset
= (vm_object_offset_t
)0;
2374 vm_map_clip_start(map
, entry
, start
);
2375 vm_map_clip_end(map
, entry
, end
);
2377 s
= entry
->vme_start
;
2381 * Check for holes and protection mismatch.
2382 * Holes: Next entry should be contiguous unless this
2383 * is the end of the region.
2384 * Protection: Access requested must be allowed, unless
2385 * wiring is by protection class
2387 if ((((entry
->vme_end
< end
) &&
2388 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2389 (entry
->vme_next
->vme_start
> entry
->vme_end
))) ||
2390 ((entry
->protection
& access_type
) != access_type
))) {
2392 * Found a hole or protection problem.
2393 * Unwire the region we wired so far.
2395 if (start
!= entry
->vme_start
) {
2397 vm_map_unwire(map
, start
, s
, user_wire
);
2401 return((entry
->protection
&access_type
) != access_type
?
2402 KERN_PROTECTION_FAILURE
: KERN_INVALID_ADDRESS
);
2405 assert(entry
->wired_count
== 0 && entry
->user_wired_count
== 0);
2408 entry
->user_wired_count
++;
2409 if ((!user_wire
) || (entry
->user_wired_count
== 1))
2410 entry
->wired_count
++;
2412 entry
->in_transition
= TRUE
;
 * This entry might get split once we unlock the map.
 * In vm_fault_wire(), we need the current range as
 * defined by this entry.  In order for this to work
 * along with a simultaneous clip operation, we make a
 * temporary copy of this entry and use that for the
 * wiring.  Note that the underlying objects do not
 * change during a clip.
 *
 * The in_transition state guarantees that the entry
 * (or the entries for this range, if a split occurred) will be
 * there when the map lock is acquired for the second time.
2432 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2433 interruptible_state
= thread_interrupt_level(THREAD_UNINT
);
2436 rc
= vm_fault_wire(map
,
2437 &tmp_entry
, map_pmap
, pmap_addr
);
2439 rc
= vm_fault_wire(map
,
2440 &tmp_entry
, map
->pmap
,
2441 tmp_entry
.vme_start
);
2443 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2444 thread_interrupt_level(interruptible_state
);
2448 if (last_timestamp
+1 != map
->timestamp
) {
2450 * Find the entry again. It could have been clipped
2451 * after we unlocked the map.
2453 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2455 panic("vm_map_wire: re-lookup failed");
2457 entry
= first_entry
;
2460 last_timestamp
= map
->timestamp
;
2462 while ((entry
!= vm_map_to_entry(map
)) &&
2463 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2464 assert(entry
->in_transition
);
2465 entry
->in_transition
= FALSE
;
2466 if (entry
->needs_wakeup
) {
2467 entry
->needs_wakeup
= FALSE
;
2470 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2472 entry
->user_wired_count
--;
2474 (entry
->user_wired_count
== 0))
2475 entry
->wired_count
--;
2477 entry
= entry
->vme_next
;
2480 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2483 vm_map_entry_wakeup(map
);
 * undo everything up to the previous entry.
2487 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2490 } /* end while loop through map entries */
2494 * wake up anybody waiting on entries we wired.
2497 vm_map_entry_wakeup(map
);
2499 return(KERN_SUCCESS
);
kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{
	kern_return_t	kret;

#ifdef ppc
	/*
	 * the calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to ensure a
	 * reasonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);
#endif
	kret = vm_map_wire_nested(map, start, end, access_type,
				  user_wire, (pmap_t)NULL, 0);
#ifdef ppc
	mapping_relpre();
#endif
	return kret;
}
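
/*
 * Illustrative sketch (not part of the original source): a minimal example of
 * how a kernel client might pin a page-aligned range with vm_map_wire() and
 * release it again with vm_map_unwire(), assuming it already holds a
 * reference on "map".  The helper name wire_range_example and its error
 * handling are hypothetical; only vm_map_wire()/vm_map_unwire() and their
 * argument order come from this file.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
wire_range_example(
	vm_map_t	map,		/* referenced, unlocked map */
	vm_offset_t	start,		/* page aligned */
	vm_offset_t	end)		/* page aligned, > start */
{
	kern_return_t	kr;

	/* fault the pages in and keep them resident (kernel wiring) */
	kr = vm_map_wire(map, start, end, VM_PROT_READ | VM_PROT_WRITE,
			 FALSE /* user_wire */);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... perform the operation that requires resident pages ... */

	/* drop the wiring; the region must have been wired previously */
	return vm_map_unwire(map, start, end, FALSE /* user_wire */);
}
#endif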
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	map as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and in-transition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;
2564 if(map_pmap
== NULL
)
2566 last_timestamp
= map
->timestamp
;
2568 VM_MAP_RANGE_CHECK(map
, start
, end
);
2569 assert(page_aligned(start
));
2570 assert(page_aligned(end
));
2572 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2573 entry
= first_entry
;
2574 /* vm_map_clip_start will be done later. */
2577 /* Start address is not in map. */
2579 return(KERN_INVALID_ADDRESS
);
2582 need_wakeup
= FALSE
;
2583 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2584 if (entry
->in_transition
) {
2587 * Another thread is wiring down this entry. Note
2588 * that if it is not for the other thread we would
2589 * be unwiring an unwired entry. This is not
2590 * permitted. If we wait, we will be unwiring memory
2594 * Another thread is unwiring this entry. We did not
2595 * have a reference to it, because if we did, this
2596 * entry will not be getting unwired now.
2599 panic("vm_map_unwire: in_transition entry");
2601 entry
= entry
->vme_next
;
2605 if(entry
->is_sub_map
) {
2606 vm_offset_t sub_start
;
2607 vm_offset_t sub_end
;
2608 vm_offset_t local_end
;
2612 vm_map_clip_start(map
, entry
, start
);
2613 vm_map_clip_end(map
, entry
, end
);
2615 sub_start
= entry
->offset
;
2616 sub_end
= entry
->vme_end
- entry
->vme_start
;
2617 sub_end
+= entry
->offset
;
2618 local_end
= entry
->vme_end
;
2619 if(map_pmap
== NULL
) {
2620 if(entry
->use_pmap
) {
2621 pmap
= entry
->object
.sub_map
->pmap
;
2622 pmap_addr
= sub_start
;
2627 if (entry
->wired_count
== 0 ||
2628 (user_wire
&& entry
->user_wired_count
== 0)) {
2630 panic("vm_map_unwire: entry is unwired");
2631 entry
= entry
->vme_next
;
2637 * Holes: Next entry should be contiguous unless
2638 * this is the end of the region.
2640 if (((entry
->vme_end
< end
) &&
2641 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2642 (entry
->vme_next
->vme_start
2643 > entry
->vme_end
)))) {
2645 panic("vm_map_unwire: non-contiguous region");
2647 entry = entry->vme_next;
2652 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2653 entry
->wired_count
--;
2655 if (entry
->wired_count
!= 0) {
2656 entry
= entry
->vme_next
;
2660 entry
->in_transition
= TRUE
;
2661 tmp_entry
= *entry
;/* see comment in vm_map_wire() */
 * We can unlock the map now.  The in_transition state
 * guarantees existence of the entry.
2668 vm_map_unwire_nested(entry
->object
.sub_map
,
2669 sub_start
, sub_end
, user_wire
, pmap
, pmap_addr
);
2672 if (last_timestamp
+1 != map
->timestamp
) {
2674 * Find the entry again. It could have been
2675 * clipped or deleted after we unlocked the map.
2677 if (!vm_map_lookup_entry(map
,
2678 tmp_entry
.vme_start
,
2681 panic("vm_map_unwire: re-lookup failed");
2682 entry
= first_entry
->vme_next
;
2684 entry
= first_entry
;
2686 last_timestamp
= map
->timestamp
;
2689 * clear transition bit for all constituent entries
2690 * that were in the original entry (saved in
2691 * tmp_entry). Also check for waiters.
2693 while ((entry
!= vm_map_to_entry(map
)) &&
2694 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2695 assert(entry
->in_transition
);
2696 entry
->in_transition
= FALSE
;
2697 if (entry
->needs_wakeup
) {
2698 entry
->needs_wakeup
= FALSE
;
2701 entry
= entry
->vme_next
;
2706 vm_map_unwire_nested(entry
->object
.sub_map
,
2707 sub_start
, sub_end
, user_wire
, map_pmap
,
2711 if (last_timestamp
+1 != map
->timestamp
) {
2713 * Find the entry again. It could have been
2714 * clipped or deleted after we unlocked the map.
2716 if (!vm_map_lookup_entry(map
,
2717 tmp_entry
.vme_start
,
2720 panic("vm_map_unwire: re-lookup failed");
2721 entry
= first_entry
->vme_next
;
2723 entry
= first_entry
;
2725 last_timestamp
= map
->timestamp
;
2730 if ((entry
->wired_count
== 0) ||
2731 (user_wire
&& entry
->user_wired_count
== 0)) {
2733 panic("vm_map_unwire: entry is unwired");
2735 entry
= entry
->vme_next
;
2739 assert(entry
->wired_count
> 0 &&
2740 (!user_wire
|| entry
->user_wired_count
> 0));
2742 vm_map_clip_start(map
, entry
, start
);
2743 vm_map_clip_end(map
, entry
, end
);
2747 * Holes: Next entry should be contiguous unless
2748 * this is the end of the region.
2750 if (((entry
->vme_end
< end
) &&
2751 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2752 (entry
->vme_next
->vme_start
> entry
->vme_end
)))) {
2755 panic("vm_map_unwire: non-contiguous region");
2756 entry
= entry
->vme_next
;
2760 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2761 entry
->wired_count
--;
2763 if (entry
->wired_count
!= 0) {
2764 entry
= entry
->vme_next
;
2768 entry
->in_transition
= TRUE
;
2769 tmp_entry
= *entry
; /* see comment in vm_map_wire() */
 * We can unlock the map now.  The in_transition state
 * guarantees existence of the entry.
2777 vm_fault_unwire(map
,
2778 &tmp_entry
, FALSE
, map_pmap
, pmap_addr
);
2780 vm_fault_unwire(map
,
2781 &tmp_entry
, FALSE
, map
->pmap
,
2782 tmp_entry
.vme_start
);
2786 if (last_timestamp
+1 != map
->timestamp
) {
2788 * Find the entry again. It could have been clipped
2789 * or deleted after we unlocked the map.
2791 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2794 panic("vm_map_unwire: re-lookup failed");
2795 entry
= first_entry
->vme_next
;
2797 entry
= first_entry
;
2799 last_timestamp
= map
->timestamp
;
2802 * clear transition bit for all constituent entries that
2803 * were in the original entry (saved in tmp_entry). Also
2804 * check for waiters.
2806 while ((entry
!= vm_map_to_entry(map
)) &&
2807 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2808 assert(entry
->in_transition
);
2809 entry
->in_transition
= FALSE
;
2810 if (entry
->needs_wakeup
) {
2811 entry
->needs_wakeup
= FALSE
;
2814 entry
= entry
->vme_next
;
2819 * wake up anybody waiting on entries that we have unwired.
2822 vm_map_entry_wakeup(map
);
2823 return(KERN_SUCCESS
);
kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end,
				    user_wire, (pmap_t)NULL, 0);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = NULL;
		submap = entry->object.sub_map;
	} else {
		submap = NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);
	map->size -= e - s;

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 *	Deallocate the object only after removing all
	 *	pmap entries pointing to its pages.
	 */
	if (submap)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);
}
2887 vm_map_submap_pmap_clean(
2894 vm_offset_t submap_start
;
2895 vm_offset_t submap_end
;
2897 vm_size_t remove_size
;
2898 vm_map_entry_t entry
;
2900 submap_end
= offset
+ (end
- start
);
2901 submap_start
= offset
;
2902 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2904 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2905 if(offset
> entry
->vme_start
)
2906 remove_size
-= offset
- entry
->vme_start
;
2909 if(submap_end
< entry
->vme_end
) {
2911 entry
->vme_end
- submap_end
;
2913 if(entry
->is_sub_map
) {
2914 vm_map_submap_pmap_clean(
2917 start
+ remove_size
,
2918 entry
->object
.sub_map
,
2922 if((map
->mapped
) && (map
->ref_count
)
2923 && (entry
->object
.vm_object
!= NULL
)) {
2924 vm_object_pmap_protect(
2925 entry
->object
.vm_object
,
2932 pmap_remove(map
->pmap
,
2934 (addr64_t
)(start
+ remove_size
));
2939 entry
= entry
->vme_next
;
2941 while((entry
!= vm_map_to_entry(sub_map
))
2942 && (entry
->vme_start
< submap_end
)) {
2943 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2944 if(submap_end
< entry
->vme_end
) {
2945 remove_size
-= entry
->vme_end
- submap_end
;
2947 if(entry
->is_sub_map
) {
2948 vm_map_submap_pmap_clean(
2950 (start
+ entry
->vme_start
) - offset
,
2951 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2952 entry
->object
.sub_map
,
2955 if((map
->mapped
) && (map
->ref_count
)
2956 && (entry
->object
.vm_object
!= NULL
)) {
2957 vm_object_pmap_protect(
2958 entry
->object
.vm_object
,
2965 pmap_remove(map
->pmap
,
2966 (addr64_t
)((start
+ entry
->vme_start
)
2968 (addr64_t
)(((start
+ entry
->vme_start
)
2969 - offset
) + remove_size
));
2972 entry
= entry
->vme_next
;
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings.  Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
kern_return_t
vm_map_delete(
	register vm_map_t	map,
	vm_offset_t		start,
	register vm_offset_t	end,
	int			flags)
{
	vm_map_entry_t		entry, next;
	struct vm_map_entry	*first_entry, tmp_entry;
	register vm_offset_t	s, e;
	register vm_object_t	object;
	boolean_t		need_wakeup;
	unsigned int		last_timestamp = ~0; /* unlikely value */
	int			interruptible;
	extern vm_map_t		kernel_map;

	interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
			THREAD_ABORTSAFE : THREAD_UNINT;
3008 * All our DMA I/O operations in IOKit are currently done by
3009 * wiring through the map entries of the task requesting the I/O.
3010 * Because of this, we must always wait for kernel wirings
3011 * to go away on the entries before deleting them.
3013 * Any caller who wants to actually remove a kernel wiring
3014 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
3015 * properly remove one wiring instead of blasting through
3018 flags
|= VM_MAP_REMOVE_WAIT_FOR_KWIRE
;
3021 * Find the start of the region, and clip it
3023 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3024 entry
= first_entry
;
3025 vm_map_clip_start(map
, entry
, start
);
3028 * Fix the lookup hint now, rather than each
3029 * time through the loop.
3031 SAVE_HINT(map
, entry
->vme_prev
);
3033 entry
= first_entry
->vme_next
;
3036 need_wakeup
= FALSE
;
3038 * Step through all entries in this region
3040 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3042 vm_map_clip_end(map
, entry
, end
);
3043 if (entry
->in_transition
) {
3044 wait_result_t wait_result
;
3047 * Another thread is wiring/unwiring this entry.
3048 * Let the other thread know we are waiting.
3050 s
= entry
->vme_start
;
3051 entry
->needs_wakeup
= TRUE
;
3054 * wake up anybody waiting on entries that we have
3055 * already unwired/deleted.
3058 vm_map_entry_wakeup(map
);
3059 need_wakeup
= FALSE
;
3062 wait_result
= vm_map_entry_wait(map
, interruptible
);
3064 if (interruptible
&&
3065 wait_result
== THREAD_INTERRUPTED
) {
3067 * We do not clear the needs_wakeup flag,
3068 * since we cannot tell if we were the only one.
3071 return KERN_ABORTED
;
3075 * The entry could have been clipped or it
3076 * may not exist anymore. Look it up again.
3078 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3079 assert((map
!= kernel_map
) &&
3080 (!entry
->is_sub_map
));
3082 * User: use the next entry
3084 entry
= first_entry
->vme_next
;
3086 entry
= first_entry
;
3087 SAVE_HINT(map
, entry
->vme_prev
);
3089 last_timestamp
= map
->timestamp
;
3091 } /* end in_transition */
3093 if (entry
->wired_count
) {
3095 * Remove a kernel wiring if requested or if
3096 * there are user wirings.
3098 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3099 (entry
->user_wired_count
> 0))
3100 entry
->wired_count
--;
3102 /* remove all user wire references */
3103 entry
->user_wired_count
= 0;
3105 if (entry
->wired_count
!= 0) {
3106 assert((map
!= kernel_map
) &&
3107 (!entry
->is_sub_map
));
3109 * Cannot continue. Typical case is when
3110 * a user thread has physical io pending on
3111 * on this page. Either wait for the
3112 * kernel wiring to go away or return an
3115 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3116 wait_result_t wait_result
;
3118 s
= entry
->vme_start
;
3119 entry
->needs_wakeup
= TRUE
;
3120 wait_result
= vm_map_entry_wait(map
,
3123 if (interruptible
&&
3124 wait_result
== THREAD_INTERRUPTED
) {
3126 * We do not clear the
3127 * needs_wakeup flag, since we
3128 * cannot tell if we were the
3132 return KERN_ABORTED
;
3136 * The entry could have been clipped or
3137 * it may not exist anymore. Look it
3140 if (!vm_map_lookup_entry(map
, s
,
3142 assert((map
!= kernel_map
) &&
3143 (!entry
->is_sub_map
));
3145 * User: use the next entry
3147 entry
= first_entry
->vme_next
;
3149 entry
= first_entry
;
3150 SAVE_HINT(map
, entry
->vme_prev
);
3152 last_timestamp
= map
->timestamp
;
3156 return KERN_FAILURE
;
3160 entry
->in_transition
= TRUE
;
3162 * copy current entry. see comment in vm_map_wire()
3165 s
= entry
->vme_start
;
 * We can unlock the map now.  The in_transition
 * state guarantees existence of the entry.
3173 vm_fault_unwire(map
, &tmp_entry
,
3174 tmp_entry
.object
.vm_object
== kernel_object
,
3175 map
->pmap
, tmp_entry
.vme_start
);
3178 if (last_timestamp
+1 != map
->timestamp
) {
3180 * Find the entry again. It could have
3181 * been clipped after we unlocked the map.
3183 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3184 assert((map
!= kernel_map
) &&
3185 (!entry
->is_sub_map
));
3186 first_entry
= first_entry
->vme_next
;
3188 SAVE_HINT(map
, entry
->vme_prev
);
3191 SAVE_HINT(map
, entry
->vme_prev
);
3192 first_entry
= entry
;
3195 last_timestamp
= map
->timestamp
;
3197 entry
= first_entry
;
3198 while ((entry
!= vm_map_to_entry(map
)) &&
3199 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3200 assert(entry
->in_transition
);
3201 entry
->in_transition
= FALSE
;
3202 if (entry
->needs_wakeup
) {
3203 entry
->needs_wakeup
= FALSE
;
3206 entry
= entry
->vme_next
;
3209 * We have unwired the entry(s). Go back and
3212 entry
= first_entry
;
3216 /* entry is unwired */
3217 assert(entry
->wired_count
== 0);
3218 assert(entry
->user_wired_count
== 0);
3220 if ((!entry
->is_sub_map
&&
3221 entry
->object
.vm_object
!= kernel_object
) ||
3222 entry
->is_sub_map
) {
3223 if(entry
->is_sub_map
) {
3224 if(entry
->use_pmap
) {
3226 pmap_unnest(map
->pmap
, (addr64_t
)entry
->vme_start
);
3228 if((map
->mapped
) && (map
->ref_count
)) {
3229 /* clean up parent map/maps */
3230 vm_map_submap_pmap_clean(
3231 map
, entry
->vme_start
,
3233 entry
->object
.sub_map
,
3237 vm_map_submap_pmap_clean(
3238 map
, entry
->vme_start
, entry
->vme_end
,
3239 entry
->object
.sub_map
,
3243 object
= entry
->object
.vm_object
;
3244 if((map
->mapped
) && (map
->ref_count
)) {
3245 vm_object_pmap_protect(
3246 object
, entry
->offset
,
3247 entry
->vme_end
- entry
->vme_start
,
3251 } else if(object
!= NULL
) {
3252 if ((object
->shadow
!= NULL
) ||
3253 (object
->phys_contiguous
) ||
3254 (object
->resident_page_count
>
3255 atop((entry
->vme_end
- entry
->vme_start
)/4))) {
3256 pmap_remove(map
->pmap
,
3257 (addr64_t
)(entry
->vme_start
),
3258 (addr64_t
)(entry
->vme_end
));
3261 vm_object_offset_t start_off
;
3262 vm_object_offset_t end_off
;
3263 start_off
= entry
->offset
;
3264 end_off
= start_off
+
3265 (entry
->vme_end
- entry
->vme_start
);
3266 vm_object_lock(object
);
3267 queue_iterate(&object
->memq
,
3268 p
, vm_page_t
, listq
) {
3269 if ((!p
->fictitious
) &&
3270 (p
->offset
>= start_off
) &&
3271 (p
->offset
< end_off
)) {
3273 start
= entry
->vme_start
;
3274 start
+= p
->offset
- start_off
;
3280 vm_object_unlock(object
);
3286 next
= entry
->vme_next
;
3287 s
= next
->vme_start
;
3288 last_timestamp
= map
->timestamp
;
3289 vm_map_entry_delete(map
, entry
);
3290 /* vm_map_entry_delete unlocks the map */
3294 if(entry
== vm_map_to_entry(map
)) {
3297 if (last_timestamp
+1 != map
->timestamp
) {
3299 * we are responsible for deleting everything
3300 * from the give space, if someone has interfered
3301 * we pick up where we left off, back fills should
3302 * be all right for anyone except map_delete and
3303 * we have to assume that the task has been fully
3304 * disabled before we get here
3306 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3307 entry
= entry
->vme_next
;
3309 SAVE_HINT(map
, entry
->vme_prev
);
3312 * others can not only allocate behind us, we can
3313 * also see coalesce while we don't have the map lock
3315 if(entry
== vm_map_to_entry(map
)) {
3318 vm_map_clip_start(map
, entry
, s
);
3320 last_timestamp
= map
->timestamp
;
3323 if (map
->wait_for_space
)
3324 thread_wakeup((event_t
) map
);
3326 * wake up anybody waiting on entries that we have already deleted.
3329 vm_map_entry_wakeup(map
);
3331 return KERN_SUCCESS
;
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;
	boolean_t		funnel_set = FALSE;
	funnel_t		*curflock;
	thread_t		cur_thread;

	cur_thread = current_thread();

	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);
	if (funnel_set) {
		thread_funnel_set( curflock, TRUE);
		funnel_set = FALSE;
	}
	return(result);
}
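
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * tear down a range with vm_map_remove(), choosing the flags that
 * vm_map_delete() interprets above.  The wrapper name and this particular
 * flag combination are hypothetical; the flag names and the
 * trunc_page_32()/round_page_32() usage mirror vm_map_copyin_kernel_buffer()
 * later in this file.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
remove_range_example(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	len)
{
	/*
	 * Drop one kernel wiring if present, wait for any others to go
	 * away, and sleep interruptibly while doing so.
	 */
	return vm_map_remove(map,
			     trunc_page_32(addr),
			     round_page_32(addr + len),
			     VM_MAP_REMOVE_KUNWIRE |
			     VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			     VM_MAP_REMOVE_INTERRUPTIBLE);
}
#endif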
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
					vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *		Move the information in a map copy object to
 *		a new map copy object, leaving the old one
 *		empty.
 *
 *		This is used by kernel routines that need
 *		to look at out-of-line data (in copyin form)
 *		before deciding whether to return SUCCESS.
 *		If the routine returns FAILURE, the original
 *		copy object will be deallocated; therefore,
 *		these routines must make a copy of the copy
 *		object and leave the original empty so that
 *		deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */
	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
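
/*
 * Illustrative sketch (not part of the original source): the usage pattern the
 * comment above describes.  A kernel routine that must inspect out-of-line
 * data before deciding on success first clones the copy object, so that a
 * failure path can dispose of its clone while the caller can still discard
 * the (now empty) original without a double free.  The routine name and the
 * inspect() callback are hypothetical.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
inspect_out_of_line_example(
	vm_map_copy_t	copy,			/* caller-owned copy object */
	boolean_t	(*inspect)(vm_map_copy_t))
{
	vm_map_copy_t	my_copy;

	/* take ownership of the entry chain; "copy" is left empty but valid */
	my_copy = vm_map_copy_copy(copy);

	if (!inspect(my_copy)) {
		/* failure: drop the clone; the caller may still discard "copy" */
		vm_map_copy_discard(my_copy);
		return KERN_FAILURE;
	}

	/* success: my_copy is consumed elsewhere (e.g. by vm_map_copyout) */
	vm_map_copy_discard(my_copy);
	return KERN_SUCCESS;
}
#endif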
3475 vm_map_overwrite_submap_recurse(
3477 vm_offset_t dst_addr
,
3480 vm_offset_t dst_end
;
3481 vm_map_entry_t tmp_entry
;
3482 vm_map_entry_t entry
;
3483 kern_return_t result
;
3484 boolean_t encountered_sub_map
= FALSE
;
3489 * Verify that the destination is all writeable
3490 * initially. We have to trunc the destination
3491 * address and round the copy size or we'll end up
3492 * splitting entries in strange ways.
3495 dst_end
= round_page_32(dst_addr
+ dst_size
);
3496 vm_map_lock(dst_map
);
3499 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3500 vm_map_unlock(dst_map
);
3501 return(KERN_INVALID_ADDRESS
);
3504 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3506 for (entry
= tmp_entry
;;) {
3507 vm_map_entry_t next
;
3509 next
= entry
->vme_next
;
3510 while(entry
->is_sub_map
) {
3511 vm_offset_t sub_start
;
3512 vm_offset_t sub_end
;
3513 vm_offset_t local_end
;
3515 if (entry
->in_transition
) {
3517 * Say that we are waiting, and wait for entry.
3519 entry
->needs_wakeup
= TRUE
;
3520 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3525 encountered_sub_map
= TRUE
;
3526 sub_start
= entry
->offset
;
3528 if(entry
->vme_end
< dst_end
)
3529 sub_end
= entry
->vme_end
;
3532 sub_end
-= entry
->vme_start
;
3533 sub_end
+= entry
->offset
;
3534 local_end
= entry
->vme_end
;
3535 vm_map_unlock(dst_map
);
3537 result
= vm_map_overwrite_submap_recurse(
3538 entry
->object
.sub_map
,
3540 sub_end
- sub_start
);
3542 if(result
!= KERN_SUCCESS
)
3544 if (dst_end
<= entry
->vme_end
)
3545 return KERN_SUCCESS
;
3546 vm_map_lock(dst_map
);
3547 if(!vm_map_lookup_entry(dst_map
, local_end
,
3549 vm_map_unlock(dst_map
);
3550 return(KERN_INVALID_ADDRESS
);
3553 next
= entry
->vme_next
;
3556 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3557 vm_map_unlock(dst_map
);
3558 return(KERN_PROTECTION_FAILURE
);
3562 * If the entry is in transition, we must wait
3563 * for it to exit that state. Anything could happen
3564 * when we unlock the map, so start over.
3566 if (entry
->in_transition
) {
3569 * Say that we are waiting, and wait for entry.
3571 entry
->needs_wakeup
= TRUE
;
3572 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3578 * our range is contained completely within this map entry
3580 if (dst_end
<= entry
->vme_end
) {
3581 vm_map_unlock(dst_map
);
3582 return KERN_SUCCESS
;
3585 * check that range specified is contiguous region
3587 if ((next
== vm_map_to_entry(dst_map
)) ||
3588 (next
->vme_start
!= entry
->vme_end
)) {
3589 vm_map_unlock(dst_map
);
3590 return(KERN_INVALID_ADDRESS
);
3594 * Check for permanent objects in the destination.
3596 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3597 ((!entry
->object
.vm_object
->internal
) ||
3598 (entry
->object
.vm_object
->true_share
))) {
3599 if(encountered_sub_map
) {
3600 vm_map_unlock(dst_map
);
3601 return(KERN_FAILURE
);
3608 vm_map_unlock(dst_map
);
3609 return(KERN_SUCCESS
);
/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
kern_return_t
vm_map_copy_overwrite_nested(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible,
	pmap_t		pmap)
{
	vm_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	kr;
	boolean_t	aligned = TRUE;
	boolean_t	contains_permanent_objects = FALSE;
	boolean_t	encountered_sub_map = FALSE;
	vm_offset_t	base_addr;
	vm_size_t	copy_size;
	vm_size_t	total_size;
3684 * Check for null copy object.
3687 if (copy
== VM_MAP_COPY_NULL
)
3688 return(KERN_SUCCESS
);
3691 * Check for special kernel buffer allocated
3692 * by new_ipc_kmsg_copyin.
3695 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3696 return(vm_map_copyout_kernel_buffer(
3702 * Only works for entry lists at the moment. Will
3703 * support page lists later.
3706 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3708 if (copy
->size
== 0) {
3709 vm_map_copy_discard(copy
);
3710 return(KERN_SUCCESS
);
3714 * Verify that the destination is all writeable
3715 * initially. We have to trunc the destination
3716 * address and round the copy size or we'll end up
3717 * splitting entries in strange ways.
3720 if (!page_aligned(copy
->size
) ||
3721 !page_aligned (copy
->offset
) ||
3722 !page_aligned (dst_addr
))
3725 dst_end
= round_page_32(dst_addr
+ copy
->size
);
3727 dst_end
= dst_addr
+ copy
->size
;
3730 vm_map_lock(dst_map
);
3733 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3734 vm_map_unlock(dst_map
);
3735 return(KERN_INVALID_ADDRESS
);
3737 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(dst_addr
));
3738 for (entry
= tmp_entry
;;) {
3739 vm_map_entry_t next
= entry
->vme_next
;
3741 while(entry
->is_sub_map
) {
3742 vm_offset_t sub_start
;
3743 vm_offset_t sub_end
;
3744 vm_offset_t local_end
;
3746 if (entry
->in_transition
) {
3749 * Say that we are waiting, and wait for entry.
3751 entry
->needs_wakeup
= TRUE
;
3752 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3757 local_end
= entry
->vme_end
;
3758 if (!(entry
->needs_copy
)) {
3759 /* if needs_copy we are a COW submap */
3760 /* in such a case we just replace so */
3761 /* there is no need for the follow- */
3763 encountered_sub_map
= TRUE
;
3764 sub_start
= entry
->offset
;
3766 if(entry
->vme_end
< dst_end
)
3767 sub_end
= entry
->vme_end
;
3770 sub_end
-= entry
->vme_start
;
3771 sub_end
+= entry
->offset
;
3772 vm_map_unlock(dst_map
);
3774 kr
= vm_map_overwrite_submap_recurse(
3775 entry
->object
.sub_map
,
3777 sub_end
- sub_start
);
3778 if(kr
!= KERN_SUCCESS
)
3780 vm_map_lock(dst_map
);
3783 if (dst_end
<= entry
->vme_end
)
3784 goto start_overwrite
;
3785 if(!vm_map_lookup_entry(dst_map
, local_end
,
3787 vm_map_unlock(dst_map
);
3788 return(KERN_INVALID_ADDRESS
);
3790 next
= entry
->vme_next
;
3793 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3794 vm_map_unlock(dst_map
);
3795 return(KERN_PROTECTION_FAILURE
);
3799 * If the entry is in transition, we must wait
3800 * for it to exit that state. Anything could happen
3801 * when we unlock the map, so start over.
3803 if (entry
->in_transition
) {
3806 * Say that we are waiting, and wait for entry.
3808 entry
->needs_wakeup
= TRUE
;
3809 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3815 * our range is contained completely within this map entry
3817 if (dst_end
<= entry
->vme_end
)
3820 * check that range specified is contiguous region
3822 if ((next
== vm_map_to_entry(dst_map
)) ||
3823 (next
->vme_start
!= entry
->vme_end
)) {
3824 vm_map_unlock(dst_map
);
3825 return(KERN_INVALID_ADDRESS
);
3830 * Check for permanent objects in the destination.
3832 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3833 ((!entry
->object
.vm_object
->internal
) ||
3834 (entry
->object
.vm_object
->true_share
))) {
3835 contains_permanent_objects
= TRUE
;
3843 * If there are permanent objects in the destination, then
3844 * the copy cannot be interrupted.
3847 if (interruptible
&& contains_permanent_objects
) {
3848 vm_map_unlock(dst_map
);
3849 return(KERN_FAILURE
); /* XXX */
3854 * Make a second pass, overwriting the data
3855 * At the beginning of each loop iteration,
3856 * the next entry to be overwritten is "tmp_entry"
3857 * (initially, the value returned from the lookup above),
3858 * and the starting address expected in that entry
3862 total_size
= copy
->size
;
3863 if(encountered_sub_map
) {
3865 /* re-calculate tmp_entry since we've had the map */
3867 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3868 vm_map_unlock(dst_map
);
3869 return(KERN_INVALID_ADDRESS
);
3872 copy_size
= copy
->size
;
3875 base_addr
= dst_addr
;
3877 /* deconstruct the copy object and do in parts */
3878 /* only in sub_map, interruptable case */
3879 vm_map_entry_t copy_entry
;
3880 vm_map_entry_t previous_prev
;
3881 vm_map_entry_t next_copy
;
3883 int remaining_entries
;
3886 for (entry
= tmp_entry
; copy_size
== 0;) {
3887 vm_map_entry_t next
;
3889 next
= entry
->vme_next
;
		/* tmp_entry and base address are moved along */
		/* each time we encounter a sub-map.  Otherwise */
		/* entry can outpace tmp_entry, and the copy_size */
		/* may reflect the distance between them. */
		/* If the current entry is found to be in transition, */
		/* we will start over at the beginning or at the last */
		/* encounter of a submap, as dictated by base_addr, */
		/* and we will zero copy_size accordingly. */
3899 if (entry
->in_transition
) {
3901 * Say that we are waiting, and wait for entry.
3903 entry
->needs_wakeup
= TRUE
;
3904 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3906 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3908 vm_map_unlock(dst_map
);
3909 return(KERN_INVALID_ADDRESS
);
3915 if(entry
->is_sub_map
) {
3916 vm_offset_t sub_start
;
3917 vm_offset_t sub_end
;
3918 vm_offset_t local_end
;
3920 if (entry
->needs_copy
) {
3921 /* if this is a COW submap */
3922 /* just back the range with a */
3923 /* anonymous entry */
3924 if(entry
->vme_end
< dst_end
)
3925 sub_end
= entry
->vme_end
;
3928 if(entry
->vme_start
< base_addr
)
3929 sub_start
= base_addr
;
3931 sub_start
= entry
->vme_start
;
3933 dst_map
, entry
, sub_end
);
3935 dst_map
, entry
, sub_start
);
3936 entry
->is_sub_map
= FALSE
;
3938 entry
->object
.sub_map
);
3939 entry
->object
.sub_map
= NULL
;
3940 entry
->is_shared
= FALSE
;
3941 entry
->needs_copy
= FALSE
;
3943 entry
->protection
= VM_PROT_ALL
;
3944 entry
->max_protection
= VM_PROT_ALL
;
3945 entry
->wired_count
= 0;
3946 entry
->user_wired_count
= 0;
3947 if(entry
->inheritance
3948 == VM_INHERIT_SHARE
)
3949 entry
->inheritance
= VM_INHERIT_COPY
;
3952 /* first take care of any non-sub_map */
3953 /* entries to send */
3954 if(base_addr
< entry
->vme_start
) {
3957 entry
->vme_start
- base_addr
;
3960 sub_start
= entry
->offset
;
3962 if(entry
->vme_end
< dst_end
)
3963 sub_end
= entry
->vme_end
;
3966 sub_end
-= entry
->vme_start
;
3967 sub_end
+= entry
->offset
;
3968 local_end
= entry
->vme_end
;
3969 vm_map_unlock(dst_map
);
3970 copy_size
= sub_end
- sub_start
;
3972 /* adjust the copy object */
3973 if (total_size
> copy_size
) {
3974 vm_size_t local_size
= 0;
3975 vm_size_t entry_size
;
3978 new_offset
= copy
->offset
;
3979 copy_entry
= vm_map_copy_first_entry(copy
);
3981 vm_map_copy_to_entry(copy
)){
3982 entry_size
= copy_entry
->vme_end
-
3983 copy_entry
->vme_start
;
3984 if((local_size
< copy_size
) &&
3985 ((local_size
+ entry_size
)
3987 vm_map_copy_clip_end(copy
,
3989 copy_entry
->vme_start
+
3990 (copy_size
- local_size
));
3991 entry_size
= copy_entry
->vme_end
-
3992 copy_entry
->vme_start
;
3993 local_size
+= entry_size
;
3994 new_offset
+= entry_size
;
3996 if(local_size
>= copy_size
) {
3997 next_copy
= copy_entry
->vme_next
;
3998 copy_entry
->vme_next
=
3999 vm_map_copy_to_entry(copy
);
4001 copy
->cpy_hdr
.links
.prev
;
4002 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4003 copy
->size
= copy_size
;
4005 copy
->cpy_hdr
.nentries
;
4006 remaining_entries
-= nentries
;
4007 copy
->cpy_hdr
.nentries
= nentries
;
4010 local_size
+= entry_size
;
4011 new_offset
+= entry_size
;
4014 copy_entry
= copy_entry
->vme_next
;
4018 if((entry
->use_pmap
) && (pmap
== NULL
)) {
4019 kr
= vm_map_copy_overwrite_nested(
4020 entry
->object
.sub_map
,
4024 entry
->object
.sub_map
->pmap
);
4025 } else if (pmap
!= NULL
) {
4026 kr
= vm_map_copy_overwrite_nested(
4027 entry
->object
.sub_map
,
4030 interruptible
, pmap
);
4032 kr
= vm_map_copy_overwrite_nested(
4033 entry
->object
.sub_map
,
4039 if(kr
!= KERN_SUCCESS
) {
4040 if(next_copy
!= NULL
) {
4041 copy
->cpy_hdr
.nentries
+=
4043 copy
->cpy_hdr
.links
.prev
->vme_next
=
4045 copy
->cpy_hdr
.links
.prev
4047 copy
->size
= total_size
;
4051 if (dst_end
<= local_end
) {
4052 return(KERN_SUCCESS
);
4054 /* otherwise copy no longer exists, it was */
4055 /* destroyed after successful copy_overwrite */
4056 copy
= (vm_map_copy_t
)
4057 zalloc(vm_map_copy_zone
);
4058 vm_map_copy_first_entry(copy
) =
4059 vm_map_copy_last_entry(copy
) =
4060 vm_map_copy_to_entry(copy
);
4061 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4062 copy
->offset
= new_offset
;
4064 total_size
-= copy_size
;
4066 /* put back remainder of copy in container */
4067 if(next_copy
!= NULL
) {
4068 copy
->cpy_hdr
.nentries
= remaining_entries
;
4069 copy
->cpy_hdr
.links
.next
= next_copy
;
4070 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4071 copy
->size
= total_size
;
4072 next_copy
->vme_prev
=
4073 vm_map_copy_to_entry(copy
);
4076 base_addr
= local_end
;
4077 vm_map_lock(dst_map
);
4078 if(!vm_map_lookup_entry(dst_map
,
4079 local_end
, &tmp_entry
)) {
4080 vm_map_unlock(dst_map
);
4081 return(KERN_INVALID_ADDRESS
);
4086 if (dst_end
<= entry
->vme_end
) {
4087 copy_size
= dst_end
- base_addr
;
4091 if ((next
== vm_map_to_entry(dst_map
)) ||
4092 (next
->vme_start
!= entry
->vme_end
)) {
4093 vm_map_unlock(dst_map
);
4094 return(KERN_INVALID_ADDRESS
);
4103 /* adjust the copy object */
4104 if (total_size
> copy_size
) {
4105 vm_size_t local_size
= 0;
4106 vm_size_t entry_size
;
4108 new_offset
= copy
->offset
;
4109 copy_entry
= vm_map_copy_first_entry(copy
);
4110 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4111 entry_size
= copy_entry
->vme_end
-
4112 copy_entry
->vme_start
;
4113 if((local_size
< copy_size
) &&
4114 ((local_size
+ entry_size
)
4116 vm_map_copy_clip_end(copy
, copy_entry
,
4117 copy_entry
->vme_start
+
4118 (copy_size
- local_size
));
4119 entry_size
= copy_entry
->vme_end
-
4120 copy_entry
->vme_start
;
4121 local_size
+= entry_size
;
4122 new_offset
+= entry_size
;
4124 if(local_size
>= copy_size
) {
4125 next_copy
= copy_entry
->vme_next
;
4126 copy_entry
->vme_next
=
4127 vm_map_copy_to_entry(copy
);
4129 copy
->cpy_hdr
.links
.prev
;
4130 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4131 copy
->size
= copy_size
;
4133 copy
->cpy_hdr
.nentries
;
4134 remaining_entries
-= nentries
;
4135 copy
->cpy_hdr
.nentries
= nentries
;
4138 local_size
+= entry_size
;
4139 new_offset
+= entry_size
;
4142 copy_entry
= copy_entry
->vme_next
;
4152 local_pmap
= dst_map
->pmap
;
4154 if ((kr
= vm_map_copy_overwrite_aligned(
4155 dst_map
, tmp_entry
, copy
,
4156 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4157 if(next_copy
!= NULL
) {
4158 copy
->cpy_hdr
.nentries
+=
4160 copy
->cpy_hdr
.links
.prev
->vme_next
=
4162 copy
->cpy_hdr
.links
.prev
=
4164 copy
->size
+= copy_size
;
4168 vm_map_unlock(dst_map
);
4173 * if the copy and dst address are misaligned but the same
4174 * offset within the page we can copy_not_aligned the
4175 * misaligned parts and copy aligned the rest. If they are
4176 * aligned but len is unaligned we simply need to copy
4177 * the end bit unaligned. We'll need to split the misaligned
4178 * bits of the region in this case !
4180 /* ALWAYS UNLOCKS THE dst_map MAP */
4181 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4182 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4183 if(next_copy
!= NULL
) {
4184 copy
->cpy_hdr
.nentries
+=
4186 copy
->cpy_hdr
.links
.prev
->vme_next
=
4188 copy
->cpy_hdr
.links
.prev
=
4190 copy
->size
+= copy_size
;
4195 total_size
-= copy_size
;
4198 base_addr
+= copy_size
;
4200 copy
->offset
= new_offset
;
4201 if(next_copy
!= NULL
) {
4202 copy
->cpy_hdr
.nentries
= remaining_entries
;
4203 copy
->cpy_hdr
.links
.next
= next_copy
;
4204 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4205 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4206 copy
->size
= total_size
;
4208 vm_map_lock(dst_map
);
4210 if (!vm_map_lookup_entry(dst_map
,
4211 base_addr
, &tmp_entry
)) {
4212 vm_map_unlock(dst_map
);
4213 return(KERN_INVALID_ADDRESS
);
4215 if (tmp_entry
->in_transition
) {
4216 entry
->needs_wakeup
= TRUE
;
4217 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4222 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page_32(base_addr
));
4228 * Throw away the vm_map_copy object
4230 vm_map_copy_discard(copy
);
4232 return(KERN_SUCCESS
);
4233 }/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
			dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
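
/*
 * Illustrative sketch (not part of the original source): overwriting an
 * already-mapped, writable destination with data captured from a source map.
 * It assumes the usual vm_map_copyin(src_map, src_addr, len, src_destroy,
 * &copy) entry-list interface, which is not defined in this excerpt; the
 * helper name is hypothetical.  On success the copy object is consumed by
 * vm_map_copy_overwrite(); on failure the caller must discard it.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
overwrite_example(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_size_t	len)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len,
			   FALSE /* src_destroy */, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy,
				   TRUE /* interruptible */);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* not consumed on failure */
	return kr;
}
#endif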
/*
 *	Routine:	vm_map_copy_overwrite_unaligned
 *
 *	Description:
 *	Physically copy unaligned data.
 *
 *	Implementation:
 *	Unaligned parts of pages have to be physically copied.  We use
 *	a modified form of vm_fault_copy (which understands non-aligned
 *	page offsets and sizes) to do the copy.  We attempt to copy as
 *	much memory in one go as possible; however, vm_fault_copy copies
 *	within one memory object, so we have to find the smallest of
 *	"amount left", "source object data size" and "target object data
 *	size".  With unaligned data we don't need to split regions,
 *	therefore the source (copy) object should be one map entry; the
 *	target range may be split over multiple map entries, however.
 *	In any event we are pessimistic about these assumptions.
 *
 *	Assumptions:
 *	dst_map is locked on entry and is returned locked on success,
 *	unlocked on error.
 */
kern_return_t
vm_map_copy_overwrite_unaligned(
	vm_map_t	dst_map,
	vm_map_entry_t	entry,
	vm_map_copy_t	copy,
	vm_offset_t	start)
{
	vm_map_entry_t		copy_entry = vm_map_copy_first_entry(copy);
	vm_map_version_t	version;
	vm_object_t		dst_object;
	vm_object_offset_t	dst_offset;
	vm_object_offset_t	src_offset;
	vm_object_offset_t	entry_offset;
	vm_offset_t		entry_end;
	vm_size_t		src_size,
				dst_size,
				copy_size,
				amount_left;
	kern_return_t		kr = KERN_SUCCESS;
4290 vm_map_lock_write_to_read(dst_map
);
4292 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4293 amount_left
= copy
->size
;
4295 * unaligned so we never clipped this entry, we need the offset into
4296 * the vm_object not just the data.
4298 while (amount_left
> 0) {
4300 if (entry
== vm_map_to_entry(dst_map
)) {
4301 vm_map_unlock_read(dst_map
);
4302 return KERN_INVALID_ADDRESS
;
4305 /* "start" must be within the current map entry */
4306 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4308 dst_offset
= start
- entry
->vme_start
;
4310 dst_size
= entry
->vme_end
- start
;
4312 src_size
= copy_entry
->vme_end
-
4313 (copy_entry
->vme_start
+ src_offset
);
4315 if (dst_size
< src_size
) {
4317 * we can only copy dst_size bytes before
4318 * we have to get the next destination entry
4320 copy_size
= dst_size
;
4323 * we can only copy src_size bytes before
4324 * we have to get the next source copy entry
4326 copy_size
= src_size
;
4329 if (copy_size
> amount_left
) {
4330 copy_size
= amount_left
;
4333 * Entry needs copy, create a shadow shadow object for
4334 * Copy on write region.
4336 if (entry
->needs_copy
&&
4337 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4339 if (vm_map_lock_read_to_write(dst_map
)) {
4340 vm_map_lock_read(dst_map
);
4343 vm_object_shadow(&entry
->object
.vm_object
,
4345 (vm_size_t
)(entry
->vme_end
4346 - entry
->vme_start
));
4347 entry
->needs_copy
= FALSE
;
4348 vm_map_lock_write_to_read(dst_map
);
4350 dst_object
= entry
->object
.vm_object
;
4352 * unlike with the virtual (aligned) copy we're going
4353 * to fault on it therefore we need a target object.
4355 if (dst_object
== VM_OBJECT_NULL
) {
4356 if (vm_map_lock_read_to_write(dst_map
)) {
4357 vm_map_lock_read(dst_map
);
4360 dst_object
= vm_object_allocate((vm_size_t
)
4361 entry
->vme_end
- entry
->vme_start
);
4362 entry
->object
.vm_object
= dst_object
;
4364 vm_map_lock_write_to_read(dst_map
);
4367 * Take an object reference and unlock map. The "entry" may
4368 * disappear or change when the map is unlocked.
4370 vm_object_reference(dst_object
);
4371 version
.main_timestamp
= dst_map
->timestamp
;
4372 entry_offset
= entry
->offset
;
4373 entry_end
= entry
->vme_end
;
4374 vm_map_unlock_read(dst_map
);
4376 * Copy as much as possible in one pass
4379 copy_entry
->object
.vm_object
,
4380 copy_entry
->offset
+ src_offset
,
4383 entry_offset
+ dst_offset
,
4389 src_offset
+= copy_size
;
4390 amount_left
-= copy_size
;
4392 * Release the object reference
4394 vm_object_deallocate(dst_object
);
4396 * If a hard error occurred, return it now
4398 if (kr
!= KERN_SUCCESS
)
4401 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4402 || amount_left
== 0)
4405 * all done with this copy entry, dispose.
4407 vm_map_copy_entry_unlink(copy
, copy_entry
);
4408 vm_object_deallocate(copy_entry
->object
.vm_object
);
4409 vm_map_copy_entry_dispose(copy
, copy_entry
);
4411 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4412 == vm_map_copy_to_entry(copy
) && amount_left
) {
4414 * not finished copying but run out of source
4416 return KERN_INVALID_ADDRESS
;
4421 if (amount_left
== 0)
4422 return KERN_SUCCESS
;
4424 vm_map_lock_read(dst_map
);
4425 if (version
.main_timestamp
== dst_map
->timestamp
) {
4426 if (start
== entry_end
) {
4428 * destination region is split. Use the version
4429 * information to avoid a lookup in the normal
4432 entry
= entry
->vme_next
;
4434 * should be contiguous. Fail if we encounter
4435 * a hole in the destination.
4437 if (start
!= entry
->vme_start
) {
4438 vm_map_unlock_read(dst_map
);
4439 return KERN_INVALID_ADDRESS
;
4444 * Map version check failed.
4445 * we must lookup the entry because somebody
4446 * might have changed the map behind our backs.
4449 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4451 vm_map_unlock_read(dst_map
);
4452 return KERN_INVALID_ADDRESS
;
4458 vm_map_unlock_read(dst_map
);
4460 return KERN_SUCCESS
;
4461 }/* vm_map_copy_overwrite_unaligned */
/*
 *	Routine:	vm_map_copy_overwrite_aligned
 *
 *	Description:
 *	Does all the vm_trickery possible for whole pages.
 *
 *	Implementation:
 *
 *	If there are no permanent objects in the destination,
 *	and the source and destination map entry zones match,
 *	and the destination map entry is not shared,
 *	then the map entries can be deleted and replaced
 *	with those from the copy.  The following code is the
 *	basic idea of what to do, but there are lots of annoying
 *	little details about getting protection and inheritance
 *	right.  Should add protection, inheritance, and sharing checks
 *	to the above pass and make sure that no wiring is involved.
 */
kern_return_t
vm_map_copy_overwrite_aligned(
	vm_map_t	dst_map,
	vm_map_entry_t	tmp_entry,
	vm_map_copy_t	copy,
	vm_offset_t	start,
	pmap_t		pmap)
{
	vm_object_t	object;
	vm_map_entry_t	copy_entry;
	vm_size_t	copy_size;
	vm_size_t	size;
	vm_map_entry_t	entry;
4496 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4497 != vm_map_copy_to_entry(copy
))
4499 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4502 if (entry
== vm_map_to_entry(dst_map
)) {
4503 vm_map_unlock(dst_map
);
4504 return KERN_INVALID_ADDRESS
;
4506 size
= (entry
->vme_end
- entry
->vme_start
);
4508 * Make sure that no holes popped up in the
4509 * address map, and that the protection is
4510 * still valid, in case the map was unlocked
4514 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4515 && !entry
->needs_copy
)) {
4516 vm_map_unlock(dst_map
);
4517 return(KERN_INVALID_ADDRESS
);
4519 assert(entry
!= vm_map_to_entry(dst_map
));
4522 * Check protection again
4525 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4526 vm_map_unlock(dst_map
);
4527 return(KERN_PROTECTION_FAILURE
);
4531 * Adjust to source size first
4534 if (copy_size
< size
) {
4535 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4540 * Adjust to destination size
4543 if (size
< copy_size
) {
4544 vm_map_copy_clip_end(copy
, copy_entry
,
4545 copy_entry
->vme_start
+ size
);
4549 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4550 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4551 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4554 * If the destination contains temporary unshared memory,
4555 * we can perform the copy by throwing it away and
4556 * installing the source data.
4559 object
= entry
->object
.vm_object
;
4560 if ((!entry
->is_shared
&&
4561 ((object
== VM_OBJECT_NULL
) ||
4562 (object
->internal
&& !object
->true_share
))) ||
4563 entry
->needs_copy
) {
4564 vm_object_t old_object
= entry
->object
.vm_object
;
4565 vm_object_offset_t old_offset
= entry
->offset
;
4566 vm_object_offset_t offset
;
4569 * Ensure that the source and destination aren't
4572 if (old_object
== copy_entry
->object
.vm_object
&&
4573 old_offset
== copy_entry
->offset
) {
4574 vm_map_copy_entry_unlink(copy
, copy_entry
);
4575 vm_map_copy_entry_dispose(copy
, copy_entry
);
4577 if (old_object
!= VM_OBJECT_NULL
)
4578 vm_object_deallocate(old_object
);
4580 start
= tmp_entry
->vme_end
;
4581 tmp_entry
= tmp_entry
->vme_next
;
4585 if (old_object
!= VM_OBJECT_NULL
) {
4586 if(entry
->is_sub_map
) {
4587 if(entry
->use_pmap
) {
4589 pmap_unnest(dst_map
->pmap
,
4592 - entry
->vme_start
);
4594 if(dst_map
->mapped
) {
4595 /* clean up parent */
4597 vm_map_submap_pmap_clean(
4598 dst_map
, entry
->vme_start
,
4600 entry
->object
.sub_map
,
4604 vm_map_submap_pmap_clean(
4605 dst_map
, entry
->vme_start
,
4607 entry
->object
.sub_map
,
4611 entry
->object
.sub_map
);
4613 if(dst_map
->mapped
) {
4614 vm_object_pmap_protect(
4615 entry
->object
.vm_object
,
4623 pmap_remove(dst_map
->pmap
,
4624 (addr64_t
)(entry
->vme_start
),
4625 (addr64_t
)(entry
->vme_end
));
4627 vm_object_deallocate(old_object
);
4631 entry
->is_sub_map
= FALSE
;
4632 entry
->object
= copy_entry
->object
;
4633 object
= entry
->object
.vm_object
;
4634 entry
->needs_copy
= copy_entry
->needs_copy
;
4635 entry
->wired_count
= 0;
4636 entry
->user_wired_count
= 0;
4637 offset
= entry
->offset
= copy_entry
->offset
;
4639 vm_map_copy_entry_unlink(copy
, copy_entry
);
4640 vm_map_copy_entry_dispose(copy
, copy_entry
);
4641 #if BAD_OPTIMIZATION
4643 * if we turn this optimization back on
4644 * we need to revisit our use of pmap mappings
4645 * large copies will cause us to run out and panic
4646 * this optimization only saved on average 2 us per page if ALL
4647 * the pages in the source were currently mapped
4648 * and ALL the pages in the dest were touched, if there were fewer
4649 * than 2/3 of the pages touched, this optimization actually cost more cycles
4653 * Try to aggressively enter physical mappings
4654 * (but avoid uninstantiated objects)
4656 if (object
!= VM_OBJECT_NULL
) {
4657 vm_offset_t va
= entry
->vme_start
;
4659 while (va
< entry
->vme_end
) {
4660 register vm_page_t m
;
4664 * Look for the page in the top object
4666 prot
= entry
->protection
;
4667 vm_object_lock(object
);
4668 vm_object_paging_begin(object
);
4670 if ((m
= vm_page_lookup(object
,offset
)) !=
4671 VM_PAGE_NULL
&& !m
->busy
&&
4673 (!m
->unusual
|| (!m
->error
&&
4674 !m
->restart
&& !m
->absent
&&
4675 (prot
& m
->page_lock
) == 0))) {
4678 vm_object_unlock(object
);
4681 * Honor COW obligations
4683 if (entry
->needs_copy
)
4684 prot
&= ~VM_PROT_WRITE
;
4685 /* It is our policy to require */
4686 /* explicit sync from anyone */
4687 /* writing code and then */
4688 /* a pc to execute it. */
4691 PMAP_ENTER(pmap
, va
, m
, prot
,
4693 (m
->object
->wimg_bits
))
4697 vm_object_lock(object
);
4698 vm_page_lock_queues();
4699 if (!m
->active
&& !m
->inactive
)
4700 vm_page_activate(m
);
4701 vm_page_unlock_queues();
4702 PAGE_WAKEUP_DONE(m
);
4704 vm_object_paging_end(object
);
4705 vm_object_unlock(object
);
4707 offset
+= PAGE_SIZE_64
;
4709 } /* end while (va < entry->vme_end) */
4710 } /* end if (object) */
4713 * Set up for the next iteration. The map
4714 * has not been unlocked, so the next
4715 * address should be at the end of this
4716 * entry, and the next map entry should be
4717 * the one following it.
4720 start
= tmp_entry
->vme_end
;
4721 tmp_entry
= tmp_entry
->vme_next
;
4723 vm_map_version_t version
;
4724 vm_object_t dst_object
= entry
->object
.vm_object
;
4725 vm_object_offset_t dst_offset
= entry
->offset
;
4729 * Take an object reference, and record
4730 * the map version information so that the
4731 * map can be safely unlocked.
4734 vm_object_reference(dst_object
);
4736 /* account for unlock bumping up timestamp */
4737 version
.main_timestamp
= dst_map
->timestamp
+ 1;
4739 vm_map_unlock(dst_map
);
4742 * Copy as much as possible in one pass
4747 copy_entry
->object
.vm_object
,
4757 * Release the object reference
4760 vm_object_deallocate(dst_object
);
4763 * If a hard error occurred, return it now
4766 if (r
!= KERN_SUCCESS
)
4769 if (copy_size
!= 0) {
4771 * Dispose of the copied region
4774 vm_map_copy_clip_end(copy
, copy_entry
,
4775 copy_entry
->vme_start
+ copy_size
);
4776 vm_map_copy_entry_unlink(copy
, copy_entry
);
4777 vm_object_deallocate(copy_entry
->object
.vm_object
);
4778 vm_map_copy_entry_dispose(copy
, copy_entry
);
4782 * Pick up in the destination map where we left off.
4784 * Use the version information to avoid a lookup
4785 * in the normal case.
4789 vm_map_lock(dst_map
);
4790 if (version
.main_timestamp
== dst_map
->timestamp
) {
4791 /* We can safely use saved tmp_entry value */
4793 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4794 tmp_entry
= tmp_entry
->vme_next
;
4796 /* Must do lookup of tmp_entry */
4798 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4799 vm_map_unlock(dst_map
);
4800 return(KERN_INVALID_ADDRESS
);
4802 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4807 return(KERN_SUCCESS
);
4808 }/* vm_map_copy_overwrite_aligned */
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map.  The original space may be optionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result)
{
	boolean_t	flags;
	vm_map_copy_t	copy;
	vm_size_t	kalloc_size = sizeof(struct vm_map_copy) + len;

	copy = (vm_map_copy_t) kalloc(kalloc_size);
	if (copy == VM_MAP_COPY_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
	copy->size = len;
	copy->offset = 0;
	copy->cpy_kdata = (vm_offset_t) (copy + 1);
	copy->cpy_kalloc_size = kalloc_size;

	if (src_map == kernel_map) {
		bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
		flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	} else {
		kern_return_t	kr;

		kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
		if (kr != KERN_SUCCESS) {
			kfree((vm_offset_t)copy, kalloc_size);
			return kr;
		}
		flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	}
	if (src_destroy) {
		(void) vm_map_remove(src_map, trunc_page_32(src_addr),
				     round_page_32(src_addr + len),
				     flags);
	}
	*copy_result = copy;
	return KERN_SUCCESS;
}
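
/*
 * Illustrative sketch (not part of the original source): the small-message
 * round trip these kernel-buffer routines provide.  Data is pulled from
 * src_map into a VM_MAP_COPY_KERNEL_BUFFER copy object and later pushed into
 * dst_map.  The helper name is hypothetical; the parameter order for
 * vm_map_copyout_kernel_buffer() follows its definition below.
 */
#if 0	/* example only -- not compiled */
static kern_return_t
kernel_buffer_roundtrip_example(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	vm_map_t	dst_map,
	vm_offset_t	*dst_addr)	/* OUT when overwrite == FALSE */
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin_kernel_buffer(src_map, src_addr, len,
					 FALSE /* src_destroy */, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	/* overwrite == FALSE: space is allocated and *dst_addr returned */
	return vm_map_copyout_kernel_buffer(dst_map, dst_addr, copy, FALSE);
}
#endif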
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout_kernel_buffer(
	vm_map_t	map,
	vm_offset_t	*addr,		/* IN/OUT */
	vm_map_copy_t	copy,
	boolean_t	overwrite)
{
	kern_return_t	kr = KERN_SUCCESS;
	thread_act_t	thr_act = current_act();

	if (!overwrite) {

		/*
		 *	Allocate space in the target map for the data
		 */
		*addr = 0;
		kr = vm_map_enter(map,
				  addr,
				  round_page_32(copy->size),
				  (vm_offset_t) 0, TRUE,
				  VM_OBJECT_NULL,
				  (vm_object_offset_t) 0, FALSE,
				  VM_PROT_DEFAULT, VM_PROT_ALL,
				  VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS)
			return(kr);
	}

	/*
	 *	Copyout the data from the kernel buffer to the target map.
	 */
	if (thr_act->map == map) {

		/*
		 *	If the target map is the current map, just do
		 *	the copy.
		 */
		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}
	} else {
		vm_map_t oldmap;

		/*
		 *	If the target map is another map, assume the
		 *	target's address space identity for the duration
		 *	of the copy.
		 */
		vm_map_reference(map);
		oldmap = vm_map_switch(map);

		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}

		(void) vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}

	kfree((vm_offset_t)copy, copy->cpy_kalloc_size);

	return(kr);
}
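/*
 * The cross-map branch above relies on temporarily adopting the target map's
 * address space so that copyout() resolves user addresses in that map.  A
 * minimal sketch of the pattern (error handling simplified to folding the
 * failure into "kr"):
 */
#if 0
	vm_map_t oldmap;

	vm_map_reference(map);			/* keep the target map alive */
	oldmap = vm_map_switch(map);		/* adopt its address space */
	if (copyout((char *)copy->cpy_kdata, (char *)*addr, copy->size))
		kr = KERN_INVALID_ADDRESS;	/* sketch: record the failure */
	(void) vm_map_switch(oldmap);		/* restore our own map */
	vm_map_deallocate(map);
#endif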
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	vm_map_copy_insert(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free);		\
	zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy);		\
MACRO_END
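/*
 * Usage sketch for vm_map_copy_insert(): with the destination map locked and
 * "last" pointing at the entry after which the copied range belongs, the whole
 * chain is spliced in and the copy header retired in one step, as
 * vm_map_copyout() does below.  Variable names are illustrative.
 */
#if 0
	vm_map_copy_insert(dst_map, last, copy);	/* consumes "copy" */
	vm_map_unlock(dst_map);
#endif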
/*
 *	Routine:	vm_map_copyout
 *
 *	Description:
 *		Copy out a copy chain ("copy") into newly-allocated
 *		space in the destination map.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
4986 register vm_map_t dst_map
,
4987 vm_offset_t
*dst_addr
, /* OUT */
4988 register vm_map_copy_t copy
)
4991 vm_size_t adjustment
;
4993 vm_object_offset_t vm_copy_start
;
4994 vm_map_entry_t last
;
4996 vm_map_entry_t entry
;
4999 * Check for null copy object.
5002 if (copy
== VM_MAP_COPY_NULL
) {
5004 return(KERN_SUCCESS
);
5008 * Check for special copy object, created
5009 * by vm_map_copyin_object.
5012 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
5013 vm_object_t object
= copy
->cpy_object
;
5015 vm_object_offset_t offset
;
5017 offset
= trunc_page_64(copy
->offset
);
5018 size
= round_page_32(copy
->size
+
5019 (vm_size_t
)(copy
->offset
- offset
));
5021 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
5022 (vm_offset_t
) 0, TRUE
,
5023 object
, offset
, FALSE
,
5024 VM_PROT_DEFAULT
, VM_PROT_ALL
,
5025 VM_INHERIT_DEFAULT
);
5026 if (kr
!= KERN_SUCCESS
)
5028 /* Account for non-pagealigned copy object */
5029 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
5030 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
5031 return(KERN_SUCCESS
);
5035 * Check for special kernel buffer allocated
5036 * by new_ipc_kmsg_copyin.
5039 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
5040 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
5045 * Find space for the data
5048 vm_copy_start
= trunc_page_64(copy
->offset
);
5049 size
= round_page_32((vm_size_t
)copy
->offset
+ copy
->size
)
5054 vm_map_lock(dst_map
);
5055 assert(first_free_is_valid(dst_map
));
5056 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
5057 vm_map_min(dst_map
) : last
->vme_end
;
5060 vm_map_entry_t next
= last
->vme_next
;
5061 vm_offset_t end
= start
+ size
;
5063 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
5064 if (dst_map
->wait_for_space
) {
5065 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
5066 assert_wait((event_t
) dst_map
,
5067 THREAD_INTERRUPTIBLE
);
5068 vm_map_unlock(dst_map
);
5069 thread_block((void (*)(void))0);
5073 vm_map_unlock(dst_map
);
5074 return(KERN_NO_SPACE
);
5077 if ((next
== vm_map_to_entry(dst_map
)) ||
5078 (next
->vme_start
>= end
))
5082 start
= last
->vme_end
;
5086 * Since we're going to just drop the map
5087 * entries from the copy into the destination
5088 * map, they must come from the same pool.
5091 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5093 * Mismatches occur when dealing with the default
5097 vm_map_entry_t next
, new;
5100 * Find the zone that the copies were allocated from
5102 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5104 : vm_map_kentry_zone
;
5105 entry
= vm_map_copy_first_entry(copy
);
5108 * Reinitialize the copy so that vm_map_copy_entry_link
5111 copy
->cpy_hdr
.nentries
= 0;
5112 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5113 vm_map_copy_first_entry(copy
) =
5114 vm_map_copy_last_entry(copy
) =
5115 vm_map_copy_to_entry(copy
);
5120 while (entry
!= vm_map_copy_to_entry(copy
)) {
5121 new = vm_map_copy_entry_create(copy
);
5122 vm_map_entry_copy_full(new, entry
);
5123 new->use_pmap
= FALSE
; /* clr address space specifics */
5124 vm_map_copy_entry_link(copy
,
5125 vm_map_copy_last_entry(copy
),
5127 next
= entry
->vme_next
;
5128 zfree(old_zone
, (vm_offset_t
) entry
);
5134 * Adjust the addresses in the copy chain, and
5135 * reset the region attributes.
5138 adjustment
= start
- vm_copy_start
;
5139 for (entry
= vm_map_copy_first_entry(copy
);
5140 entry
!= vm_map_copy_to_entry(copy
);
5141 entry
= entry
->vme_next
) {
5142 entry
->vme_start
+= adjustment
;
5143 entry
->vme_end
+= adjustment
;
5145 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5146 entry
->protection
= VM_PROT_DEFAULT
;
5147 entry
->max_protection
= VM_PROT_ALL
;
5148 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5151 * If the entry is now wired,
5152 * map the pages into the destination map.
5154 if (entry
->wired_count
!= 0) {
5155 register vm_offset_t va
;
5156 vm_object_offset_t offset
;
5157 register vm_object_t object
;
5159 object
= entry
->object
.vm_object
;
5160 offset
= entry
->offset
;
5161 va
= entry
->vme_start
;
5163 pmap_pageable(dst_map
->pmap
,
5168 while (va
< entry
->vme_end
) {
5169 register vm_page_t m
;
5172 * Look up the page in the object.
5173 * Assert that the page will be found in the
5176 * the object was newly created by
5177 * vm_object_copy_slowly, and has
5178 * copies of all of the pages from
5181 * the object was moved from the old
5182 * map entry; because the old map
5183 * entry was wired, all of the pages
5184 * were in the top-level object.
5185 * (XXX not true if we wire pages for
5188 vm_object_lock(object
);
5189 vm_object_paging_begin(object
);
5191 m
= vm_page_lookup(object
, offset
);
5192 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5194 panic("vm_map_copyout: wiring 0x%x", m
);
5197 vm_object_unlock(object
);
5199 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5201 (m
->object
->wimg_bits
))
5205 vm_object_lock(object
);
5206 PAGE_WAKEUP_DONE(m
);
5207 /* the page is wired, so we don't have to activate */
5208 vm_object_paging_end(object
);
5209 vm_object_unlock(object
);
5211 offset
+= PAGE_SIZE_64
;
5215 else if (size
<= vm_map_aggressive_enter_max
) {
5217 register vm_offset_t va
;
5218 vm_object_offset_t offset
;
5219 register vm_object_t object
;
5222 object
= entry
->object
.vm_object
;
5223 if (object
!= VM_OBJECT_NULL
) {
5225 offset
= entry
->offset
;
5226 va
= entry
->vme_start
;
5227 while (va
< entry
->vme_end
) {
5228 register vm_page_t m
;
5231 * Look up the page in the object.
5232 * Assert that the page will be found
5233 * in the top object if at all...
5235 vm_object_lock(object
);
5236 vm_object_paging_begin(object
);
5238 if (((m
= vm_page_lookup(object
,
5241 !m
->busy
&& !m
->fictitious
&&
5242 !m
->absent
&& !m
->error
) {
5244 vm_object_unlock(object
);
5246 /* honor cow obligations */
5247 prot
= entry
->protection
;
5248 if (entry
->needs_copy
)
5249 prot
&= ~VM_PROT_WRITE
;
5251 PMAP_ENTER(dst_map
->pmap
, va
,
5254 (m
->object
->wimg_bits
))
5258 vm_object_lock(object
);
5259 vm_page_lock_queues();
5260 if (!m
->active
&& !m
->inactive
)
5261 vm_page_activate(m
);
5262 vm_page_unlock_queues();
5263 PAGE_WAKEUP_DONE(m
);
5265 vm_object_paging_end(object
);
5266 vm_object_unlock(object
);
5268 offset
+= PAGE_SIZE_64
;
5276 * Correct the page alignment for the result
5279 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5282 * Update the hints and the map size
5285 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5287 dst_map
->size
+= size
;
5293 vm_map_copy_insert(dst_map
, last
, copy
);
5295 vm_map_unlock(dst_map
);
5298 * XXX If wiring_required, call vm_map_pageable
5301 return(KERN_SUCCESS
);
boolean_t	vm_map_aggressive_enter;	/* not used yet */
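/*
 * vm_map_copyout() above accepts all three kinds of copy object; the shape of
 * the dispatch (written here as a switch purely for illustration) is:
 */
#if 0
	switch (copy->type) {
	case VM_MAP_COPY_OBJECT:
		/* map the donated object directly via vm_map_enter() */
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:
		/* hand off to vm_map_copyout_kernel_buffer() */
		break;
	case VM_MAP_COPY_ENTRY_LIST:
		/* adjust the entries, then splice them into the destination map */
		break;
	}
#endif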
/*
 *	Routine:	vm_map_copyin
 *
 *	Description:
 *		Copy the specified region (src_addr, len) from the
 *		source address space (src_map), possibly removing
 *		the region from the source address space (src_destroy).
 *
 *	Returns:
 *		A vm_map_copy_t object (copy_result), suitable for
 *		insertion into another address space (using vm_map_copyout),
 *		copying over another address space region (using
 *		vm_map_copy_overwrite).  If the copy is unused, it
 *		should be destroyed (using vm_map_copy_discard).
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
typedef struct submap_map {
	vm_map_t	parent_map;
	vm_offset_t	base_start;
	vm_offset_t	base_end;
	struct submap_map *next;
} submap_map_t;
5334 vm_map_copyin_common(
5336 vm_offset_t src_addr
,
5338 boolean_t src_destroy
,
5339 boolean_t src_volatile
,
5340 vm_map_copy_t
*copy_result
, /* OUT */
5341 boolean_t use_maxprot
)
5343 extern int msg_ool_size_small
;
5345 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5346 * in multi-level lookup, this
5347 * entry contains the actual
5351 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5353 vm_offset_t src_start
; /* Start of current entry --
5354 * where copy is taking place now
5356 vm_offset_t src_end
; /* End of entire region to be
5358 vm_offset_t base_start
; /* submap fields to save offsets */
5359 /* in original map */
5360 vm_offset_t base_end
;
5361 vm_map_t base_map
=src_map
;
5362 vm_map_entry_t base_entry
;
5363 boolean_t map_share
=FALSE
;
5364 submap_map_t
*parent_maps
= NULL
;
5367 vm_map_copy_t copy
; /* Resulting copy */
5368 vm_offset_t copy_addr
;
5371 * Check for copies of zero bytes.
5375 *copy_result
= VM_MAP_COPY_NULL
;
5376 return(KERN_SUCCESS
);
5380 * Check that the end address doesn't overflow
5382 src_end
= src_addr
+ len
;
5383 if (src_end
< src_addr
)
5384 return KERN_INVALID_ADDRESS
;
5387 * If the copy is sufficiently small, use a kernel buffer instead
5388 * of making a virtual copy. The theory being that the cost of
5389 * setting up VM (and taking C-O-W faults) dominates the copy costs
5390 * for small regions.
5392 if ((len
< msg_ool_size_small
) && !use_maxprot
)
5393 return vm_map_copyin_kernel_buffer(src_map
, src_addr
, len
,
5394 src_destroy
, copy_result
);
5397 * Compute (page aligned) start and end of region
5399 src_start
= trunc_page_32(src_addr
);
5400 src_end
= round_page_32(src_end
);
5402 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5405 * Allocate a header element for the list.
5407 * Use the start and end in the header to
5408 * remember the endpoints prior to rounding.
5411 copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
5412 vm_map_copy_first_entry(copy
) =
5413 vm_map_copy_last_entry(copy
) = vm_map_copy_to_entry(copy
);
5414 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
5415 copy
->cpy_hdr
.nentries
= 0;
5416 copy
->cpy_hdr
.entries_pageable
= TRUE
;
5418 copy
->offset
= src_addr
;
5421 new_entry
= vm_map_copy_entry_create(copy
);
5425 vm_map_unlock(src_map); \
5426 if(src_map != base_map) \
5427 vm_map_deallocate(src_map); \
5428 if (new_entry != VM_MAP_ENTRY_NULL) \
5429 vm_map_copy_entry_dispose(copy,new_entry); \
5430 vm_map_copy_discard(copy); \
5432 submap_map_t *ptr; \
5434 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5435 parent_maps=parent_maps->next; \
5436 if (ptr->parent_map != base_map) \
5437 vm_map_deallocate(ptr->parent_map); \
5438 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5445 * Find the beginning of the region.
5448 vm_map_lock(src_map
);
5450 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5451 RETURN(KERN_INVALID_ADDRESS
);
5452 if(!tmp_entry
->is_sub_map
) {
5453 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5455 /* set for later submap fix-up */
5456 copy_addr
= src_start
;
5459 * Go through entries until we get to the end.
5464 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5465 vm_size_t src_size
; /* Size of source
5466 * map entry (in both
5471 vm_object_t src_object
; /* Object to copy */
5472 vm_object_offset_t src_offset
;
5474 boolean_t src_needs_copy
; /* Should source map
5476 * for copy-on-write?
5479 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5481 boolean_t was_wired
; /* Was source wired? */
5482 vm_map_version_t version
; /* Version before locks
5483 * dropped to make copy
5485 kern_return_t result
; /* Return value from
5486 * copy_strategically.
5488 while(tmp_entry
->is_sub_map
) {
5489 vm_size_t submap_len
;
5492 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5493 ptr
->next
= parent_maps
;
5495 ptr
->parent_map
= src_map
;
5496 ptr
->base_start
= src_start
;
5497 ptr
->base_end
= src_end
;
5498 submap_len
= tmp_entry
->vme_end
- src_start
;
5499 if(submap_len
> (src_end
-src_start
))
5500 submap_len
= src_end
-src_start
;
5501 ptr
->base_start
+= submap_len
;
5503 src_start
-= tmp_entry
->vme_start
;
5504 src_start
+= tmp_entry
->offset
;
5505 src_end
= src_start
+ submap_len
;
5506 src_map
= tmp_entry
->object
.sub_map
;
5507 vm_map_lock(src_map
);
5508 /* keep an outstanding reference for all maps in */
/* the parent's tree except the base map */
5510 vm_map_reference(src_map
);
5511 vm_map_unlock(ptr
->parent_map
);
5512 if (!vm_map_lookup_entry(
5513 src_map
, src_start
, &tmp_entry
))
5514 RETURN(KERN_INVALID_ADDRESS
);
5516 if(!tmp_entry
->is_sub_map
)
5517 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5518 src_entry
= tmp_entry
;
5520 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5521 (tmp_entry
->object
.vm_object
->phys_contiguous
)) {
/* This is not supported for now.  In the future */
/* we will need to detect the phys_contig */
/* condition and then upgrade copy_slowly */
/* to do a physical copy from the device-memory- */
/* based object.  We can piggy-back off of the */
/* was_wired boolean to set up the proper */
/* handling. */
5529 RETURN(KERN_PROTECTION_FAILURE
);
5532 * Create a new address map entry to hold the result.
5533 * Fill in the fields from the appropriate source entries.
5534 * We must unlock the source map to do this if we need
5535 * to allocate a map entry.
5537 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5538 version
.main_timestamp
= src_map
->timestamp
;
5539 vm_map_unlock(src_map
);
5541 new_entry
= vm_map_copy_entry_create(copy
);
5543 vm_map_lock(src_map
);
5544 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5545 if (!vm_map_lookup_entry(src_map
, src_start
,
5547 RETURN(KERN_INVALID_ADDRESS
);
5549 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5550 continue; /* restart w/ new tmp_entry */
5555 * Verify that the region can be read.
5557 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5559 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5560 RETURN(KERN_PROTECTION_FAILURE
);
5563 * Clip against the endpoints of the entire region.
5566 vm_map_clip_end(src_map
, src_entry
, src_end
);
5568 src_size
= src_entry
->vme_end
- src_start
;
5569 src_object
= src_entry
->object
.vm_object
;
5570 src_offset
= src_entry
->offset
;
5571 was_wired
= (src_entry
->wired_count
!= 0);
5573 vm_map_entry_copy(new_entry
, src_entry
);
5574 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5577 * Attempt non-blocking copy-on-write optimizations.
5581 (src_object
== VM_OBJECT_NULL
||
5582 (src_object
->internal
&& !src_object
->true_share
5585 * If we are destroying the source, and the object
5586 * is internal, we can move the object reference
5587 * from the source to the copy. The copy is
5588 * copy-on-write only if the source is.
5589 * We make another reference to the object, because
5590 * destroying the source entry will deallocate it.
5592 vm_object_reference(src_object
);
5595 * Copy is always unwired. vm_map_copy_entry
5596 * set its wired count to zero.
5599 goto CopySuccessful
;
5604 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5605 src_object
, new_entry
, new_entry
->object
.vm_object
,
5607 if ((src_object
== VM_OBJECT_NULL
||
5608 (!was_wired
&& !map_share
&& !tmp_entry
->is_shared
)) &&
5609 vm_object_copy_quickly(
5610 &new_entry
->object
.vm_object
,
5614 &new_entry_needs_copy
)) {
5616 new_entry
->needs_copy
= new_entry_needs_copy
;
5619 * Handle copy-on-write obligations
5622 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5623 vm_object_pmap_protect(
5627 (src_entry
->is_shared
?
5630 src_entry
->vme_start
,
5631 src_entry
->protection
&
5633 tmp_entry
->needs_copy
= TRUE
;
5637 * The map has never been unlocked, so it's safe
5638 * to move to the next entry rather than doing
5642 goto CopySuccessful
;
5646 * Take an object reference, so that we may
5647 * release the map lock(s).
5650 assert(src_object
!= VM_OBJECT_NULL
);
5651 vm_object_reference(src_object
);
5654 * Record the timestamp for later verification.
5658 version
.main_timestamp
= src_map
->timestamp
;
5659 vm_map_unlock(src_map
); /* Increments timestamp once! */
5667 vm_object_lock(src_object
);
5668 result
= vm_object_copy_slowly(
5673 &new_entry
->object
.vm_object
);
5674 new_entry
->offset
= 0;
5675 new_entry
->needs_copy
= FALSE
;
5678 else if (src_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
&&
5679 (tmp_entry
->is_shared
|| map_share
)) {
5680 vm_object_t new_object
;
5682 vm_object_lock(src_object
);
5683 new_object
= vm_object_copy_delayed(
5687 if (new_object
== VM_OBJECT_NULL
)
5690 new_entry
->object
.vm_object
= new_object
;
5691 new_entry
->needs_copy
= TRUE
;
5692 result
= KERN_SUCCESS
;
5695 result
= vm_object_copy_strategically(src_object
,
5698 &new_entry
->object
.vm_object
,
5700 &new_entry_needs_copy
);
5702 new_entry
->needs_copy
= new_entry_needs_copy
;
5705 if (result
!= KERN_SUCCESS
&&
5706 result
!= KERN_MEMORY_RESTART_COPY
) {
5707 vm_map_lock(src_map
);
5712 * Throw away the extra reference
5715 vm_object_deallocate(src_object
);
5718 * Verify that the map has not substantially
5719 * changed while the copy was being made.
5722 vm_map_lock(src_map
);
5724 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5725 goto VerificationSuccessful
;
5728 * Simple version comparison failed.
5730 * Retry the lookup and verify that the
5731 * same object/offset are still present.
5733 * [Note: a memory manager that colludes with
5734 * the calling task can detect that we have
5735 * cheated. While the map was unlocked, the
5736 * mapping could have been changed and restored.]
5739 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5740 RETURN(KERN_INVALID_ADDRESS
);
5743 src_entry
= tmp_entry
;
5744 vm_map_clip_start(src_map
, src_entry
, src_start
);
5746 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5748 src_entry
->max_protection
& VM_PROT_READ
== 0)
5749 goto VerificationFailed
;
5751 if (src_entry
->vme_end
< new_entry
->vme_end
)
5752 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5754 if ((src_entry
->object
.vm_object
!= src_object
) ||
5755 (src_entry
->offset
!= src_offset
) ) {
5758 * Verification failed.
5760 * Start over with this top-level entry.
5763 VerificationFailed
: ;
5765 vm_object_deallocate(new_entry
->object
.vm_object
);
5766 tmp_entry
= src_entry
;
5771 * Verification succeeded.
5774 VerificationSuccessful
: ;
5776 if (result
== KERN_MEMORY_RESTART_COPY
)
5786 * Link in the new copy entry.
5789 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5793 * Determine whether the entire region
5796 src_start
= new_entry
->vme_end
;
5797 new_entry
= VM_MAP_ENTRY_NULL
;
5798 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5799 if (src_map
!= base_map
) {
5803 assert(ptr
!= NULL
);
5804 parent_maps
= parent_maps
->next
;
5805 vm_map_unlock(src_map
);
5806 vm_map_deallocate(src_map
);
5807 vm_map_lock(ptr
->parent_map
);
5808 src_map
= ptr
->parent_map
;
5809 src_start
= ptr
->base_start
;
5810 src_end
= ptr
->base_end
;
5811 if ((src_end
> src_start
) &&
5812 !vm_map_lookup_entry(
5813 src_map
, src_start
, &tmp_entry
))
5814 RETURN(KERN_INVALID_ADDRESS
);
5815 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5816 if(parent_maps
== NULL
)
5818 src_entry
= tmp_entry
->vme_prev
;
5822 if ((src_start
>= src_end
) && (src_end
!= 0))
5826 * Verify that there are no gaps in the region
5829 tmp_entry
= src_entry
->vme_next
;
5830 if ((tmp_entry
->vme_start
!= src_start
) ||
5831 (tmp_entry
== vm_map_to_entry(src_map
)))
5832 RETURN(KERN_INVALID_ADDRESS
);
5836 * If the source should be destroyed, do it now, since the
5837 * copy was successful.
5840 (void) vm_map_delete(src_map
,
5841 trunc_page_32(src_addr
),
5843 (src_map
== kernel_map
) ?
5844 VM_MAP_REMOVE_KUNWIRE
:
5848 vm_map_unlock(src_map
);
5850 /* Fix-up start and end points in copy. This is necessary */
5851 /* when the various entries in the copy object were picked */
5852 /* up from different sub-maps */
5854 tmp_entry
= vm_map_copy_first_entry(copy
);
5855 while (tmp_entry
!= vm_map_copy_to_entry(copy
)) {
5856 tmp_entry
->vme_end
= copy_addr
+
5857 (tmp_entry
->vme_end
- tmp_entry
->vme_start
);
5858 tmp_entry
->vme_start
= copy_addr
;
5859 copy_addr
+= tmp_entry
->vme_end
- tmp_entry
->vme_start
;
5860 tmp_entry
= (struct vm_map_entry
*)tmp_entry
->vme_next
;
5863 *copy_result
= copy
;
5864 return(KERN_SUCCESS
);
/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,		/* offset of region in object */
	vm_object_size_t	size,		/* size of region in object */
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;			/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->cpy_index = 0;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
5904 vm_map_entry_t old_entry
,
5908 vm_map_entry_t new_entry
;
5909 kern_return_t result
;
5912 * New sharing code. New map entry
5913 * references original object. Internal
5914 * objects use asynchronous copy algorithm for
5915 * future copies. First make sure we have
5916 * the right object. If we need a shadow,
5917 * or someone else already has one, then
5918 * make a new shadow and share it.
5921 object
= old_entry
->object
.vm_object
;
5922 if (old_entry
->is_sub_map
) {
5923 assert(old_entry
->wired_count
== 0);
5925 if(old_entry
->use_pmap
) {
5926 result
= pmap_nest(new_map
->pmap
,
5927 (old_entry
->object
.sub_map
)->pmap
,
5928 (addr64_t
)old_entry
->vme_start
,
5929 (addr64_t
)old_entry
->vme_start
,
5930 (uint64_t)(old_entry
->vme_end
- old_entry
->vme_start
));
5932 panic("vm_map_fork_share: pmap_nest failed!");
5935 } else if (object
== VM_OBJECT_NULL
) {
5936 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5937 old_entry
->vme_start
));
5938 old_entry
->offset
= 0;
5939 old_entry
->object
.vm_object
= object
;
5940 assert(!old_entry
->needs_copy
);
5941 } else if (object
->copy_strategy
!=
5942 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5945 * We are already using an asymmetric
5946 * copy, and therefore we already have
5950 assert(! old_entry
->needs_copy
);
5952 else if (old_entry
->needs_copy
|| /* case 1 */
5953 object
->shadowed
|| /* case 2 */
5954 (!object
->true_share
&& /* case 3 */
5955 !old_entry
->is_shared
&&
5957 (vm_size_t
)(old_entry
->vme_end
-
5958 old_entry
->vme_start
)))) {
5961 * We need to create a shadow.
5962 * There are three cases here.
5963 * In the first case, we need to
5964 * complete a deferred symmetrical
5965 * copy that we participated in.
5966 * In the second and third cases,
5967 * we need to create the shadow so
5968 * that changes that we make to the
5969 * object do not interfere with
5970 * any symmetrical copies which
 * have occurred (case 2) or which
5972 * might occur (case 3).
5974 * The first case is when we had
5975 * deferred shadow object creation
5976 * via the entry->needs_copy mechanism.
5977 * This mechanism only works when
5978 * only one entry points to the source
5979 * object, and we are about to create
5980 * a second entry pointing to the
5981 * same object. The problem is that
5982 * there is no way of mapping from
5983 * an object to the entries pointing
5984 * to it. (Deferred shadow creation
 * works with one entry because it occurs
 * at fault time, and we walk from the
5987 * entry to the object when handling
5990 * The second case is when the object
5991 * to be shared has already been copied
5992 * with a symmetric copy, but we point
5993 * directly to the object without
5994 * needs_copy set in our entry. (This
5995 * can happen because different ranges
5996 * of an object can be pointed to by
5997 * different entries. In particular,
5998 * a single entry pointing to an object
5999 * can be split by a call to vm_inherit,
6000 * which, combined with task_create, can
6001 * result in the different entries
6002 * having different needs_copy values.)
6003 * The shadowed flag in the object allows
6004 * us to detect this case. The problem
6005 * with this case is that if this object
6006 * has or will have shadows, then we
6007 * must not perform an asymmetric copy
6008 * of this object, since such a copy
6009 * allows the object to be changed, which
6010 * will break the previous symmetrical
6011 * copies (which rely upon the object
6012 * not changing). In a sense, the shadowed
6013 * flag says "don't change this object".
6014 * We fix this by creating a shadow
6015 * object for this object, and sharing
6016 * that. This works because we are free
6017 * to change the shadow object (and thus
6018 * to use an asymmetric copy strategy);
6019 * this is also semantically correct,
6020 * since this object is temporary, and
6021 * therefore a copy of the object is
6022 * as good as the object itself. (This
6023 * is not true for permanent objects,
6024 * since the pager needs to see changes,
6025 * which won't happen if the changes
6026 * are made to a copy.)
6028 * The third case is when the object
6029 * to be shared has parts sticking
6030 * outside of the entry we're working
6031 * with, and thus may in the future
6032 * be subject to a symmetrical copy.
6033 * (This is a preemptive version of
6037 assert(!(object
->shadowed
&& old_entry
->is_shared
));
6038 vm_object_shadow(&old_entry
->object
.vm_object
,
6040 (vm_size_t
) (old_entry
->vme_end
-
6041 old_entry
->vme_start
));
6044 * If we're making a shadow for other than
6045 * copy on write reasons, then we have
6046 * to remove write permission.
6049 if (!old_entry
->needs_copy
&&
6050 (old_entry
->protection
& VM_PROT_WRITE
)) {
6051 if(old_map
->mapped
) {
6052 vm_object_pmap_protect(
6053 old_entry
->object
.vm_object
,
6055 (old_entry
->vme_end
-
6056 old_entry
->vme_start
),
6058 old_entry
->vme_start
,
6059 old_entry
->protection
& ~VM_PROT_WRITE
);
6061 pmap_protect(old_map
->pmap
,
6062 old_entry
->vme_start
,
6064 old_entry
->protection
& ~VM_PROT_WRITE
);
6068 old_entry
->needs_copy
= FALSE
;
6069 object
= old_entry
->object
.vm_object
;
6073 * If object was using a symmetric copy strategy,
6074 * change its copy strategy to the default
6075 * asymmetric copy strategy, which is copy_delay
6076 * in the non-norma case and copy_call in the
6077 * norma case. Bump the reference count for the
6081 if(old_entry
->is_sub_map
) {
6082 vm_map_lock(old_entry
->object
.sub_map
);
6083 vm_map_reference(old_entry
->object
.sub_map
);
6084 vm_map_unlock(old_entry
->object
.sub_map
);
6086 vm_object_lock(object
);
6087 object
->ref_count
++;
6088 vm_object_res_reference(object
);
6089 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6090 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6092 vm_object_unlock(object
);
6096 * Clone the entry, using object ref from above.
6097 * Mark both entries as shared.
6100 new_entry
= vm_map_entry_create(new_map
);
6101 vm_map_entry_copy(new_entry
, old_entry
);
6102 old_entry
->is_shared
= TRUE
;
6103 new_entry
->is_shared
= TRUE
;
6106 * Insert the entry into the new map -- we
6107 * know we're inserting at the end of the new
6111 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6114 * Update the physical map
6117 if (old_entry
->is_sub_map
) {
6118 /* Bill Angell pmap support goes here */
6120 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6121 old_entry
->vme_end
- old_entry
->vme_start
,
6122 old_entry
->vme_start
);
6129 vm_map_entry_t
*old_entry_p
,
6132 vm_map_entry_t old_entry
= *old_entry_p
;
6133 vm_size_t entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6134 vm_offset_t start
= old_entry
->vme_start
;
6136 vm_map_entry_t last
= vm_map_last_entry(new_map
);
6138 vm_map_unlock(old_map
);
6140 * Use maxprot version of copyin because we
6141 * care about whether this memory can ever
6142 * be accessed, not just whether it's accessible
6145 if (vm_map_copyin_maxprot(old_map
, start
, entry_size
, FALSE
, ©
)
6148 * The map might have changed while it
 * was unlocked; check it again.  Skip
6150 * any blank space or permanently
6151 * unreadable region.
6153 vm_map_lock(old_map
);
6154 if (!vm_map_lookup_entry(old_map
, start
, &last
) ||
6155 (last
->max_protection
& VM_PROT_READ
) == VM_PROT_NONE
) {
6156 last
= last
->vme_next
;
6158 *old_entry_p
= last
;
6161 * XXX For some error returns, want to
6162 * XXX skip to the next element. Note
6163 * that INVALID_ADDRESS and
6164 * PROTECTION_FAILURE are handled above.
6171 * Insert the copy into the new map
6174 vm_map_copy_insert(new_map
, last
, copy
);
6177 * Pick up the traversal at the end of
6178 * the copied region.
6181 vm_map_lock(old_map
);
6182 start
+= entry_size
;
6183 if (! vm_map_lookup_entry(old_map
, start
, &last
)) {
6184 last
= last
->vme_next
;
6186 vm_map_clip_start(old_map
, last
, start
);
6188 *old_entry_p
= last
;
6196 * Create and return a new map based on the old
6197 * map, according to the inheritance values on the
6198 * regions in that map.
6200 * The source map must not be locked.
6206 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
6208 vm_map_entry_t old_entry
;
6209 vm_size_t new_size
= 0, entry_size
;
6210 vm_map_entry_t new_entry
;
6211 boolean_t src_needs_copy
;
6212 boolean_t new_entry_needs_copy
;
6214 vm_map_reference_swap(old_map
);
6215 vm_map_lock(old_map
);
6217 new_map
= vm_map_create(new_pmap
,
6218 old_map
->min_offset
,
6219 old_map
->max_offset
,
6220 old_map
->hdr
.entries_pageable
);
6223 old_entry
= vm_map_first_entry(old_map
);
6224 old_entry
!= vm_map_to_entry(old_map
);
6227 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6229 switch (old_entry
->inheritance
) {
6230 case VM_INHERIT_NONE
:
6233 case VM_INHERIT_SHARE
:
6234 vm_map_fork_share(old_map
, old_entry
, new_map
);
6235 new_size
+= entry_size
;
6238 case VM_INHERIT_COPY
:
6241 * Inline the copy_quickly case;
6242 * upon failure, fall back on call
6243 * to vm_map_fork_copy.
6246 if(old_entry
->is_sub_map
)
6248 if ((old_entry
->wired_count
!= 0) ||
6249 ((old_entry
->object
.vm_object
!= NULL
) &&
6250 (old_entry
->object
.vm_object
->true_share
))) {
6251 goto slow_vm_map_fork_copy
;
6254 new_entry
= vm_map_entry_create(new_map
);
6255 vm_map_entry_copy(new_entry
, old_entry
);
6256 /* clear address space specifics */
6257 new_entry
->use_pmap
= FALSE
;
6259 if (! vm_object_copy_quickly(
6260 &new_entry
->object
.vm_object
,
6262 (old_entry
->vme_end
-
6263 old_entry
->vme_start
),
6265 &new_entry_needs_copy
)) {
6266 vm_map_entry_dispose(new_map
, new_entry
);
6267 goto slow_vm_map_fork_copy
;
6271 * Handle copy-on-write obligations
6274 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6275 vm_object_pmap_protect(
6276 old_entry
->object
.vm_object
,
6278 (old_entry
->vme_end
-
6279 old_entry
->vme_start
),
6280 ((old_entry
->is_shared
6284 old_entry
->vme_start
,
6285 old_entry
->protection
& ~VM_PROT_WRITE
);
6287 old_entry
->needs_copy
= TRUE
;
6289 new_entry
->needs_copy
= new_entry_needs_copy
;
6292 * Insert the entry at the end
6296 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6298 new_size
+= entry_size
;
6301 slow_vm_map_fork_copy
:
6302 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6303 new_size
+= entry_size
;
6307 old_entry
= old_entry
->vme_next
;
6310 new_map
->size
= new_size
;
6311 vm_map_unlock(old_map
);
6312 vm_map_deallocate(old_map
);
/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
6343 vm_map_lookup_locked(
6344 vm_map_t
*var_map
, /* IN/OUT */
6345 register vm_offset_t vaddr
,
6346 register vm_prot_t fault_type
,
6347 vm_map_version_t
*out_version
, /* OUT */
6348 vm_object_t
*object
, /* OUT */
6349 vm_object_offset_t
*offset
, /* OUT */
6350 vm_prot_t
*out_prot
, /* OUT */
6351 boolean_t
*wired
, /* OUT */
6352 int *behavior
, /* OUT */
6353 vm_object_offset_t
*lo_offset
, /* OUT */
6354 vm_object_offset_t
*hi_offset
, /* OUT */
6357 vm_map_entry_t entry
;
6358 register vm_map_t map
= *var_map
;
6359 vm_map_t old_map
= *var_map
;
6360 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6361 vm_offset_t cow_parent_vaddr
;
6362 vm_offset_t old_start
;
6363 vm_offset_t old_end
;
6364 register vm_prot_t prot
;
6370 * If the map has an interesting hint, try it before calling
6371 * full blown lookup routine.
6374 mutex_lock(&map
->s_lock
);
6376 mutex_unlock(&map
->s_lock
);
6378 if ((entry
== vm_map_to_entry(map
)) ||
6379 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6380 vm_map_entry_t tmp_entry
;
6383 * Entry was either not a valid hint, or the vaddr
6384 * was not contained in the entry, so do a full lookup.
6386 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6387 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6388 vm_map_unlock(cow_sub_map_parent
);
6389 if((*pmap_map
!= map
)
6390 && (*pmap_map
!= cow_sub_map_parent
))
6391 vm_map_unlock(*pmap_map
);
6392 return KERN_INVALID_ADDRESS
;
6397 if(map
== old_map
) {
6398 old_start
= entry
->vme_start
;
6399 old_end
= entry
->vme_end
;
6403 * Handle submaps. Drop lock on upper map, submap is
6408 if (entry
->is_sub_map
) {
6409 vm_offset_t local_vaddr
;
6410 vm_offset_t end_delta
;
6411 vm_offset_t start_delta
;
6412 vm_offset_t object_start_delta
;
6413 vm_map_entry_t submap_entry
;
6414 boolean_t mapped_needs_copy
=FALSE
;
6416 local_vaddr
= vaddr
;
6418 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6419 /* if pmap_map equals map we unlock below */
6420 if ((*pmap_map
!= map
) &&
6421 (*pmap_map
!= cow_sub_map_parent
))
6422 vm_map_unlock(*pmap_map
);
6423 *pmap_map
= entry
->object
.sub_map
;
6426 if(entry
->needs_copy
) {
6427 if (!mapped_needs_copy
) {
6428 if (vm_map_lock_read_to_write(map
)) {
6429 vm_map_lock_read(map
);
6430 if(*pmap_map
== entry
->object
.sub_map
)
6434 vm_map_lock_read(entry
->object
.sub_map
);
6435 cow_sub_map_parent
= map
;
6436 /* reset base to map before cow object */
6437 /* this is the map which will accept */
6438 /* the new cow object */
6439 old_start
= entry
->vme_start
;
6440 old_end
= entry
->vme_end
;
6441 cow_parent_vaddr
= vaddr
;
6442 mapped_needs_copy
= TRUE
;
6444 vm_map_lock_read(entry
->object
.sub_map
);
6445 if((cow_sub_map_parent
!= map
) &&
6450 vm_map_lock_read(entry
->object
.sub_map
);
6451 /* leave map locked if it is a target */
6452 /* cow sub_map above otherwise, just */
6453 /* follow the maps down to the object */
6454 /* here we unlock knowing we are not */
6455 /* revisiting the map. */
6456 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6457 vm_map_unlock_read(map
);
6460 *var_map
= map
= entry
->object
.sub_map
;
6462 /* calculate the offset in the submap for vaddr */
6463 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6466 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6467 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6468 vm_map_unlock(cow_sub_map_parent
);
6470 if((*pmap_map
!= map
)
6471 && (*pmap_map
!= cow_sub_map_parent
)) {
6472 vm_map_unlock(*pmap_map
);
6475 return KERN_INVALID_ADDRESS
;
6477 /* find the attenuated shadow of the underlying object */
6478 /* on our target map */
/* In English: the submap object may extend beyond the */
/* region mapped by the entry, or may only fill a portion */
/* of it.  For our purposes, we only care if the object */
6483 /* doesn't fill. In this case the area which will */
6484 /* ultimately be clipped in the top map will only need */
6485 /* to be as big as the portion of the underlying entry */
6486 /* which is mapped */
6487 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6488 submap_entry
->vme_start
- entry
->offset
: 0;
6491 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6492 submap_entry
->vme_end
?
6493 0 : (entry
->offset
+
6494 (old_end
- old_start
))
6495 - submap_entry
->vme_end
;
6497 old_start
+= start_delta
;
6498 old_end
-= end_delta
;
6500 if(submap_entry
->is_sub_map
) {
6501 entry
= submap_entry
;
6502 vaddr
= local_vaddr
;
6503 goto submap_recurse
;
6506 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6508 vm_object_t copy_object
;
6509 vm_offset_t local_start
;
6510 vm_offset_t local_end
;
6511 boolean_t copied_slowly
= FALSE
;
6513 if (vm_map_lock_read_to_write(map
)) {
6514 vm_map_lock_read(map
);
6515 old_start
-= start_delta
;
6516 old_end
+= end_delta
;
6521 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6522 submap_entry
->object
.vm_object
=
6525 (submap_entry
->vme_end
6526 - submap_entry
->vme_start
));
6527 submap_entry
->offset
= 0;
6529 local_start
= local_vaddr
-
6530 (cow_parent_vaddr
- old_start
);
6531 local_end
= local_vaddr
+
6532 (old_end
- cow_parent_vaddr
);
6533 vm_map_clip_start(map
, submap_entry
, local_start
);
6534 vm_map_clip_end(map
, submap_entry
, local_end
);
6536 /* This is the COW case, lets connect */
6537 /* an entry in our space to the underlying */
6538 /* object in the submap, bypassing the */
6542 if(submap_entry
->wired_count
!= 0) {
6544 submap_entry
->object
.vm_object
);
6545 vm_object_copy_slowly(
6546 submap_entry
->object
.vm_object
,
6547 submap_entry
->offset
,
6548 submap_entry
->vme_end
-
6549 submap_entry
->vme_start
,
6552 copied_slowly
= TRUE
;
6555 /* set up shadow object */
6556 copy_object
= submap_entry
->object
.vm_object
;
6557 vm_object_reference(copy_object
);
6558 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6559 submap_entry
->needs_copy
= TRUE
;
6560 vm_object_pmap_protect(
6561 submap_entry
->object
.vm_object
,
6562 submap_entry
->offset
,
6563 submap_entry
->vme_end
-
6564 submap_entry
->vme_start
,
6565 (submap_entry
->is_shared
6567 PMAP_NULL
: map
->pmap
,
6568 submap_entry
->vme_start
,
6569 submap_entry
->protection
&
/* This works differently than the */
/* normal submap case.  We go back */
6576 /* to the parent of the cow map and*/
6577 /* clip out the target portion of */
6578 /* the sub_map, substituting the */
6579 /* new copy object, */
6582 local_start
= old_start
;
6583 local_end
= old_end
;
6584 map
= cow_sub_map_parent
;
6585 *var_map
= cow_sub_map_parent
;
6586 vaddr
= cow_parent_vaddr
;
6587 cow_sub_map_parent
= NULL
;
6589 if(!vm_map_lookup_entry(map
,
6591 vm_object_deallocate(
6593 vm_map_lock_write_to_read(map
);
6594 return KERN_INVALID_ADDRESS
;
6597 /* clip out the portion of space */
6598 /* mapped by the sub map which */
6599 /* corresponds to the underlying */
6601 vm_map_clip_start(map
, entry
, local_start
);
6602 vm_map_clip_end(map
, entry
, local_end
);
6605 /* substitute copy object for */
6606 /* shared map entry */
6607 vm_map_deallocate(entry
->object
.sub_map
);
6608 entry
->is_sub_map
= FALSE
;
6609 entry
->object
.vm_object
= copy_object
;
6611 entry
->protection
|= VM_PROT_WRITE
;
6612 entry
->max_protection
|= VM_PROT_WRITE
;
6615 entry
->needs_copy
= FALSE
;
6616 entry
->is_shared
= FALSE
;
6618 entry
->offset
= submap_entry
->offset
;
6619 entry
->needs_copy
= TRUE
;
6620 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6621 entry
->inheritance
= VM_INHERIT_COPY
;
6623 entry
->is_shared
= TRUE
;
6625 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6626 entry
->inheritance
= VM_INHERIT_COPY
;
6628 vm_map_lock_write_to_read(map
);
6630 if((cow_sub_map_parent
)
6631 && (cow_sub_map_parent
!= *pmap_map
)
6632 && (cow_sub_map_parent
!= map
)) {
6633 vm_map_unlock(cow_sub_map_parent
);
6635 entry
= submap_entry
;
6636 vaddr
= local_vaddr
;
6641 * Check whether this task is allowed to have
6645 prot
= entry
->protection
;
6646 if ((fault_type
& (prot
)) != fault_type
) {
6647 if (*pmap_map
!= map
) {
6648 vm_map_unlock(*pmap_map
);
6651 return KERN_PROTECTION_FAILURE
;
6655 * If this page is not pageable, we have to get
6656 * it for all possible accesses.
6659 if (*wired
= (entry
->wired_count
!= 0))
6660 prot
= fault_type
= entry
->protection
;
6663 * If the entry was copy-on-write, we either ...
6666 if (entry
->needs_copy
) {
6668 * If we want to write the page, we may as well
6669 * handle that now since we've got the map locked.
6671 * If we don't need to write the page, we just
6672 * demote the permissions allowed.
6675 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6677 * Make a new object, and place it in the
6678 * object chain. Note that no new references
6679 * have appeared -- one just moved from the
6680 * map to the new object.
6683 if (vm_map_lock_read_to_write(map
)) {
6684 vm_map_lock_read(map
);
6687 vm_object_shadow(&entry
->object
.vm_object
,
6689 (vm_size_t
) (entry
->vme_end
-
6692 entry
->object
.vm_object
->shadowed
= TRUE
;
6693 entry
->needs_copy
= FALSE
;
6694 vm_map_lock_write_to_read(map
);
6698 * We're attempting to read a copy-on-write
6699 * page -- don't allow writes.
6702 prot
&= (~VM_PROT_WRITE
);
6707 * Create an object if necessary.
6709 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6711 if (vm_map_lock_read_to_write(map
)) {
6712 vm_map_lock_read(map
);
6716 entry
->object
.vm_object
= vm_object_allocate(
6717 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6719 vm_map_lock_write_to_read(map
);
6723 * Return the object/offset from this entry. If the entry
6724 * was copy-on-write or empty, it has been fixed up. Also
6725 * return the protection.
6728 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6729 *object
= entry
->object
.vm_object
;
6731 *behavior
= entry
->behavior
;
6732 *lo_offset
= entry
->offset
;
6733 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6736 * Lock the object to prevent it from disappearing
6739 vm_object_lock(*object
);
6742 * Save the version number
6745 out_version
->main_timestamp
= map
->timestamp
;
6747 return KERN_SUCCESS
;
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */
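/*
 * Usage sketch for the lookup/verify protocol: capture the version during
 * vm_map_lookup_locked(), drop the lock for the slow work, then revalidate
 * with vm_map_verify() before trusting the cached (object, offset).  Names
 * are illustrative.
 */
#if 0
	if (!vm_map_verify(map, &version)) {
		/* map changed since the lookup: redo vm_map_lookup_locked() */
	} else {
		/* ... safe to use the previously returned object/offset ... */
		vm_map_verify_done(map, &version);	/* drops the read lock */
	}
#endif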
/*
 *	vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
6801 vm_offset_t
*address
, /* IN/OUT */
6802 vm_size_t
*size
, /* OUT */
6803 vm_region_flavor_t flavor
, /* IN */
6804 vm_region_info_t info
, /* OUT */
6805 mach_msg_type_number_t
*count
, /* IN/OUT */
6806 ipc_port_t
*object_name
) /* OUT */
6808 vm_map_entry_t tmp_entry
;
6810 vm_map_entry_t entry
;
6813 vm_region_basic_info_t basic
;
6814 vm_region_extended_info_t extended
;
6815 vm_region_top_info_t top
;
6817 if (map
== VM_MAP_NULL
)
6818 return(KERN_INVALID_ARGUMENT
);
6822 case VM_REGION_BASIC_INFO
:
6824 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6825 return(KERN_INVALID_ARGUMENT
);
6827 basic
= (vm_region_basic_info_t
) info
;
6828 *count
= VM_REGION_BASIC_INFO_COUNT
;
6830 vm_map_lock_read(map
);
6833 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6834 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6835 vm_map_unlock_read(map
);
6836 return(KERN_INVALID_ADDRESS
);
6842 start
= entry
->vme_start
;
6844 basic
->offset
= entry
->offset
;
6845 basic
->protection
= entry
->protection
;
6846 basic
->inheritance
= entry
->inheritance
;
6847 basic
->max_protection
= entry
->max_protection
;
6848 basic
->behavior
= entry
->behavior
;
6849 basic
->user_wired_count
= entry
->user_wired_count
;
6850 basic
->reserved
= entry
->is_sub_map
;
6852 *size
= (entry
->vme_end
- start
);
6854 if (object_name
) *object_name
= IP_NULL
;
6855 if (entry
->is_sub_map
) {
6856 basic
->shared
= FALSE
;
6858 basic
->shared
= entry
->is_shared
;
6861 vm_map_unlock_read(map
);
6862 return(KERN_SUCCESS
);
6864 case VM_REGION_EXTENDED_INFO
:
6867 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6868 return(KERN_INVALID_ARGUMENT
);
6870 extended
= (vm_region_extended_info_t
) info
;
6871 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6873 vm_map_lock_read(map
);
6876 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6877 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6878 vm_map_unlock_read(map
);
6879 return(KERN_INVALID_ADDRESS
);
6884 start
= entry
->vme_start
;
6886 extended
->protection
= entry
->protection
;
6887 extended
->user_tag
= entry
->alias
;
6888 extended
->pages_resident
= 0;
6889 extended
->pages_swapped_out
= 0;
6890 extended
->pages_shared_now_private
= 0;
6891 extended
->pages_dirtied
= 0;
6892 extended
->external_pager
= 0;
6893 extended
->shadow_depth
= 0;
6895 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6897 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6898 extended
->share_mode
= SM_PRIVATE
;
6901 *object_name
= IP_NULL
;
6903 *size
= (entry
->vme_end
- start
);
6905 vm_map_unlock_read(map
);
6906 return(KERN_SUCCESS
);
6908 case VM_REGION_TOP_INFO
:
6911 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6912 return(KERN_INVALID_ARGUMENT
);
6914 top
= (vm_region_top_info_t
) info
;
6915 *count
= VM_REGION_TOP_INFO_COUNT
;
6917 vm_map_lock_read(map
);
6920 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6921 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6922 vm_map_unlock_read(map
);
6923 return(KERN_INVALID_ADDRESS
);
6929 start
= entry
->vme_start
;
6931 top
->private_pages_resident
= 0;
6932 top
->shared_pages_resident
= 0;
6934 vm_region_top_walk(entry
, top
);
6937 *object_name
= IP_NULL
;
6939 *size
= (entry
->vme_end
- start
);
6941 vm_map_unlock_read(map
);
6942 return(KERN_SUCCESS
);
6945 return(KERN_INVALID_ARGUMENT
);
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
6958 vm_offset_t
*address
, /* IN/OUT */
6959 vm_size_t
*size
, /* OUT */
6960 natural_t
*nesting_depth
, /* IN/OUT */
6961 vm_region_recurse_info_t info
, /* IN/OUT */
6962 mach_msg_type_number_t
*count
) /* IN/OUT */
6964 vm_map_entry_t tmp_entry
;
6966 vm_map_entry_t entry
;
6970 unsigned int recurse_count
;
6973 vm_map_entry_t base_entry
;
6974 vm_offset_t base_next
;
6975 vm_offset_t base_addr
;
6976 vm_offset_t baddr_start_delta
;
6977 vm_region_submap_info_t submap_info
;
6978 vm_region_extended_info_data_t extended
;
6980 if (map
== VM_MAP_NULL
)
6981 return(KERN_INVALID_ARGUMENT
);
6983 submap_info
= (vm_region_submap_info_t
) info
;
6984 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6986 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6987 return(KERN_INVALID_ARGUMENT
);
6991 recurse_count
= *nesting_depth
;
6993 LOOKUP_NEXT_BASE_ENTRY
:
6994 vm_map_lock_read(map
);
6995 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6996 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6997 vm_map_unlock_read(map
);
6998 return(KERN_INVALID_ADDRESS
);
7003 *size
= entry
->vme_end
- entry
->vme_start
;
7004 start
= entry
->vme_start
;
7006 baddr_start_delta
= *address
- start
;
7007 base_next
= entry
->vme_end
;
7010 while(entry
->is_sub_map
&& recurse_count
) {
7012 vm_map_lock_read(entry
->object
.sub_map
);
7015 if(entry
== base_entry
) {
7016 start
= entry
->offset
;
7017 start
+= *address
- entry
->vme_start
;
7020 submap
= entry
->object
.sub_map
;
7021 vm_map_unlock_read(map
);
7024 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7025 if ((entry
= tmp_entry
->vme_next
)
7026 == vm_map_to_entry(map
)) {
7027 vm_map_unlock_read(map
);
7032 goto LOOKUP_NEXT_BASE_ENTRY
;
7038 if(start
<= entry
->vme_start
) {
7039 vm_offset_t old_start
= start
;
7040 if(baddr_start_delta
) {
7041 base_addr
+= (baddr_start_delta
);
7042 *size
-= baddr_start_delta
;
7043 baddr_start_delta
= 0;
7046 (base_addr
+= (entry
->vme_start
- start
))) {
7047 vm_map_unlock_read(map
);
7052 goto LOOKUP_NEXT_BASE_ENTRY
;
7054 *size
-= entry
->vme_start
- start
;
7055 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7056 *size
= entry
->vme_end
- entry
->vme_start
;
7060 if(baddr_start_delta
) {
7061 if((start
- entry
->vme_start
)
7062 < baddr_start_delta
) {
7063 base_addr
+= start
- entry
->vme_start
;
7064 *size
-= start
- entry
->vme_start
;
7066 base_addr
+= baddr_start_delta
;
7067 *size
+= baddr_start_delta
;
7069 baddr_start_delta
= 0;
7071 base_addr
+= entry
->vme_start
;
7072 if(base_addr
>= base_next
) {
7073 vm_map_unlock_read(map
);
7078 goto LOOKUP_NEXT_BASE_ENTRY
;
7080 if (*size
> (entry
->vme_end
- start
))
7081 *size
= entry
->vme_end
- start
;
7083 start
= entry
->vme_start
- start
;
7086 start
+= entry
->offset
;
7089 *nesting_depth
-= recurse_count
;
7090 if(entry
!= base_entry
) {
7091 start
= entry
->vme_start
+ (start
- entry
->offset
);
7095 submap_info
->user_tag
= entry
->alias
;
7096 submap_info
->offset
= entry
->offset
;
7097 submap_info
->protection
= entry
->protection
;
7098 submap_info
->inheritance
= entry
->inheritance
;
7099 submap_info
->max_protection
= entry
->max_protection
;
7100 submap_info
->behavior
= entry
->behavior
;
7101 submap_info
->user_wired_count
= entry
->user_wired_count
;
7102 submap_info
->is_submap
= entry
->is_sub_map
;
7103 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7104 *address
= base_addr
;
7107 extended
.pages_resident
= 0;
7108 extended
.pages_swapped_out
= 0;
7109 extended
.pages_shared_now_private
= 0;
7110 extended
.pages_dirtied
= 0;
7111 extended
.external_pager
= 0;
7112 extended
.shadow_depth
= 0;
7114 if(!entry
->is_sub_map
) {
7115 vm_region_walk(entry
, &extended
, entry
->offset
,
7116 entry
->vme_end
- start
, map
, start
);
7117 submap_info
->share_mode
= extended
.share_mode
;
7118 if (extended
.external_pager
&& extended
.ref_count
== 2
7119 && extended
.share_mode
== SM_SHARED
)
7120 submap_info
->share_mode
= SM_PRIVATE
;
7121 submap_info
->ref_count
= extended
.ref_count
;
7124 submap_info
->share_mode
= SM_TRUESHARED
;
7126 submap_info
->share_mode
= SM_PRIVATE
;
7127 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7130 submap_info
->pages_resident
= extended
.pages_resident
;
7131 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7132 submap_info
->pages_shared_now_private
=
7133 extended
.pages_shared_now_private
;
7134 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7135 submap_info
->external_pager
= extended
.external_pager
;
7136 submap_info
->shadow_depth
= extended
.shadow_depth
;
7138 vm_map_unlock_read(map
);
7139 return(KERN_SUCCESS
);
/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region_recurse function migrates to
 *	64 bits
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 */
7152 vm_region_recurse_64(
7154 vm_offset_t
*address
, /* IN/OUT */
7155 vm_size_t
*size
, /* OUT */
7156 natural_t
*nesting_depth
, /* IN/OUT */
7157 vm_region_recurse_info_t info
, /* IN/OUT */
7158 mach_msg_type_number_t
*count
) /* IN/OUT */
7160 vm_map_entry_t tmp_entry
;
	vm_map_entry_t			entry;
	unsigned int			recurse_count;
	vm_map_entry_t			base_entry;
	vm_offset_t			base_next;
	vm_offset_t			base_addr;
	vm_offset_t			baddr_start_delta;
	vm_region_submap_info_64_t	submap_info;
	vm_region_extended_info_data_t	extended;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	submap_info = (vm_region_submap_info_64_t) info;
	*count = VM_REGION_SUBMAP_INFO_COUNT;

	if (*count < VM_REGION_SUBMAP_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);

	*size = entry->vme_end - entry->vme_start;
	start = entry->vme_start;
	baddr_start_delta = *address - start;
	base_next = entry->vme_end;

	while(entry->is_sub_map && recurse_count) {
		vm_map_lock_read(entry->object.sub_map);
		if(entry == base_entry) {
			start = entry->offset;
			start += *address - entry->vme_start;

		submap = entry->object.sub_map;
		vm_map_unlock_read(map);

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next)
			    == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				goto LOOKUP_NEXT_BASE_ENTRY;

		if(start <= entry->vme_start) {
			vm_offset_t old_start = start;
			if(baddr_start_delta) {
				base_addr += (baddr_start_delta);
				*size -= baddr_start_delta;
				baddr_start_delta = 0;

			    (base_addr += (entry->vme_start - start))) {
				vm_map_unlock_read(map);
				goto LOOKUP_NEXT_BASE_ENTRY;

			*size -= entry->vme_start - start;
			if (*size > (entry->vme_end - entry->vme_start)) {
				*size = entry->vme_end - entry->vme_start;

			if(baddr_start_delta) {
				if((start - entry->vme_start)
				    < baddr_start_delta) {
					base_addr += start - entry->vme_start;
					*size -= start - entry->vme_start;
					base_addr += baddr_start_delta;
					*size += baddr_start_delta;
				baddr_start_delta = 0;

			base_addr += entry->vme_start;
			if(base_addr >= base_next) {
				vm_map_unlock_read(map);
				goto LOOKUP_NEXT_BASE_ENTRY;

		if (*size > (entry->vme_end - start))
			*size = entry->vme_end - start;

		start = entry->vme_start - start;
		start += entry->offset;

	*nesting_depth -= recurse_count;
	if(entry != base_entry) {
		start = entry->vme_start + (start - entry->offset);

	submap_info->user_tag = entry->alias;
	submap_info->offset = entry->offset;
	submap_info->protection = entry->protection;
	submap_info->inheritance = entry->inheritance;
	submap_info->max_protection = entry->max_protection;
	submap_info->behavior = entry->behavior;
	submap_info->user_wired_count = entry->user_wired_count;
	submap_info->is_submap = entry->is_sub_map;
	submap_info->object_id = (vm_offset_t)entry->object.vm_object;
	*address = base_addr;

	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	if(!entry->is_sub_map) {
		vm_region_walk(entry, &extended, entry->offset,
			entry->vme_end - start, map, start);
		submap_info->share_mode = extended.share_mode;
		if (extended.external_pager && extended.ref_count == 2
		    && extended.share_mode == SM_SHARED)
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = extended.ref_count;

		submap_info->share_mode = SM_TRUESHARED;
		submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = entry->object.sub_map->ref_count;

	submap_info->pages_resident = extended.pages_resident;
	submap_info->pages_swapped_out = extended.pages_swapped_out;
	submap_info->pages_shared_now_private =
			extended.pages_shared_now_private;
	submap_info->pages_dirtied = extended.pages_dirtied;
	submap_info->external_pager = extended.external_pager;
	submap_info->shadow_depth = extended.shadow_depth;

	vm_map_unlock_read(map);
	return(KERN_SUCCESS);
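/*
 * Illustrative sketch (not part of the original source): how a caller
 * might interpret the vm_region_submap_info_64 record filled in by the
 * recursive region routine above.  The helper name is hypothetical; the
 * fields and SM_* share-mode values are the ones populated above.
 */
#if 0
static boolean_t
example_region_is_shared(vm_region_submap_info_64_t submap_info)
{
	/* share_mode is computed by vm_region_walk() above. */
	return (submap_info->share_mode == SM_SHARED ||
		submap_info->share_mode == SM_TRUESHARED);
}
#endif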
/*
 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 * Goes away after regular vm_region function migrates to
 */
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	 flavor,	/* IN */
	vm_region_info_t	 info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	ipc_port_t		*object_name)	/* OUT */
	vm_map_entry_t		tmp_entry;
	vm_map_entry_t		entry;
	vm_region_basic_info_64_t	basic;
	vm_region_extended_info_t	extended;
	vm_region_top_info_t		top;
	vm_region_object_info_64_t	object_info_64;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	case VM_REGION_BASIC_INFO:
		if (*count < VM_REGION_BASIC_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_64_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT;

		vm_map_lock_read(map);

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);

		start = entry->vme_start;

		basic->offset = entry->offset;
		basic->protection = entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
			basic->shared = entry->is_shared;

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_EXTENDED_INFO:
		if (*count < VM_REGION_EXTENDED_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		extended = (vm_region_extended_info_t) info;
		*count = VM_REGION_EXTENDED_INFO_COUNT;

		vm_map_lock_read(map);

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);

		start = entry->vme_start;

		extended->protection = entry->protection;
		extended->user_tag = entry->alias;
		extended->pages_resident = 0;
		extended->pages_swapped_out = 0;
		extended->pages_shared_now_private = 0;
		extended->pages_dirtied = 0;
		extended->external_pager = 0;
		extended->shadow_depth = 0;

		vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

		if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
			extended->share_mode = SM_PRIVATE;

		*object_name = IP_NULL;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_TOP_INFO:
		if (*count < VM_REGION_TOP_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		top = (vm_region_top_info_t) info;
		*count = VM_REGION_TOP_INFO_COUNT;

		vm_map_lock_read(map);

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);

		start = entry->vme_start;

		top->private_pages_resident = 0;
		top->shared_pages_resident = 0;

		vm_region_top_walk(entry, top);

		*object_name = IP_NULL;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	case VM_REGION_OBJECT_INFO_64:
		if (*count < VM_REGION_OBJECT_INFO_COUNT_64)
			return(KERN_INVALID_ARGUMENT);

		object_info_64 = (vm_region_object_info_64_t) info;
		*count = VM_REGION_OBJECT_INFO_COUNT_64;

		vm_map_lock_read(map);

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);

		start = entry->vme_start;

		object_info_64->offset = entry->offset;
		object_info_64->protection = entry->protection;
		object_info_64->inheritance = entry->inheritance;
		object_info_64->max_protection = entry->max_protection;
		object_info_64->behavior = entry->behavior;
		object_info_64->user_wired_count = entry->user_wired_count;
		object_info_64->is_sub_map = entry->is_sub_map;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			object_info_64->shared = FALSE;
			object_info_64->object_id = 0;
			object_info_64->shared = entry->is_shared;
			object_info_64->object_id =
				(vm_offset_t) entry->object.vm_object;

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);

	return(KERN_INVALID_ARGUMENT);
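/*
 * Illustrative sketch (not part of the original source): the flavor
 * dispatch above expects *count to hold the size of the caller's info
 * buffer and returns data for the entry containing *address.  The
 * routine name vm_region_64 and the buffer type name are assumptions,
 * since the declaration is elided above.
 */
#if 0
static kern_return_t
example_basic_info(vm_map_t map, vm_offset_t *address, vm_size_t *size)
{
	vm_region_basic_info_data_64_t	info;	/* type name assumed */
	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t			object_name;

	return vm_region_64(map, address, size, VM_REGION_BASIC_INFO,
			    (vm_region_info_t)&info, &count, &object_name);
}
#endif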
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
	register struct vm_object *obj, *tmp_obj;
	register int		ref_count;

	if (entry->object.vm_object == 0 || entry->is_sub_map) {
		top->share_mode = SM_EMPTY;

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

		top->private_pages_resident = obj->resident_page_count;
		top->shared_pages_resident = obj->resident_page_count;
		top->ref_count = ref_count;
		top->share_mode = SM_COW;

		while (tmp_obj = obj->shadow) {
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

			top->shared_pages_resident += obj->resident_page_count;
			top->ref_count += ref_count - 1;

		if (entry->needs_copy) {
			top->share_mode = SM_COW;
			top->shared_pages_resident = obj->resident_page_count;

		if (ref_count == 1 ||
		    (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
			top->share_mode = SM_PRIVATE;
			top->private_pages_resident = obj->resident_page_count;
			top->share_mode = SM_SHARED;
			top->shared_pages_resident = obj->resident_page_count;
		top->ref_count = ref_count;

	top->obj_id = (int)obj;

	vm_object_unlock(obj);
	vm_map_entry_t			entry,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	register struct vm_object *obj, *tmp_obj;
	register vm_offset_t	last_offset;
	register int		ref_count;
	void vm_region_look_for_page();

	if ((entry->object.vm_object == 0) ||
	    (entry->is_sub_map) ||
	    (entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

	for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
		vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

	if (extended->shadow_depth || entry->needs_copy)
		extended->share_mode = SM_COW;

		extended->share_mode = SM_PRIVATE;
		if (obj->true_share)
			extended->share_mode = SM_TRUESHARED;
			extended->share_mode = SM_SHARED;

	extended->ref_count = ref_count - extended->shadow_depth;

	for (i = 0; i < extended->shadow_depth; i++) {
		if ((tmp_obj = obj->shadow) == 0)

		vm_object_lock(tmp_obj);
		vm_object_unlock(obj);

		if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)

		extended->ref_count += ref_count;

	vm_object_unlock(obj);

	if (extended->share_mode == SM_SHARED) {
		register vm_map_entry_t	cur;
		register vm_map_entry_t	last;

		obj = entry->object.vm_object;
		last = vm_map_to_entry(map);

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)

		for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
			my_refs += vm_region_count_obj_refs(cur, obj);

		if (my_refs == ref_count)
			extended->share_mode = SM_PRIVATE_ALIASED;
		else if (my_refs > 1)
			extended->share_mode = SM_SHARED_ALIASED;
/* object is locked on entry and locked on return */

vm_region_look_for_page(
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	register vm_page_t	p;
	register vm_object_t	shadow;
	register int		ref_count;
	vm_object_t		caller_object;

	shadow = object->shadow;
	caller_object = object;

		if ( !(object->pager_trusted) && !(object->internal))
			extended->external_pager = 1;

		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			if (shadow && (max_refcnt == 1))
				extended->pages_shared_now_private++;

			if (!p->fictitious &&
			    (p->dirty || pmap_is_modified(p->phys_page)))
				extended->pages_dirtied++;
			extended->pages_resident++;

			if(object != caller_object)
				vm_object_unlock(object);

		if (object->existence_map) {
			if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

				extended->pages_swapped_out++;

				if(object != caller_object)
					vm_object_unlock(object);

			vm_object_lock(shadow);

			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)

			if (++depth > extended->shadow_depth)
				extended->shadow_depth = depth;

			if (ref_count > max_refcnt)
				max_refcnt = ref_count;

			if(object != caller_object)
				vm_object_unlock(object);

			shadow = object->shadow;
			offset = offset + object->shadow_offset;

		if(object != caller_object)
			vm_object_unlock(object);
vm_region_count_obj_refs(
	vm_map_entry_t	entry,
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)

	if (entry->is_sub_map)

	chk_obj = entry->object.vm_object;
	vm_object_lock(chk_obj);

		if (chk_obj == object)

		if (tmp_obj = chk_obj->shadow)
			vm_object_lock(tmp_obj);
		vm_object_unlock(chk_obj);
/*
 *	Routine:	vm_map_simplify
 *
 *	Attempt to simplify the map representation in
 *	the vicinity of the given starting address.
 *
 *	This routine is intended primarily to keep the
 *	kernel maps more compact -- they generally don't
 *	benefit from the "expand a map entry" technology
 *	at allocation time because the adjacent entry
 *	is often wired down.
 */
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;
	vm_map_entry_t	next_entry;

	    (vm_map_lookup_entry(map, start, &this_entry)) &&
	    ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

	    (prev_entry->vme_end == this_entry->vme_start) &&

	    (prev_entry->is_shared == FALSE) &&
	    (prev_entry->is_sub_map == FALSE) &&

	    (this_entry->is_shared == FALSE) &&
	    (this_entry->is_sub_map == FALSE) &&

	    (prev_entry->inheritance == this_entry->inheritance) &&
	    (prev_entry->protection == this_entry->protection) &&
	    (prev_entry->max_protection == this_entry->max_protection) &&
	    (prev_entry->behavior == this_entry->behavior) &&
	    (prev_entry->wired_count == this_entry->wired_count) &&
	    (prev_entry->user_wired_count == this_entry->user_wired_count)&&
	    (prev_entry->in_transition == FALSE) &&
	    (this_entry->in_transition == FALSE) &&

	    (prev_entry->needs_copy == this_entry->needs_copy) &&

	    (prev_entry->object.vm_object == this_entry->object.vm_object)&&
	    ((prev_entry->offset +
	     (prev_entry->vme_end - prev_entry->vme_start))
	     == this_entry->offset)

		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->vme_end = this_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
		counter(c_vm_map_simplified_lower++);

	    (vm_map_lookup_entry(map, start, &this_entry)) &&
	    ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

	    (next_entry->vme_start == this_entry->vme_end) &&

	    (next_entry->is_shared == FALSE) &&
	    (next_entry->is_sub_map == FALSE) &&

	    (next_entry->is_shared == FALSE) &&
	    (next_entry->is_sub_map == FALSE) &&

	    (next_entry->inheritance == this_entry->inheritance) &&
	    (next_entry->protection == this_entry->protection) &&
	    (next_entry->max_protection == this_entry->max_protection) &&
	    (next_entry->behavior == this_entry->behavior) &&
	    (next_entry->wired_count == this_entry->wired_count) &&
	    (next_entry->user_wired_count == this_entry->user_wired_count)&&
	    (this_entry->in_transition == FALSE) &&
	    (next_entry->in_transition == FALSE) &&

	    (next_entry->needs_copy == this_entry->needs_copy) &&

	    (next_entry->object.vm_object == this_entry->object.vm_object)&&
	    ((this_entry->offset +
	     (this_entry->vme_end - this_entry->vme_start))
	     == next_entry->offset)

		vm_map_entry_unlink(map, next_entry);
		this_entry->vme_end = next_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(next_entry->object.vm_object);
		vm_map_entry_dispose(map, next_entry);
		counter(c_vm_map_simplified_upper++);

	counter(c_vm_map_simplify_called++);
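/*
 * Illustrative sketch (not part of the original source): the two passes
 * above merge an entry with a compatible lower and upper neighbor around
 * "start".  A hypothetical caller pattern after changing mappings near an
 * address; the locking discipline is elided above, so this sketch assumes
 * the routine manages the map lock itself.
 */
#if 0
static void
example_simplify(vm_map_t map, vm_offset_t start)
{
	/* Coalesce adjacent, identical entries around a changed address. */
	vm_map_simplify(map, start);
}
#endif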
/*
 *	Routine:	vm_map_machine_attribute
 *
 *	Provide machine-specific attributes to mappings,
 *	such as cachability etc. for machines that provide
 *	them.  NUMA architectures and machines with big/strange
 *	caches will use this.
 *
 *	Responsibilities for locking and checking are handled here,
 *	everything else in the pmap module.  If any non-volatile
 *	information must be kept, the pmap module should handle
 *	it itself.  [This assumes that attributes do not
 *	need to be inherited, which seems ok to me]
 */
vm_map_machine_attribute(
	vm_offset_t	address,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)	/* IN/OUT */
	vm_size_t	sync_size;
	vm_map_entry_t	entry;

	if (address < vm_map_min(map) ||
	    (address + size) > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	if (attribute != MATTR_CACHE) {
		/* If we don't have to find physical addresses, we */
		/* don't have to do an explicit traversal here.    */
		ret = pmap_attribute(map->pmap,
				address, size, attribute, value);

	/* Get the starting address */
	start = trunc_page_32(address);
	/* Figure how much memory we need to flush (in page increments) */
	sync_size = round_page_32(start + size) - start;

	ret = KERN_SUCCESS;		/* Assume it all worked */

	if (vm_map_lookup_entry(map, start, &entry)) {
		if((entry->vme_end - start) > sync_size) {
			sub_size = sync_size;
			sub_size = entry->vme_end - start;
			sync_size -= sub_size;
		if(entry->is_sub_map) {
			vm_map_machine_attribute(
				entry->object.sub_map,
				(start - entry->vme_start)
		if(entry->object.vm_object) {

			vm_object_t		base_object;
			vm_object_offset_t	offset;
			vm_object_offset_t	base_offset;

			offset = (start - entry->vme_start)
			base_offset = offset;
			object = entry->object.vm_object;
			base_object = object;

				if(m && !m->fictitious) {

					pmap_attribute_cache_sync(
				} else if (object->shadow) {
						object->shadow_offset;
					object = object->shadow;

			/* Bump to the next page */
			base_offset += PAGE_SIZE;
			offset = base_offset;
			object = base_object;

	return KERN_FAILURE;
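/*
 * Illustrative sketch (not part of the original source): requesting a
 * cache flush over a range, which takes the MATTR_CACHE path above and
 * walks the range page by page.  The helper name is hypothetical;
 * MATTR_CACHE and MATTR_VAL_CACHE_FLUSH come from <mach/vm_attributes.h>.
 */
#if 0
static kern_return_t
example_cache_flush(vm_map_t map, vm_offset_t address, vm_size_t size)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return vm_map_machine_attribute(map, address, size,
					MATTR_CACHE, &value);
}
#endif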
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 */
vm_map_behavior_set(
	vm_behavior_t	new_behavior)
	register vm_map_entry_t	entry;
	vm_map_entry_t	temp_entry;

		"vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
		(integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:

	case VM_BEHAVIOR_WILLNEED:
	case VM_BEHAVIOR_DONTNEED:
		new_behavior = VM_BEHAVIOR_DEFAULT;

		return(KERN_INVALID_ARGUMENT);

	/*
	 *	The entire address range must be valid for the map.
	 * 	Note that vm_map_range_check() does a
	 *	vm_map_lookup_entry() internally and returns the
	 *	entry containing the start of the address range if
	 *	the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		vm_map_clip_start(map, entry, start);
		return(KERN_INVALID_ADDRESS);

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;

	return(KERN_SUCCESS);
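/*
 * Illustrative sketch (not part of the original source): advising the
 * kernel that a range will be touched sequentially, the madvise-style
 * path through vm_map_behavior_set above.  The helper is hypothetical.
 */
#if 0
static kern_return_t
example_advise_sequential(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_behavior_set(map, start, end, VM_BEHAVIOR_SEQUENTIAL);
}
#endif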
#include <mach_kdb.h>

#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
		struct vm_map_links	*links);

extern void vm_map_header_print(
		struct vm_map_header	*header);

extern void vm_map_entry_print(
		vm_map_entry_t		entry);

extern void vm_follow_entry(
		vm_map_entry_t		entry);

extern void vm_follow_map(

/*
 *	vm_map_links_print:	[ debug ]
 */
	struct vm_map_links	*links)
	iprintf("prev = %08X  next = %08X  start = %08X  end = %08X\n",

/*
 *	vm_map_header_print:	[ debug ]
 */
vm_map_header_print(
	struct vm_map_header	*header)
	vm_map_links_print(&header->links);
	iprintf("nentries = %08X, %sentries_pageable\n",
		(header->entries_pageable ? "" : "!"));

/*
 *	vm_follow_entry:	[ debug ]
 */
	vm_map_entry_t	entry)
	extern int db_indent;

	iprintf("map entry %08X\n", entry);

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n",shadows);

/*
 *	vm_map_entry_print:	[ debug ]
 */
	register vm_map_entry_t	entry)
	extern int db_indent;
	static char *inheritance_name[4] = { "share", "copy", "none", "?"};
	static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry %08X\n", entry);

	vm_map_links_print(&entry->links);

	iprintf("start = %08X  end = %08X, prot=%x/%x/%s\n",
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->user_wired_count);
	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap = %08X - offset=%08X\n",
			entry->object.sub_map,
		iprintf("object=%08X, offset=%08X, ",
			entry->object.vm_object,
		printf("%sis_shared, %sneeds_copy\n",
			(entry->is_shared ? "" : "!"),
			(entry->needs_copy ? "" : "!"));

/*
 *	vm_follow_map:	[ debug ]
 */
	register vm_map_entry_t	entry;
	extern int db_indent;

	iprintf("task map %08X\n", map);

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);

/*
 *	vm_map_print:	[ debug ]
 */
	register vm_map_entry_t	entry;
	extern int db_indent;

	map = (vm_map_t)inmap;	/* Make sure we have the right type */

	iprintf("task map %08X\n", map);

	vm_map_header_print(&map->hdr);

	iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",

	iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),

	switch (map->sw_state) {

	iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);

/*
 *	Routine:	vm_map_copy_print
 *		Pretty-print a copy object for ddb.
 */
	extern int db_indent;
	vm_map_entry_t entry;

	copy = (vm_map_copy_t)incopy;	/* Make sure we have the right type */

	printf("copy object 0x%x\n", copy);

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");

	case VM_MAP_COPY_OBJECT:

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");

		printf("[bad type]");

	printf(", offset=0x%x", copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);

/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
db_vm_map_total_size(
	vm_map_entry_t	entry;

	map = (vm_map_t)inmap;	/* Make sure we have the right type */

	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;

#endif	/* MACH_KDB */
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_insert(
	vm_map_entry_t		insp_entry,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);
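/*
 * Illustrative sketch (not part of the original source): the expected
 * calling pattern for vm_map_entry_insert -- the caller holds the map
 * lock and names the entry after which the new one is linked.  The
 * position of the elided parameters (start, end, object) and the
 * argument values used here are assumptions.
 */
#if 0
static void
example_insert(vm_map_t map, vm_map_entry_t insp_entry,
	       vm_offset_t start, vm_offset_t end, vm_object_t object)
{
	(void) vm_map_entry_insert(map, insp_entry, start, end,
				   object, (vm_object_offset_t)0,
				   FALSE,		/* needs_copy */
				   FALSE,		/* is_shared */
				   FALSE,		/* in_transition */
				   VM_PROT_DEFAULT, VM_PROT_ALL,
				   VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT,
				   0);			/* wired_count */
}
#endif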
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	kern_return_t		result;
	vm_size_t		mapped_size;
	vm_map_entry_t		src_entry;	/* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_offset_t		map_address;
	vm_offset_t		src_start;	/* start of entry to map */
	vm_offset_t		src_end;	/* end of region to be mapped */
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == round_page_32(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/* Compute start and end of region. */
	src_start = trunc_page_32(addr);
	src_end = round_page_32(src_start + size);

	/* Initialize map_header. */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, need to loop on them.
	 */
	while (mapped_size != size) {
		vm_size_t	entry_size;

		/* Find the beginning of the region. */
		if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;

		if(src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_size_t)(src_entry->vme_end -
					 src_entry->vme_start);

		if(src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					vm_object_pmap_protect(
						src_entry->object.vm_object,
						src_entry->vme_start,
						src_entry->protection &
					pmap_protect(vm_map_pmap(map),
						src_entry->vme_start,
						src_entry->protection &

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;

			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
					MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			vm_object_unlock(object);

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE;	/* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/* The new region has to be copied now if required. */
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
		} else if (src_entry->wired_count == 0 &&
			vm_object_copy_quickly(&new_entry->object.vm_object,
					       (new_entry->vme_end -
						new_entry->vme_start),
					       &new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/* Handle copy_on_write semantics. */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
					((src_entry->is_shared
					 PMAP_NULL : map->pmap),
					src_entry->vme_start,
					src_entry->protection &
				src_entry->needs_copy = TRUE;

			/* Throw away the old object reference of the new entry. */
			vm_object_deallocate(object);

			new_entry->is_shared = FALSE;

			/*
			 *	The map can be safely unlocked since we
			 *	already hold a reference on the object.
			 *
			 *	Record the timestamp of the map for later
			 *	verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);	/* Increments timestamp once! */

			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
						&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
				result = vm_object_copy_strategically(
						&new_entry->object.vm_object,
						&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;

			/* Throw away the old object reference of the new entry. */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);

			/*
			 *	Verify that the map has not substantially
			 *	changed while the copy was being made.
			 */

			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 *	Simple version comparison failed.
				 *
				 *	Retry the lookup and verify that the
				 *	same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	if (result != KERN_SUCCESS) {
		/* Free all allocated elements. */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
/*
 *	Map portion of a task's address space.
 *	Mapped region must not overlap more than
 *	one vm memory object.  Protections and
 *	inheritance attributes remain the same
 *	as in the original task and are	out parameters.
 *	Source and Target task can be identical
 *	Other attributes are identical as for vm_map()
 */
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_offset_t	memory_address,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
	kern_return_t	result;
	vm_map_entry_t	entry;
	vm_map_entry_t	insp_entry;
	vm_map_entry_t	new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	size = round_page_32(size);

	result = vm_remap_extract(src_map, memory_address,
				  size, copy, &map_header,

	if (result != KERN_SUCCESS) {

	/*
	 *	Allocate/check a range of free virtual address
	 *	space for the target
	 */
	*address = trunc_page_32(*address);
	vm_map_lock(target_map);
	result = vm_remap_range_allocate(target_map, address, size,
					 mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
				vm_map_deallocate(entry->object.sub_map);
			_vm_map_entry_dispose(&map_header, entry);

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
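/*
 * Illustrative sketch (not part of the original source): remapping a
 * range of one map into another.  The routine name vm_remap and the
 * position of the elided parameters (size, mask, anywhere, src_map,
 * copy) are assumptions; protections come back as out parameters.
 */
#if 0
static kern_return_t
example_remap(vm_map_t target_map, vm_map_t src_map,
	      vm_offset_t memory_address, vm_size_t size,
	      vm_offset_t *address)
{
	vm_prot_t	cur_prot, max_prot;

	return vm_remap(target_map, address, size,
			(vm_offset_t)0,		/* mask */
			TRUE,			/* anywhere */
			src_map, memory_address,
			FALSE,			/* copy */
			&cur_prot, &max_prot, VM_INHERIT_NONE);
}
#endif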
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Allocate a range in the specified virtual address map.
 *	returns the address and the map entry just before the allocated
 *
 *	Map must be locked.
 */
vm_remap_range_allocate(
	vm_offset_t	*address,	/* IN/OUT */
	vm_map_entry_t	*map_entry)	/* OUT */
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	    /* Calculate the first possible address. */

	    if (start < map->min_offset)
		start = map->min_offset;
	    if (start > map->max_offset)
		return(KERN_NO_SPACE);

	    /*
	     *	Look for the first possible address;
	     *	if there's already something at this
	     *	address, we have to start after it.
	     */

	    assert(first_free_is_valid(map));
	    if (start == map->min_offset) {
		if ((entry = map->first_free) != vm_map_to_entry(map))
		    start = entry->vme_end;
		vm_map_entry_t	tmp_entry;
		if (vm_map_lookup_entry(map, start, &tmp_entry))
		    start = tmp_entry->vme_end;

	    /*
	     *	In any case, the "entry" always precedes
	     *	the proposed new region throughout the
	     */

		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
			return(KERN_NO_SPACE);

		if ((end > map->max_offset) || (end < start)) {
		    if (map->wait_for_space) {
			if (size <= (map->max_offset -
			    assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
			    thread_block((void (*)(void))0);

		    return(KERN_NO_SPACE);

		/* If there are no more entries, we must win. */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)

		/* Didn't fit -- move to the next entry. */

		start = entry->vme_end;

	    vm_map_entry_t	temp_entry;

	    /*
	     *	the address doesn't itself violate
	     *	the mask requirement.
	     */

	    if ((start & mask) != 0)
		return(KERN_NO_SPACE);

	    /* ...	the address is within bounds */

	    if ((start < map->min_offset) ||
		(end > map->max_offset) ||
		return(KERN_INVALID_ADDRESS);

	    /* ...	the starting address isn't allocated */

	    if (vm_map_lookup_entry(map, start, &temp_entry))
		return(KERN_NO_SPACE);

	    /* ...	the next region doesn't overlap the */

	    if ((entry->vme_next != vm_map_to_entry(map)) &&
		(entry->vme_next->vme_start < end))
		return(KERN_NO_SPACE);

	return(KERN_SUCCESS);
/*
 *	Set the address map for the current thr_act to the specified map
 */
	thread_act_t	thr_act = current_act();
	vm_map_t	oldmap = thr_act->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thr_act, map, mycpu);

	mp_enable_preemption();
/*
 *	Routine:	vm_map_write_user
 *
 *	Copy out data from a kernel space into space in the
 *	destination map.  The space must already exist in the
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault.  i.e. kernel mode user
 */
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;

		/* take on the identity of the target map while doing */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
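/*
 * Illustrative sketch (not part of the original source): copying a
 * kernel buffer out to a (possibly non-current) user map with
 * vm_map_write_user above.  The helper is hypothetical and assumes the
 * routine returns the kern_return_t computed in kr.
 */
#if 0
static kern_return_t
example_copy_to_user(vm_map_t user_map, vm_offset_t user_addr,
		     void *kbuf, vm_size_t len)
{
	/* Blocks on page faults; only call from a thread that may block. */
	return vm_map_write_user(user_map, (vm_offset_t)kbuf,
				 user_addr, len);
}
#endif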
/*
 *	Routine:	vm_map_read_user
 *
 *	Copy in data from a user space source map into the
 *	kernel map.  The space must already exist in the
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault.  i.e. kernel mode user
 */
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;

		/* take on the identity of the target map while doing */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */
	ipc_port_t	src_region,
	ipc_port_t	dst_region)
	vm_named_entry_t	src_object;
	vm_named_entry_t	dst_object;
	vm_offset_t		max_off;
	vm_map_entry_t		entry;
	vm_map_entry_t		new_entry;
	vm_map_entry_t		insert_point;

	src_object = (vm_named_entry_t)src_region->ip_kobject;
	dst_object = (vm_named_entry_t)dst_region->ip_kobject;
	if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
		return KERN_INVALID_ARGUMENT;
	src_map = (vm_map_t)src_object->backing.map;
	dst_map = (vm_map_t)dst_object->backing.map;
	/* destination map is assumed to be unavailable to any other */
	/* activity.  i.e. it is new */
	vm_map_lock(src_map);
	if((src_map->min_offset != dst_map->min_offset)
			|| (src_map->max_offset != dst_map->max_offset)) {
		vm_map_unlock(src_map);
		return KERN_INVALID_ARGUMENT;
	addr = src_map->min_offset;
	vm_map_lookup_entry(dst_map, addr, &entry);
	if(entry == vm_map_to_entry(dst_map)) {
		entry = entry->vme_next;
	if(entry == vm_map_to_entry(dst_map)) {
		max_off = src_map->max_offset;
		max_off = entry->vme_start;
	vm_map_lookup_entry(src_map, addr, &entry);
	if(entry == vm_map_to_entry(src_map)) {
		entry = entry->vme_next;
	vm_map_lookup_entry(dst_map, addr, &insert_point);
	while((entry != vm_map_to_entry(src_map)) &&
	      (entry->vme_end <= max_off)) {
		addr = entry->vme_start;
		new_entry = vm_map_entry_create(dst_map);
		vm_map_entry_copy(new_entry, entry);
		vm_map_entry_link(dst_map, insert_point, new_entry);
		insert_point = new_entry;
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (new_entry->is_sub_map) {
				vm_map_reference(new_entry->object.sub_map);
				vm_object_reference(
					new_entry->object.vm_object);
		dst_map->size += new_entry->vme_end - new_entry->vme_start;
		entry = entry->vme_next;
	vm_map_unlock(src_map);
	return KERN_SUCCESS;
/*
 *	Export routines to other components for the things we access locally through
 */
	return (current_map_fast());
/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {

		if (start < entry->vme_start) {

		/* Check protection associated with entry. */

		if ((entry->protection & protection) != protection) {

		/* go to next entry */

		start = entry->vme_end;
		entry = entry->vme_next;
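/*
 * Illustrative sketch (not part of the original source): verifying that
 * an entire allocated range allows write access before touching it,
 * using vm_map_check_protection above.  The helper is hypothetical.
 */
#if 0
static boolean_t
example_range_is_writable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_check_protection(map, start, end, VM_PROT_WRITE);
}
#endif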
/*
 *	This routine is obsolete, but included for backward
 *	compatibility for older drivers.
 */
kernel_vm_map_reference(
	vm_map_reference(map);

/*
 *	Most code internal to the osfmk will go through a
 *	macro defining this.  This is always here for the
 *	use of other kernel components.
 */
#undef vm_map_reference
	register vm_map_t	map)
	if (map == VM_MAP_NULL)

	mutex_lock(&map->s_lock);

	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);

	mutex_unlock(&map->s_lock);
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
	register vm_map_t	map)
	if (map == VM_MAP_NULL)

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	vm_map_res_deallocate(map);
	mutex_unlock(&map->s_lock);

	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

	/*
	 *	The map residence count isn't decremented here because
	 *	the vm_map_delete below will traverse the entire map,
	 *	deleting entries, and the residence counts on objects
	 *	and sharing maps will go away then.
	 */

	vm_map_destroy(map);