/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
/* Internal prototypes
 */
extern boolean_t	vm_map_range_check(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_map_entry_t	*entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header	*map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry);

extern void		vm_map_pmap_enter(
				vm_map_t		map,
				vm_offset_t		addr,
				vm_offset_t		end_addr,
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_prot_t		protection);

extern void		_vm_map_clip_end(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		end);

extern void		vm_map_entry_delete(
				vm_map_t	map,
				vm_map_entry_t	entry);

extern kern_return_t	vm_map_delete(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				int		flags);

extern void		vm_map_copy_steal_pages(
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_t	dst_map,
				vm_map_entry_t	entry,
				vm_map_copy_t	copy,
				vm_offset_t	start);

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_t	dst_map,
				vm_map_entry_t	tmp_entry,
				vm_map_copy_t	copy,
				vm_offset_t	start,
				pmap_t		pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_map_t	src_map,
				vm_offset_t	src_addr,
				vm_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_map_t	map,
				vm_offset_t	*addr,	/* IN/OUT */
				vm_map_copy_t	copy,
				boolean_t	overwrite);

extern void		vm_map_fork_share(
				vm_map_t	old_map,
				vm_map_entry_t	old_entry,
				vm_map_t	new_map);

extern boolean_t	vm_map_fork_copy(
				vm_map_t	old_map,
				vm_map_entry_t	*old_entry_p,
				vm_map_t	new_map);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t	map,
				vm_offset_t	*address,	/* IN/OUT */
				vm_size_t	size,
				vm_offset_t	mask,
				boolean_t	anywhere,
				vm_map_entry_t	*map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		start);

void			vm_region_top_walk(
				vm_map_entry_t		entry,
				vm_region_top_info_t	top);

void			vm_region_walk(
				vm_map_entry_t			entry,
				vm_region_extended_info_t	extended,
				vm_object_offset_t		offset,
				vm_offset_t			range,
				vm_map_t			map,
				vm_offset_t			va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN				\
		*(NEW) = *(OLD);	\
		(NEW)->is_shared = FALSE;	\
		(NEW)->needs_wakeup = FALSE;	\
		(NEW)->in_transition = FALSE;	\
		(NEW)->wired_count = 0;		\
		(NEW)->user_wired_count = 0;	\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)	(*(NEW) = *(OLD))
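
/*
 * Illustrative sketch, not part of the original source: the difference
 * between the two copy macros above.  "map" and "old" are hypothetical;
 * the map lock is assumed to be held by the caller.
 */
#if 0	/* example only */
	vm_map_entry_t new = vm_map_entry_create(map);

	vm_map_entry_copy(new, old);		/* same range/object, wired counts reset */
	assert(new->wired_count == 0 && new->user_wired_count == 0);

	vm_map_entry_copy_full(new, old);	/* exact clone, wire counts preserved */
	assert(new->wired_count == old->wired_count);
#endif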
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	entries that used to be one object.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;
/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to a entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */

vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;		/* to init kentry_data_size */

#define		NO_COALESCE_LIMIT	(1024 * 128)

/*
 * Threshold for aggressive (eager) page map entering for vm copyout
 * operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
					PAGE_SIZE, "maps");
	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					1024*1024, PAGE_SIZE*5,
					"non-kernel map entries");
	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					kentry_data_size, kentry_data_size,
					"kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
					16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}

void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world. This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
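
/*
 * Worked example, not part of the original source: the "one entry per
 * eight pages" guess above, using the 64M/4K figures quoted in the
 * comment.  Values are approximate.
 */
#if 0	/* example only */
	vm_size_t phys_bytes = 64 * 1024 * 1024;	/* 64M machine		*/
	int	  phys_pages = phys_bytes / 4096;	/* 16384 4K pages	*/
	int	  guess = phys_pages / 8;		/* ~2048 kernel entries	*/
	vm_size_t table = round_page(guess * sizeof(struct vm_map_entry));
	/* with a ~32-byte entry this is on the order of 64K, i.e. ~0.1% of 64M */
#endif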
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max,
	boolean_t	pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->size = 0;
	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
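
/*
 * Usage sketch, not part of the original source: creating a fresh
 * pageable map over an assumed address range.  The bounds and the
 * pmap_create() size argument are assumptions for the example only.
 */
#if 0	/* example only */
	vm_map_t new_map;

	new_map = vm_map_create(pmap_create(0),		/* new physical map	*/
				VM_MIN_ADDRESS,		/* lower bound		*/
				VM_MAX_ADDRESS,		/* upper bound		*/
				TRUE);			/* pageable entries	*/
	/* the new map comes back with ref_count == 1 and no entries */
#endif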
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	    _vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	    _vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(map, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}
boolean_t	first_free_is_valid(vm_map_t map);	/* forward */
boolean_t	first_free_check = FALSE;

boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
	       (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free) 		\
MACRO_BEGIN						\
	vm_map_t	UFF_map;			\
	vm_map_entry_t	UFF_first_free;			\
	vm_map_entry_t	UFF_next_entry;			\
							\
	UFF_map = (map);				\
	UFF_first_free = (new_first_free);		\
	UFF_next_entry = UFF_first_free->vme_next;	\
	while (trunc_page(UFF_next_entry->vme_start) ==	\
	       trunc_page(UFF_first_free->vme_end) ||	\
	       (trunc_page(UFF_next_entry->vme_start) ==	\
		trunc_page(UFF_first_free->vme_start) &&	\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
		UFF_first_free = UFF_next_entry;	\
		UFF_next_entry = UFF_first_free->vme_next;	\
		if (UFF_first_free == vm_map_to_entry(UFF_map))	\
			break;				\
	}						\
	UFF_map->first_free = UFF_first_free;		\
	assert(first_free_is_valid(UFF_map));		\
MACRO_END
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)	\
MACRO_BEGIN						\
	vm_map_t VMEL_map;				\
	vm_map_entry_t VMEL_entry;			\
							\
	VMEL_map = (map);				\
	VMEL_entry = (entry);				\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);	\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)	\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)	\
MACRO_BEGIN						\
	(hdr)->nentries++;				\
	(entry)->vme_prev = (after_where);		\
	(entry)->vme_next = (after_where)->vme_next;	\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
MACRO_END

#define vm_map_entry_unlink(map, entry)			\
MACRO_BEGIN						\
	vm_map_t VMEU_map;				\
	vm_map_entry_t VMEU_entry;			\
	vm_map_entry_t VMEU_first_free;			\
							\
	VMEU_map = (map);				\
	VMEU_entry = (entry);				\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;	\
	else						\
		VMEU_first_free = VMEU_map->first_free;	\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);	\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);	\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)		\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)		\
MACRO_BEGIN						\
	(hdr)->nentries--;				\
	(entry)->vme_next->vme_prev = (entry)->vme_prev; \
	(entry)->vme_prev->vme_next = (entry)->vme_next; \
MACRO_END
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}

/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}

/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_unlock(map);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
/*
 *	vm_map_swapin/vm_map_swapout
 *
 *	Swap a map in and out, either referencing or releasing its resources.
 *	These functions are internal use only; however, they must be exported
 *	because they may be called from macros, which are exported.
 *
 *	In the case of swapout, there could be races on the residence count,
 *	so if the residence count is up, we return, assuming that a
 *	vm_map_deallocate() call in the near future will bring us back.
 *
 *	Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 *	Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 *	Because vm_map_swapin() is potentially an expensive operation, it
 *	should be used with caution.
 *
 *	Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped out.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

#if	TASK_SWAPPER
int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * Map is locked
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * Map is locked
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */

	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
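
/*
 * Usage sketch, not part of the original source: the typical pattern for
 * vm_map_lookup_entry().  On TRUE the entry contains "addr"; on FALSE the
 * returned entry is the one immediately preceding the hole that contains
 * "addr".  "map" and "addr" are hypothetical; the map lock is assumed held.
 */
#if 0	/* example only */
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr is mapped: entry->vme_start <= addr < entry->vme_end */
	} else {
		/* addr is unmapped: a new entry would be linked after "entry" */
	}
#endif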
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	map->size += size;

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
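
/*
 * Usage sketch, not part of the original source: how a kernel caller in
 * the style of kmem_alloc might use vm_map_find_space().  "size" and
 * "object" are hypothetical.  On KERN_SUCCESS the map is returned locked
 * and the new entry's object/offset fields are still zero.
 */
#if 0	/* example only */
	vm_map_entry_t	entry;
	vm_offset_t	addr;
	kern_return_t	kr;

	kr = vm_map_find_space(kernel_map, &addr, size, (vm_offset_t) 0, &entry);
	if (kr == KERN_SUCCESS) {
		entry->object.vm_object = object;	/* attach backing object */
		entry->offset = (vm_object_offset_t) 0;
		vm_map_unlock(kernel_map);
	}
#endif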
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page is not found in the object the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	unsigned int		cache_attr;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
				map, addr, object, offset);
		}
		m->busy = TRUE;

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_addr);
			m->no_isync = FALSE;
		}

		cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, cache_attr, FALSE);

		vm_object_lock(object);

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);

		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							    THREAD_ABORTSAFE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}
	}

	return(result);

 BailOut: ;
	vm_map_unlock(map);
	return(result);

#undef	RETURN
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, pmap_base_addr, \
				    0x10000000); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				entry->vme_start, entry->vme_end); \
		} \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
	vm_map_t VMCS_map; \
	vm_map_entry_t VMCS_entry; \
	vm_offset_t VMCS_startaddr; \
	VMCS_map = (map); \
	VMCS_entry = (entry); \
	VMCS_startaddr = (startaddr); \
	if (VMCS_startaddr > VMCS_entry->vme_start) { \
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} \
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr) \
	MACRO_BEGIN \
	if ((startaddr) > (entry)->vme_start) \
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		if(entry->use_pmap) { \
			vm_offset_t	pmap_base_addr; \
			\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, pmap_base_addr, \
				    0x10000000); \
			entry->use_pmap = FALSE; \
		} else if(entry->object.vm_object \
			&& !entry->is_sub_map \
			&& entry->object.vm_object->phys_contiguous) { \
			pmap_remove(map->pmap, \
				entry->vme_start, entry->vme_end); \
		} \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
	vm_map_t VMCE_map; \
	vm_map_entry_t VMCE_entry; \
	vm_offset_t VMCE_endaddr; \
	VMCE_map = (map); \
	VMCE_entry = (entry); \
	VMCE_endaddr = (endaddr); \
	if (VMCE_endaddr < VMCE_entry->vme_end) { \
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} \
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr) \
	MACRO_BEGIN \
	if ((endaddr) < (entry)->vme_end) \
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
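
/*
 * Illustrative sketch, not part of the original source: how the clip
 * macros are used to bracket an operation to [start, end).  After the two
 * clips, every entry visited between start and end lies entirely inside
 * the range, so per-entry state can be changed without affecting the
 * untouched portions of neighboring mappings.
 */
#if 0	/* example only */
	if (vm_map_lookup_entry(map, start, &entry))
		vm_map_clip_start(map, entry, start);	/* split off the front */
	else
		entry = entry->vme_next;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);	/* split off the tail */
		/* ... operate on this whole entry ... */
		entry = entry->vme_next;
	}
#endif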
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses is wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
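
/*
 * Usage sketch, not part of the original source: rejecting a request whose
 * range is not fully mapped.  The map is assumed to be locked for reading,
 * as the comment above requires.
 */
#if 0	/* example only */
	vm_map_entry_t	first;

	if (!vm_map_range_check(map, start, end, &first))
		return KERN_INVALID_ADDRESS;	/* hole somewhere in [start, end) */
	/* "first" now refers to the entry containing "start" */
#endif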
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_object_offset_t	offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	submap->mapped = TRUE;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
#ifndef i386
		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					   start, end - start);
			if(result)
				panic("pmap_nest failed!");
			entry->use_pmap = TRUE;
		}
#endif
#ifdef i386
		pmap_remove(map->pmap, start, end);
#endif
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t		current;
	register vm_offset_t		prev;
	vm_map_entry_t			entry;
	vm_prot_t			new_max;
	boolean_t			clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 * If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	current = entry;
	if (clip) {
		vm_map_clip_start(map, entry, start);
	}
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
				 new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap). This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;

				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, pmap_base_addr,
					    (pmap_end_addr - pmap_base_addr) + 1);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						     current->vme_start,
						     current->vme_end,
						     current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						     current->vme_end,
						     current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
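
/*
 * Usage sketch, not part of the original source: making one page
 * read-only.  Passing set_max = FALSE changes only the current
 * protection; the maximum protection of the region is left alone.
 */
#if 0	/* example only */
	kern_return_t	kr;

	kr = vm_map_protect(map, addr, addr + PAGE_SIZE,
			    VM_PROT_READ,	/* new current protection */
			    FALSE);		/* don't touch max_protection */
#endif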
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t	temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		temp_entry = temp_entry->vme_next;
		entry = temp_entry;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
		}
		entry = entry->vme_next;
	}

	entry = temp_entry;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	vm_map_t		pmap_map;
	register vm_offset_t	s,e;
	kern_return_t		rc;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	wait_interrupt_t	interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;
	vm_size_t		size;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
	if (start == end) {
		/* We wired what the caller asked for, zero pages */
		vm_map_unlock(map);
		return KERN_SUCCESS;
	}

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	s=start;
	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start? start: entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}
			/*
			 * User wiring is interruptible
			 */
			wait_result = vm_map_entry_wait(map,
					  (user_wire) ? THREAD_ABORTSAFE :
							THREAD_UNINT);
			if (user_wire && wait_result == THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			vm_map_lock(map);
			/*
			 * Cannot avoid a lookup here. reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * Worse that can happen is, it may not exist anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything upto the previous
				 * entry.  let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}
		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_start;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;

			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					/* ppc implementation requires that */
					/* submaps pmap address ranges line */
					/* up with parent map */
					pmap_addr = sub_start;
				} else {
					pmap = map->pmap;
					pmap_addr = start;
				}
				if (entry->wired_count) {
					if (entry->wired_count
					    >= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					if (user_wire &&
					    entry->user_wired_count
					    >= MAX_WIRE_COUNT) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					   (entry->user_wired_count == 0))
						entry->wired_count++;
					entry = entry->vme_next;
					continue;

				} else {
					vm_object_offset_t	offset_hi;
					vm_object_offset_t	offset_lo;
					vm_object_offset_t	offset;
					vm_prot_t		prot;
					boolean_t		wired;
					vm_behavior_t		behavior;
					vm_map_entry_t		local_entry;
					vm_map_version_t	version;
					vm_map_t		lookup_map;
					vm_object_t		object;

					/* call vm_map_lookup_locked to */
					/* cause any needs copy to be   */
					/* evaluated */
					local_start = entry->vme_start;
					lookup_map = map;
					vm_map_lock_write_to_read(map);
					if(vm_map_lookup_locked(
						&lookup_map, local_start,
						access_type,
						&version, &object,
						&offset, &prot, &wired,
						&behavior, &offset_lo,
						&offset_hi, &pmap_map)) {

						vm_map_unlock(lookup_map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(pmap_map != lookup_map)
						vm_map_unlock(pmap_map);
					vm_map_unlock_read(lookup_map);
					vm_map_lock(map);
					vm_object_unlock(object);

					if (!vm_map_lookup_entry(map,
						local_start, &local_entry)) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					/* did we have a change of type? */
					if (!local_entry->is_sub_map) {
						last_timestamp = map->timestamp;
						continue;
					}
					entry = local_entry;
					if(user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					   (entry->user_wired_count == 1))
						entry->wired_count++;

					entry->in_transition = TRUE;

					vm_map_unlock(map);
					rc = vm_map_wire_nested(
						entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
					vm_map_lock(map);
				}
			} else {
				local_start = entry->vme_start;
				if(user_wire)
					entry->user_wired_count++;
				if((!user_wire) ||
				   (entry->user_wired_count == 1))
					entry->wired_count++;
				vm_map_unlock(map);
				rc = vm_map_wire_nested(entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
				vm_map_lock(map);
			}
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, local_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");
			entry = first_entry;

			last_timestamp = map->timestamp;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < e)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				if (rc != KERN_SUCCESS) {/* from vm_*_wire */
					if (user_wire)
						entry->user_wired_count--;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count--;
				}
				entry = entry->vme_next;
			}
			if (rc != KERN_SUCCESS) { /* from vm_*_wire */
				vm_map_unlock(map);
				if (need_wakeup)
					vm_map_entry_wakeup(map);
				/*
				 * undo everything upto the previous entry.
				 */
				(void)vm_map_unwire(map, start, s, user_wire);
				return rc;
			}
			continue;
		}
		/*
		 * If this entry is already wired then increment
		 * the appropriate wire reference count.
		 */
		if (entry->wired_count) {
			/* sanity check: wired_count is a short */
			if (entry->wired_count >= MAX_WIRE_COUNT)
				panic("vm_map_wire: too many wirings");

			if (user_wire &&
			    entry->user_wired_count >= MAX_WIRE_COUNT) {
				vm_map_unlock(map);
				vm_map_unwire(map, start,
					entry->vme_start, user_wire);
				return(KERN_FAILURE);
			}
			/*
			 * entry is already wired down, get our reference
			 * after clipping to our range.
			 */
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			if (user_wire)
				entry->user_wired_count++;
			if ((!user_wire) || (entry->user_wired_count == 1))
				entry->wired_count++;

			entry = entry->vme_next;
			continue;
		}
		/*
		 * Unwired entry or wire request transmitted via submap
		 */

		/*
		 * Perform actions of vm_map_lookup that need the write
		 * lock on the map: create a shadow object for a
		 * copy-on-write region, or an object for a zero-fill
		 * region.
		 */
		size = entry->vme_end - entry->vme_start;
		/*
		 * If wiring a copy-on-write page, we need to copy it now
		 * even if we're only (currently) requesting read access.
		 * This is aggressive, but once it's wired we can't move it.
		 */
		if (entry->needs_copy) {
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset, size);
			entry->needs_copy = FALSE;
		} else if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(size);
			entry->offset = (vm_object_offset_t)0;
		}

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		s = entry->vme_start;
		e = entry->vme_end;

		/*
		 * Check for holes and protection mismatch.
		 * Holes: Next entry should be contiguous unless this
		 *	  is the end of the region.
		 * Protection: Access requested must be allowed, unless
		 *	wiring is by protection class
		 */
		if ((((entry->vme_end < end) &&
		      ((entry->vme_next == vm_map_to_entry(map)) ||
		       (entry->vme_next->vme_start > entry->vme_end))) ||
		     ((entry->protection & access_type) != access_type))) {
			/*
			 * Found a hole or protection problem.
			 * Unwire the region we wired so far.
			 */
			if (start != entry->vme_start) {
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
			} else {
				vm_map_unlock(map);
			}
			return((entry->protection&access_type) != access_type?
				KERN_PROTECTION_FAILURE: KERN_INVALID_ADDRESS);
		}

		assert(entry->wired_count == 0 && entry->user_wired_count == 0);

		if (user_wire)
			entry->user_wired_count++;
		if ((!user_wire) || (entry->user_wired_count == 1))
			entry->wired_count++;

		entry->in_transition = TRUE;
		/*
		 * This entry might get split once we unlock the map.
		 * In vm_fault_wire(), we need the current range as
		 * defined by this entry.  In order for this to work
		 * along with a simultaneous clip operation, we make a
		 * temporary copy of this entry and use that for the
		 * wiring.  Note that the underlying objects do not
		 * change during a clip.
		 */
		tmp_entry = *entry;

		/*
		 * The in_transition state guarantees that the entry
		 * (or entries for this range, if split occurred) will be
		 * there when the map lock is acquired for the second time.
		 */
		vm_map_unlock(map);

		if (!user_wire && cur_thread != THREAD_NULL)
			interruptible_state = thread_interrupt_level(THREAD_UNINT);

		if (map_pmap)
			rc = vm_fault_wire(map,
					&tmp_entry, map_pmap, pmap_addr);
		else
			rc = vm_fault_wire(map,
					&tmp_entry, map->pmap,
					tmp_entry.vme_start);

		if (!user_wire && cur_thread != THREAD_NULL)
			thread_interrupt_level(interruptible_state);
		vm_map_lock(map);

		if (last_timestamp+1 != map->timestamp) {
			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");

			entry = first_entry;
		}

		last_timestamp = map->timestamp;

		while ((entry != vm_map_to_entry(map)) &&
		       (entry->vme_start < tmp_entry.vme_end)) {
			assert(entry->in_transition);
			entry->in_transition = FALSE;
			if (entry->needs_wakeup) {
				entry->needs_wakeup = FALSE;
				need_wakeup = TRUE;
			}
			if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
				if (user_wire)
					entry->user_wired_count--;
				if ((!user_wire) ||
				    (entry->user_wired_count == 0))
					entry->wired_count--;
			}
			entry = entry->vme_next;
		}

		if (rc != KERN_SUCCESS) {		/* from vm_*_wire */
			vm_map_unlock(map);
			if (need_wakeup)
				vm_map_entry_wakeup(map);
			/*
			 * undo everything up to the previous entry.
			 */
			(void)vm_map_unwire(map, start, s, user_wire);
			return rc;
		}
	} /* end while loop through map entries */
	vm_map_unlock(map);

	/*
	 * wake up anybody waiting on entries we wired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return(KERN_SUCCESS);

}
kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{
	kern_return_t	kret;

	/*
	 * the calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to ensure a
	 * reasonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);

	kret = vm_map_wire_nested(map, start, end, access_type,
				  user_wire, (pmap_t)NULL, 0);

	mapping_relpre();

	return kret;
}
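/*
 * Illustrative only (not part of the original source): a kernel caller
 * would typically pair these routines, for example
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_wire(map, start, end,
 *			 VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		... access the wired range ...
 *		(void) vm_map_unwire(map, start, end, FALSE);
 *	}
 *
 * with user_wire = TRUE for wirings made on behalf of user requests,
 * which are tracked per entry in user_wired_count.
 */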
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and in-transition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;

	vm_map_lock(map);
	if (map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	}
	else {
		/* Start address is not in map. */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	need_wakeup = FALSE;
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->in_transition) {
			/*
			 * Another thread is wiring down this entry. Note
			 * that if it is not for the other thread we would
			 * be unwiring an unwired entry.  This is not
			 * permitted.  If we wait, we will be unwiring memory
			 * we did not wire.
			 *
			 * Another thread is unwiring this entry.  We did not
			 * have a reference to it, because if we did, this
			 * entry will not be getting unwired now.
			 */
			if (!user_wire)
				panic("vm_map_unwire: in_transition entry");

			entry = entry->vme_next;
			continue;
		}

		if (entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			if (map_pmap == NULL) {
			   if (entry->use_pmap) {
				pmap = entry->object.sub_map->pmap;
				pmap_addr = sub_start;
			   } else {
				pmap = map->pmap;
				pmap_addr = start;
			   }
			   if (entry->wired_count == 0 ||
			       (user_wire && entry->user_wired_count == 0)) {
				if (!user_wire)
					panic("vm_map_unwire: entry is unwired");
				entry = entry->vme_next;
				continue;
			   }
			   /*
			    *	Holes: Next entry should be contiguous unless
			    *	this is the end of the region.
			    */
			   if (((entry->vme_end < end) &&
				((entry->vme_next == vm_map_to_entry(map)) ||
				 (entry->vme_next->vme_start
						> entry->vme_end)))) {
				if (!user_wire)
					panic("vm_map_unwire: non-contiguous region");
				entry = entry->vme_next;
				continue;
			   }

			   if (!user_wire || (--entry->user_wired_count == 0))
				entry->wired_count--;

			   if (entry->wired_count != 0) {
				entry = entry->vme_next;
				continue;
			   }

			   entry->in_transition = TRUE;
			   tmp_entry = *entry;	/* see comment in vm_map_wire() */

			   /*
			    * We can unlock the map now. The in_transition state
			    * guarantees existence of the entry.
			    */
			   vm_map_unlock(map);
			   vm_map_unwire_nested(entry->object.sub_map,
				sub_start, sub_end, user_wire, pmap, pmap_addr);
			   vm_map_lock(map);

			   if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have been
				 * clipped or deleted after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map,
							 tmp_entry.vme_start,
							 &first_entry)) {
					if (!user_wire)
						panic("vm_map_unwire: re-lookup failed");
					entry = first_entry->vme_next;
				} else
					entry = first_entry;
			   }
			   last_timestamp = map->timestamp;
			   /*
			    * clear transition bit for all constituent entries
			    * that were in the original entry (saved in
			    * tmp_entry).  Also check for waiters.
			    */
			   while ((entry != vm_map_to_entry(map)) &&
				  (entry->vme_start < tmp_entry.vme_end)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				entry = entry->vme_next;
			   }
			   continue;
			} else {
			   vm_map_unlock(map);
			   vm_map_unwire_nested(entry->object.sub_map,
				sub_start, sub_end, user_wire, pmap, pmap_addr);
			   vm_map_lock(map);

			   if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have been
				 * clipped or deleted after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map,
							 tmp_entry.vme_start,
							 &first_entry)) {
					if (!user_wire)
						panic("vm_map_unwire: re-lookup failed");
					entry = first_entry->vme_next;
				} else
					entry = first_entry;
			   }
			   last_timestamp = map->timestamp;
			}
			continue;
		}
		if ((entry->wired_count == 0) ||
		    (user_wire && entry->user_wired_count == 0)) {
			if (!user_wire)
				panic("vm_map_unwire: entry is unwired");

			entry = entry->vme_next;
			continue;
		}

		assert(entry->wired_count > 0 &&
		       (!user_wire || entry->user_wired_count > 0));

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		/*
		 *	Holes: Next entry should be contiguous unless
		 *	this is the end of the region.
		 */
		if (((entry->vme_end < end) &&
		     ((entry->vme_next == vm_map_to_entry(map)) ||
		      (entry->vme_next->vme_start > entry->vme_end)))) {
			if (!user_wire)
				panic("vm_map_unwire: non-contiguous region");
			entry = entry->vme_next;
			continue;
		}

		if (!user_wire || (--entry->user_wired_count == 0))
			entry->wired_count--;

		if (entry->wired_count != 0) {
			entry = entry->vme_next;
			continue;
		}

		entry->in_transition = TRUE;
		tmp_entry = *entry;	/* see comment in vm_map_wire() */
		/*
		 * We can unlock the map now. The in_transition state
		 * guarantees existence of the entry.
		 */
		vm_map_unlock(map);
		if (map_pmap) {
			vm_fault_unwire(map,
				&tmp_entry, FALSE, map_pmap, pmap_addr);
		} else {
			vm_fault_unwire(map,
				&tmp_entry, FALSE, map->pmap,
				tmp_entry.vme_start);
		}
		vm_map_lock(map);

		if (last_timestamp+1 != map->timestamp) {
			/*
			 * Find the entry again.  It could have been clipped
			 * or deleted after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
						 &first_entry)) {
				if (!user_wire)
					panic("vm_map_unwire: re-lookup failed");
				entry = first_entry->vme_next;
			} else
				entry = first_entry;
		}
		last_timestamp = map->timestamp;

		/*
		 * clear transition bit for all constituent entries that
		 * were in the original entry (saved in tmp_entry).  Also
		 * check for waiters.
		 */
		while ((entry != vm_map_to_entry(map)) &&
		       (entry->vme_start < tmp_entry.vme_end)) {
			assert(entry->in_transition);
			entry->in_transition = FALSE;
			if (entry->needs_wakeup) {
				entry->needs_wakeup = FALSE;
				need_wakeup = TRUE;
			}
			entry = entry->vme_next;
		}
	}
	vm_map_unlock(map);

	/*
	 * wake up anybody waiting on entries that we have unwired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);
	return(KERN_SUCCESS);

}
kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end,
				    user_wire, (pmap_t)NULL, 0);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = VM_OBJECT_NULL;
		submap = entry->object.sub_map;
	} else {
		submap = VM_MAP_NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 *	Deallocate the object only after removing all
	 *	pmap entries pointing to its pages.
	 */
	if (submap != VM_MAP_NULL)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);
}
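/*
 * Note: vm_map_entry_delete() is entered with the map locked and returns
 * with the map unlocked (its callers in vm_map_delete() depend on this);
 * the backing submap or VM object is released only after the entry has
 * been unlinked and disposed of.
 */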
2869 vm_map_submap_pmap_clean(
2876 vm_offset_t submap_start
;
2877 vm_offset_t submap_end
;
2879 vm_size_t remove_size
;
2880 vm_map_entry_t entry
;
2882 submap_end
= offset
+ (end
- start
);
2883 submap_start
= offset
;
2884 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2886 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2887 if(offset
> entry
->vme_start
)
2888 remove_size
-= offset
- entry
->vme_start
;
2891 if(submap_end
< entry
->vme_end
) {
2893 entry
->vme_end
- submap_end
;
2895 if(entry
->is_sub_map
) {
2896 vm_map_submap_pmap_clean(
2899 start
+ remove_size
,
2900 entry
->object
.sub_map
,
2904 if((map
->mapped
) && (map
->ref_count
)
2905 && (entry
->object
.vm_object
!= NULL
)) {
2906 vm_object_pmap_protect(
2907 entry
->object
.vm_object
,
2914 pmap_remove(map
->pmap
,
2915 start
, start
+ remove_size
);
2920 entry
= entry
->vme_next
;
2922 while((entry
!= vm_map_to_entry(sub_map
))
2923 && (entry
->vme_start
< submap_end
)) {
2924 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2925 if(submap_end
< entry
->vme_end
) {
2926 remove_size
-= entry
->vme_end
- submap_end
;
2928 if(entry
->is_sub_map
) {
2929 vm_map_submap_pmap_clean(
2931 (start
+ entry
->vme_start
) - offset
,
2932 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2933 entry
->object
.sub_map
,
2936 if((map
->mapped
) && (map
->ref_count
)
2937 && (entry
->object
.vm_object
!= NULL
)) {
2938 vm_object_pmap_protect(
2939 entry
->object
.vm_object
,
2946 pmap_remove(map
->pmap
,
2947 (start
+ entry
->vme_start
) - offset
,
2948 ((start
+ entry
->vme_start
)
2949 - offset
) + remove_size
);
2952 entry
= entry
->vme_next
;
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings.  Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
2970 register vm_map_t map
,
2972 register vm_offset_t end
,
2975 vm_map_entry_t entry
, next
;
2976 struct vm_map_entry
*first_entry
, tmp_entry
;
2977 register vm_offset_t s
, e
;
2978 register vm_object_t object
;
2979 boolean_t need_wakeup
;
2980 unsigned int last_timestamp
= ~0; /* unlikely value */
2982 extern vm_map_t kernel_map
;
2984 interruptible
= (flags
& VM_MAP_REMOVE_INTERRUPTIBLE
) ?
2985 THREAD_ABORTSAFE
: THREAD_UNINT
;
2988 * All our DMA I/O operations in IOKit are currently done by
2989 * wiring through the map entries of the task requesting the I/O.
2990 * Because of this, we must always wait for kernel wirings
2991 * to go away on the entries before deleting them.
2993 * Any caller who wants to actually remove a kernel wiring
2994 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
2995 * properly remove one wiring instead of blasting through
2998 flags
|= VM_MAP_REMOVE_WAIT_FOR_KWIRE
;
3001 * Find the start of the region, and clip it
3003 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3004 entry
= first_entry
;
3005 vm_map_clip_start(map
, entry
, start
);
3008 * Fix the lookup hint now, rather than each
3009 * time through the loop.
3011 SAVE_HINT(map
, entry
->vme_prev
);
3013 entry
= first_entry
->vme_next
;
3016 need_wakeup
= FALSE
;
3018 * Step through all entries in this region
3020 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3022 vm_map_clip_end(map
, entry
, end
);
3023 if (entry
->in_transition
) {
3024 wait_result_t wait_result
;
3027 * Another thread is wiring/unwiring this entry.
3028 * Let the other thread know we are waiting.
3030 s
= entry
->vme_start
;
3031 entry
->needs_wakeup
= TRUE
;
3034 * wake up anybody waiting on entries that we have
3035 * already unwired/deleted.
3038 vm_map_entry_wakeup(map
);
3039 need_wakeup
= FALSE
;
3042 wait_result
= vm_map_entry_wait(map
, interruptible
);
3044 if (interruptible
&&
3045 wait_result
== THREAD_INTERRUPTED
) {
3047 * We do not clear the needs_wakeup flag,
3048 * since we cannot tell if we were the only one.
3051 return KERN_ABORTED
;
3055 * The entry could have been clipped or it
3056 * may not exist anymore. Look it up again.
3058 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3059 assert((map
!= kernel_map
) &&
3060 (!entry
->is_sub_map
));
3062 * User: use the next entry
3064 entry
= first_entry
->vme_next
;
3066 entry
= first_entry
;
3067 SAVE_HINT(map
, entry
->vme_prev
);
3069 last_timestamp
= map
->timestamp
;
3071 } /* end in_transition */
3073 if (entry
->wired_count
) {
3075 * Remove a kernel wiring if requested or if
3076 * there are user wirings.
3078 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3079 (entry
->user_wired_count
> 0))
3080 entry
->wired_count
--;
3082 /* remove all user wire references */
3083 entry
->user_wired_count
= 0;
3085 if (entry
->wired_count
!= 0) {
3086 assert((map
!= kernel_map
) &&
3087 (!entry
->is_sub_map
));
3089 * Cannot continue. Typical case is when
3090 * a user thread has physical io pending on
3091 * on this page. Either wait for the
3092 * kernel wiring to go away or return an
3095 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3096 wait_result_t wait_result
;
3098 s
= entry
->vme_start
;
3099 entry
->needs_wakeup
= TRUE
;
3100 wait_result
= vm_map_entry_wait(map
,
3103 if (interruptible
&&
3104 wait_result
== THREAD_INTERRUPTED
) {
3106 * We do not clear the
3107 * needs_wakeup flag, since we
3108 * cannot tell if we were the
3112 return KERN_ABORTED
;
3116 * The entry could have been clipped or
3117 * it may not exist anymore. Look it
3120 if (!vm_map_lookup_entry(map
, s
,
3122 assert((map
!= kernel_map
) &&
3123 (!entry
->is_sub_map
));
3125 * User: use the next entry
3127 entry
= first_entry
->vme_next
;
3129 entry
= first_entry
;
3130 SAVE_HINT(map
, entry
->vme_prev
);
3132 last_timestamp
= map
->timestamp
;
3136 return KERN_FAILURE
;
3140 entry
->in_transition
= TRUE
;
3142 * copy current entry. see comment in vm_map_wire()
3145 s
= entry
->vme_start
;
3149 * We can unlock the map now. The in_transition
3150 * state guarentees existance of the entry.
3153 vm_fault_unwire(map
, &tmp_entry
,
3154 tmp_entry
.object
.vm_object
== kernel_object
,
3155 map
->pmap
, tmp_entry
.vme_start
);
3158 if (last_timestamp
+1 != map
->timestamp
) {
3160 * Find the entry again. It could have
3161 * been clipped after we unlocked the map.
3163 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3164 assert((map
!= kernel_map
) &&
3165 (!entry
->is_sub_map
));
3166 first_entry
= first_entry
->vme_next
;
3168 SAVE_HINT(map
, entry
->vme_prev
);
3171 SAVE_HINT(map
, entry
->vme_prev
);
3172 first_entry
= entry
;
3175 last_timestamp
= map
->timestamp
;
3177 entry
= first_entry
;
3178 while ((entry
!= vm_map_to_entry(map
)) &&
3179 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3180 assert(entry
->in_transition
);
3181 entry
->in_transition
= FALSE
;
3182 if (entry
->needs_wakeup
) {
3183 entry
->needs_wakeup
= FALSE
;
3186 entry
= entry
->vme_next
;
3189 * We have unwired the entry(s). Go back and
3192 entry
= first_entry
;
3196 /* entry is unwired */
3197 assert(entry
->wired_count
== 0);
3198 assert(entry
->user_wired_count
== 0);
3200 if ((!entry
->is_sub_map
&&
3201 entry
->object
.vm_object
!= kernel_object
) ||
3202 entry
->is_sub_map
) {
3203 if(entry
->is_sub_map
) {
3204 if(entry
->use_pmap
) {
3206 pmap_unnest(map
->pmap
, entry
->vme_start
,
3207 entry
->vme_end
- entry
->vme_start
);
3209 if((map
->mapped
) && (map
->ref_count
)) {
3210 /* clean up parent map/maps */
3211 vm_map_submap_pmap_clean(
3212 map
, entry
->vme_start
,
3214 entry
->object
.sub_map
,
3218 vm_map_submap_pmap_clean(
3219 map
, entry
->vme_start
, entry
->vme_end
,
3220 entry
->object
.sub_map
,
3224 if((map
->mapped
) && (map
->ref_count
)) {
3225 vm_object_pmap_protect(
3226 entry
->object
.vm_object
,
3228 entry
->vme_end
- entry
->vme_start
,
3233 pmap_remove(map
->pmap
,
3240 next
= entry
->vme_next
;
3241 s
= next
->vme_start
;
3242 last_timestamp
= map
->timestamp
;
3243 vm_map_entry_delete(map
, entry
);
3244 /* vm_map_entry_delete unlocks the map */
3248 if(entry
== vm_map_to_entry(map
)) {
3251 if (last_timestamp
+1 != map
->timestamp
) {
3253 * we are responsible for deleting everything
3254 * from the give space, if someone has interfered
3255 * we pick up where we left off, back fills should
3256 * be all right for anyone except map_delete and
3257 * we have to assume that the task has been fully
3258 * disabled before we get here
3260 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3261 entry
= entry
->vme_next
;
3263 SAVE_HINT(map
, entry
->vme_prev
);
3266 * others can not only allocate behind us, we can
3267 * also see coalesce while we don't have the map lock
3269 if(entry
== vm_map_to_entry(map
)) {
3272 vm_map_clip_start(map
, entry
, s
);
3274 last_timestamp
= map
->timestamp
;
3277 if (map
->wait_for_space
)
3278 thread_wakeup((event_t
) map
);
3280 * wake up anybody waiting on entries that we have already deleted.
3283 vm_map_entry_wakeup(map
);
3285 return KERN_SUCCESS
;
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;
	boolean_t		funnel_set = FALSE;
	funnel_t		*curflock;
	thread_t		cur_thread;

	cur_thread = current_thread();

	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);
	if (funnel_set) {
		thread_funnel_set( curflock, TRUE);
		funnel_set = FALSE;
	}
	return(result);
}
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
					vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
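/*
 * A vm_map_copy_t comes in three flavors, all handled above:
 * VM_MAP_COPY_ENTRY_LIST (a chain of map entries, each referencing a VM
 * object), VM_MAP_COPY_OBJECT (a single VM object), and
 * VM_MAP_COPY_KERNEL_BUFFER (header and data allocated together by a
 * single kalloc(), so it is released with kfree() rather than zfree()).
 */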
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *		Move the information in a map copy object to
 *		a new map copy object, leaving the old one
 *		empty.
 *
 *		This is used by kernel routines that need
 *		to look at out-of-line data (in copyin form)
 *		before deciding whether to return SUCCESS.
 *		If the routine returns FAILURE, the original
 *		copy object will be deallocated; therefore,
 *		these routines must make a copy of the copy
 *		object and leave the original empty so that
 *		deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */
	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
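/*
 * Illustrative only (not part of the original source): the intended usage
 * pattern is a kernel routine that inspects copyin data before committing
 * to it, for example
 *
 *	kern_return_t
 *	example_consume(vm_map_copy_t copy)	// hypothetical routine
 *	{
 *		vm_map_copy_t saved = vm_map_copy_copy(copy);
 *
 *		if (!data_looks_valid(saved)) {	// hypothetical check
 *			vm_map_copy_discard(saved);
 *			return KERN_FAILURE;
 *		}
 *		... keep "saved" for later use ...
 *		return KERN_SUCCESS;
 *	}
 *
 * Because vm_map_copy_copy() leaves the original empty, the caller's
 * deallocation of the original on the FAILURE path cannot free data the
 * routine still holds.
 */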
3429 vm_map_overwrite_submap_recurse(
3431 vm_offset_t dst_addr
,
3434 vm_offset_t dst_end
;
3435 vm_map_entry_t tmp_entry
;
3436 vm_map_entry_t entry
;
3437 kern_return_t result
;
3438 boolean_t encountered_sub_map
= FALSE
;
3443 * Verify that the destination is all writeable
3444 * initially. We have to trunc the destination
3445 * address and round the copy size or we'll end up
3446 * splitting entries in strange ways.
3449 dst_end
= round_page(dst_addr
+ dst_size
);
3450 vm_map_lock(dst_map
);
3453 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3454 vm_map_unlock(dst_map
);
3455 return(KERN_INVALID_ADDRESS
);
3458 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3460 for (entry
= tmp_entry
;;) {
3461 vm_map_entry_t next
;
3463 next
= entry
->vme_next
;
3464 while(entry
->is_sub_map
) {
3465 vm_offset_t sub_start
;
3466 vm_offset_t sub_end
;
3467 vm_offset_t local_end
;
3469 if (entry
->in_transition
) {
3471 * Say that we are waiting, and wait for entry.
3473 entry
->needs_wakeup
= TRUE
;
3474 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3479 encountered_sub_map
= TRUE
;
3480 sub_start
= entry
->offset
;
3482 if(entry
->vme_end
< dst_end
)
3483 sub_end
= entry
->vme_end
;
3486 sub_end
-= entry
->vme_start
;
3487 sub_end
+= entry
->offset
;
3488 local_end
= entry
->vme_end
;
3489 vm_map_unlock(dst_map
);
3491 result
= vm_map_overwrite_submap_recurse(
3492 entry
->object
.sub_map
,
3494 sub_end
- sub_start
);
3496 if(result
!= KERN_SUCCESS
)
3498 if (dst_end
<= entry
->vme_end
)
3499 return KERN_SUCCESS
;
3500 vm_map_lock(dst_map
);
3501 if(!vm_map_lookup_entry(dst_map
, local_end
,
3503 vm_map_unlock(dst_map
);
3504 return(KERN_INVALID_ADDRESS
);
3507 next
= entry
->vme_next
;
3510 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3511 vm_map_unlock(dst_map
);
3512 return(KERN_PROTECTION_FAILURE
);
3516 * If the entry is in transition, we must wait
3517 * for it to exit that state. Anything could happen
3518 * when we unlock the map, so start over.
3520 if (entry
->in_transition
) {
3523 * Say that we are waiting, and wait for entry.
3525 entry
->needs_wakeup
= TRUE
;
3526 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3532 * our range is contained completely within this map entry
3534 if (dst_end
<= entry
->vme_end
) {
3535 vm_map_unlock(dst_map
);
3536 return KERN_SUCCESS
;
3539 * check that range specified is contiguous region
3541 if ((next
== vm_map_to_entry(dst_map
)) ||
3542 (next
->vme_start
!= entry
->vme_end
)) {
3543 vm_map_unlock(dst_map
);
3544 return(KERN_INVALID_ADDRESS
);
3548 * Check for permanent objects in the destination.
3550 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3551 ((!entry
->object
.vm_object
->internal
) ||
3552 (entry
->object
.vm_object
->true_share
))) {
3553 if(encountered_sub_map
) {
3554 vm_map_unlock(dst_map
);
3555 return(KERN_FAILURE
);
3562 vm_map_unlock(dst_map
);
3563 return(KERN_SUCCESS
);
/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
3618 vm_map_copy_overwrite_nested(
3620 vm_offset_t dst_addr
,
3622 boolean_t interruptible
,
3625 vm_offset_t dst_end
;
3626 vm_map_entry_t tmp_entry
;
3627 vm_map_entry_t entry
;
3629 boolean_t aligned
= TRUE
;
3630 boolean_t contains_permanent_objects
= FALSE
;
3631 boolean_t encountered_sub_map
= FALSE
;
3632 vm_offset_t base_addr
;
3633 vm_size_t copy_size
;
3634 vm_size_t total_size
;
3638 * Check for null copy object.
3641 if (copy
== VM_MAP_COPY_NULL
)
3642 return(KERN_SUCCESS
);
3645 * Check for special kernel buffer allocated
3646 * by new_ipc_kmsg_copyin.
3649 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3650 return(vm_map_copyout_kernel_buffer(
3656 * Only works for entry lists at the moment. Will
3657 * support page lists later.
3660 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3662 if (copy
->size
== 0) {
3663 vm_map_copy_discard(copy
);
3664 return(KERN_SUCCESS
);
3668 * Verify that the destination is all writeable
3669 * initially. We have to trunc the destination
3670 * address and round the copy size or we'll end up
3671 * splitting entries in strange ways.
3674 if (!page_aligned(copy
->size
) ||
3675 !page_aligned (copy
->offset
) ||
3676 !page_aligned (dst_addr
))
3679 dst_end
= round_page(dst_addr
+ copy
->size
);
3681 dst_end
= dst_addr
+ copy
->size
;
3684 vm_map_lock(dst_map
);
3687 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3688 vm_map_unlock(dst_map
);
3689 return(KERN_INVALID_ADDRESS
);
3691 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3692 for (entry
= tmp_entry
;;) {
3693 vm_map_entry_t next
= entry
->vme_next
;
3695 while(entry
->is_sub_map
) {
3696 vm_offset_t sub_start
;
3697 vm_offset_t sub_end
;
3698 vm_offset_t local_end
;
3700 if (entry
->in_transition
) {
3703 * Say that we are waiting, and wait for entry.
3705 entry
->needs_wakeup
= TRUE
;
3706 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3711 local_end
= entry
->vme_end
;
3712 if (!(entry
->needs_copy
)) {
3713 /* if needs_copy we are a COW submap */
3714 /* in such a case we just replace so */
3715 /* there is no need for the follow- */
3717 encountered_sub_map
= TRUE
;
3718 sub_start
= entry
->offset
;
3720 if(entry
->vme_end
< dst_end
)
3721 sub_end
= entry
->vme_end
;
3724 sub_end
-= entry
->vme_start
;
3725 sub_end
+= entry
->offset
;
3726 vm_map_unlock(dst_map
);
3728 kr
= vm_map_overwrite_submap_recurse(
3729 entry
->object
.sub_map
,
3731 sub_end
- sub_start
);
3732 if(kr
!= KERN_SUCCESS
)
3734 vm_map_lock(dst_map
);
3737 if (dst_end
<= entry
->vme_end
)
3738 goto start_overwrite
;
3739 if(!vm_map_lookup_entry(dst_map
, local_end
,
3741 vm_map_unlock(dst_map
);
3742 return(KERN_INVALID_ADDRESS
);
3744 next
= entry
->vme_next
;
3747 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3748 vm_map_unlock(dst_map
);
3749 return(KERN_PROTECTION_FAILURE
);
3753 * If the entry is in transition, we must wait
3754 * for it to exit that state. Anything could happen
3755 * when we unlock the map, so start over.
3757 if (entry
->in_transition
) {
3760 * Say that we are waiting, and wait for entry.
3762 entry
->needs_wakeup
= TRUE
;
3763 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3769 * our range is contained completely within this map entry
3771 if (dst_end
<= entry
->vme_end
)
3774 * check that range specified is contiguous region
3776 if ((next
== vm_map_to_entry(dst_map
)) ||
3777 (next
->vme_start
!= entry
->vme_end
)) {
3778 vm_map_unlock(dst_map
);
3779 return(KERN_INVALID_ADDRESS
);
3784 * Check for permanent objects in the destination.
3786 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3787 ((!entry
->object
.vm_object
->internal
) ||
3788 (entry
->object
.vm_object
->true_share
))) {
3789 contains_permanent_objects
= TRUE
;
3797 * If there are permanent objects in the destination, then
3798 * the copy cannot be interrupted.
3801 if (interruptible
&& contains_permanent_objects
) {
3802 vm_map_unlock(dst_map
);
3803 return(KERN_FAILURE
); /* XXX */
3808 * Make a second pass, overwriting the data
3809 * At the beginning of each loop iteration,
3810 * the next entry to be overwritten is "tmp_entry"
3811 * (initially, the value returned from the lookup above),
3812 * and the starting address expected in that entry
3816 total_size
= copy
->size
;
3817 if(encountered_sub_map
) {
3819 /* re-calculate tmp_entry since we've had the map */
3821 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3822 vm_map_unlock(dst_map
);
3823 return(KERN_INVALID_ADDRESS
);
3826 copy_size
= copy
->size
;
3829 base_addr
= dst_addr
;
3831 /* deconstruct the copy object and do in parts */
3832 /* only in sub_map, interruptable case */
3833 vm_map_entry_t copy_entry
;
3834 vm_map_entry_t previous_prev
;
3835 vm_map_entry_t next_copy
;
3837 int remaining_entries
;
3840 for (entry
= tmp_entry
; copy_size
== 0;) {
3841 vm_map_entry_t next
;
3843 next
= entry
->vme_next
;
3845 /* tmp_entry and base address are moved along */
3846 /* each time we encounter a sub-map. Otherwise */
3847 /* entry can outpase tmp_entry, and the copy_size */
3848 /* may reflect the distance between them */
3849 /* if the current entry is found to be in transition */
3850 /* we will start over at the beginning or the last */
3851 /* encounter of a submap as dictated by base_addr */
3852 /* we will zero copy_size accordingly. */
3853 if (entry
->in_transition
) {
3855 * Say that we are waiting, and wait for entry.
3857 entry
->needs_wakeup
= TRUE
;
3858 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3860 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3862 vm_map_unlock(dst_map
);
3863 return(KERN_INVALID_ADDRESS
);
3869 if(entry
->is_sub_map
) {
3870 vm_offset_t sub_start
;
3871 vm_offset_t sub_end
;
3872 vm_offset_t local_end
;
3874 if (entry
->needs_copy
) {
3875 /* if this is a COW submap */
3876 /* just back the range with a */
3877 /* anonymous entry */
3878 if(entry
->vme_end
< dst_end
)
3879 sub_end
= entry
->vme_end
;
3882 if(entry
->vme_start
< base_addr
)
3883 sub_start
= base_addr
;
3885 sub_start
= entry
->vme_start
;
3887 dst_map
, entry
, sub_end
);
3889 dst_map
, entry
, sub_start
);
3890 entry
->is_sub_map
= FALSE
;
3892 entry
->object
.sub_map
);
3893 entry
->object
.sub_map
= NULL
;
3894 entry
->is_shared
= FALSE
;
3895 entry
->needs_copy
= FALSE
;
3897 entry
->protection
= VM_PROT_ALL
;
3898 entry
->max_protection
= VM_PROT_ALL
;
3899 entry
->wired_count
= 0;
3900 entry
->user_wired_count
= 0;
3901 if(entry
->inheritance
3902 == VM_INHERIT_SHARE
)
3903 entry
->inheritance
= VM_INHERIT_COPY
;
3906 /* first take care of any non-sub_map */
3907 /* entries to send */
3908 if(base_addr
< entry
->vme_start
) {
3911 entry
->vme_start
- base_addr
;
3914 sub_start
= entry
->offset
;
3916 if(entry
->vme_end
< dst_end
)
3917 sub_end
= entry
->vme_end
;
3920 sub_end
-= entry
->vme_start
;
3921 sub_end
+= entry
->offset
;
3922 local_end
= entry
->vme_end
;
3923 vm_map_unlock(dst_map
);
3924 copy_size
= sub_end
- sub_start
;
3926 /* adjust the copy object */
3927 if (total_size
> copy_size
) {
3928 vm_size_t local_size
= 0;
3929 vm_size_t entry_size
;
3932 new_offset
= copy
->offset
;
3933 copy_entry
= vm_map_copy_first_entry(copy
);
3935 vm_map_copy_to_entry(copy
)){
3936 entry_size
= copy_entry
->vme_end
-
3937 copy_entry
->vme_start
;
3938 if((local_size
< copy_size
) &&
3939 ((local_size
+ entry_size
)
3941 vm_map_copy_clip_end(copy
,
3943 copy_entry
->vme_start
+
3944 (copy_size
- local_size
));
3945 entry_size
= copy_entry
->vme_end
-
3946 copy_entry
->vme_start
;
3947 local_size
+= entry_size
;
3948 new_offset
+= entry_size
;
3950 if(local_size
>= copy_size
) {
3951 next_copy
= copy_entry
->vme_next
;
3952 copy_entry
->vme_next
=
3953 vm_map_copy_to_entry(copy
);
3955 copy
->cpy_hdr
.links
.prev
;
3956 copy
->cpy_hdr
.links
.prev
= copy_entry
;
3957 copy
->size
= copy_size
;
3959 copy
->cpy_hdr
.nentries
;
3960 remaining_entries
-= nentries
;
3961 copy
->cpy_hdr
.nentries
= nentries
;
3964 local_size
+= entry_size
;
3965 new_offset
+= entry_size
;
3968 copy_entry
= copy_entry
->vme_next
;
3972 if((entry
->use_pmap
) && (pmap
== NULL
)) {
3973 kr
= vm_map_copy_overwrite_nested(
3974 entry
->object
.sub_map
,
3978 entry
->object
.sub_map
->pmap
);
3979 } else if (pmap
!= NULL
) {
3980 kr
= vm_map_copy_overwrite_nested(
3981 entry
->object
.sub_map
,
3984 interruptible
, pmap
);
3986 kr
= vm_map_copy_overwrite_nested(
3987 entry
->object
.sub_map
,
3993 if(kr
!= KERN_SUCCESS
) {
3994 if(next_copy
!= NULL
) {
3995 copy
->cpy_hdr
.nentries
+=
3997 copy
->cpy_hdr
.links
.prev
->vme_next
=
3999 copy
->cpy_hdr
.links
.prev
4001 copy
->size
= total_size
;
4005 if (dst_end
<= local_end
) {
4006 return(KERN_SUCCESS
);
4008 /* otherwise copy no longer exists, it was */
4009 /* destroyed after successful copy_overwrite */
4010 copy
= (vm_map_copy_t
)
4011 zalloc(vm_map_copy_zone
);
4012 vm_map_copy_first_entry(copy
) =
4013 vm_map_copy_last_entry(copy
) =
4014 vm_map_copy_to_entry(copy
);
4015 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4016 copy
->offset
= new_offset
;
4018 total_size
-= copy_size
;
4020 /* put back remainder of copy in container */
4021 if(next_copy
!= NULL
) {
4022 copy
->cpy_hdr
.nentries
= remaining_entries
;
4023 copy
->cpy_hdr
.links
.next
= next_copy
;
4024 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4025 copy
->size
= total_size
;
4026 next_copy
->vme_prev
=
4027 vm_map_copy_to_entry(copy
);
4030 base_addr
= local_end
;
4031 vm_map_lock(dst_map
);
4032 if(!vm_map_lookup_entry(dst_map
,
4033 local_end
, &tmp_entry
)) {
4034 vm_map_unlock(dst_map
);
4035 return(KERN_INVALID_ADDRESS
);
4040 if (dst_end
<= entry
->vme_end
) {
4041 copy_size
= dst_end
- base_addr
;
4045 if ((next
== vm_map_to_entry(dst_map
)) ||
4046 (next
->vme_start
!= entry
->vme_end
)) {
4047 vm_map_unlock(dst_map
);
4048 return(KERN_INVALID_ADDRESS
);
4057 /* adjust the copy object */
4058 if (total_size
> copy_size
) {
4059 vm_size_t local_size
= 0;
4060 vm_size_t entry_size
;
4062 new_offset
= copy
->offset
;
4063 copy_entry
= vm_map_copy_first_entry(copy
);
4064 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4065 entry_size
= copy_entry
->vme_end
-
4066 copy_entry
->vme_start
;
4067 if((local_size
< copy_size
) &&
4068 ((local_size
+ entry_size
)
4070 vm_map_copy_clip_end(copy
, copy_entry
,
4071 copy_entry
->vme_start
+
4072 (copy_size
- local_size
));
4073 entry_size
= copy_entry
->vme_end
-
4074 copy_entry
->vme_start
;
4075 local_size
+= entry_size
;
4076 new_offset
+= entry_size
;
4078 if(local_size
>= copy_size
) {
4079 next_copy
= copy_entry
->vme_next
;
4080 copy_entry
->vme_next
=
4081 vm_map_copy_to_entry(copy
);
4083 copy
->cpy_hdr
.links
.prev
;
4084 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4085 copy
->size
= copy_size
;
4087 copy
->cpy_hdr
.nentries
;
4088 remaining_entries
-= nentries
;
4089 copy
->cpy_hdr
.nentries
= nentries
;
4092 local_size
+= entry_size
;
4093 new_offset
+= entry_size
;
4096 copy_entry
= copy_entry
->vme_next
;
4106 local_pmap
= dst_map
->pmap
;
4108 if ((kr
= vm_map_copy_overwrite_aligned(
4109 dst_map
, tmp_entry
, copy
,
4110 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4111 if(next_copy
!= NULL
) {
4112 copy
->cpy_hdr
.nentries
+=
4114 copy
->cpy_hdr
.links
.prev
->vme_next
=
4116 copy
->cpy_hdr
.links
.prev
=
4118 copy
->size
+= copy_size
;
4122 vm_map_unlock(dst_map
);
4127 * if the copy and dst address are misaligned but the same
4128 * offset within the page we can copy_not_aligned the
4129 * misaligned parts and copy aligned the rest. If they are
4130 * aligned but len is unaligned we simply need to copy
4131 * the end bit unaligned. We'll need to split the misaligned
4132 * bits of the region in this case !
4134 /* ALWAYS UNLOCKS THE dst_map MAP */
4135 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4136 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4137 if(next_copy
!= NULL
) {
4138 copy
->cpy_hdr
.nentries
+=
4140 copy
->cpy_hdr
.links
.prev
->vme_next
=
4142 copy
->cpy_hdr
.links
.prev
=
4144 copy
->size
+= copy_size
;
4149 total_size
-= copy_size
;
4152 base_addr
+= copy_size
;
4154 copy
->offset
= new_offset
;
4155 if(next_copy
!= NULL
) {
4156 copy
->cpy_hdr
.nentries
= remaining_entries
;
4157 copy
->cpy_hdr
.links
.next
= next_copy
;
4158 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4159 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4160 copy
->size
= total_size
;
4162 vm_map_lock(dst_map
);
4164 if (!vm_map_lookup_entry(dst_map
,
4165 base_addr
, &tmp_entry
)) {
4166 vm_map_unlock(dst_map
);
4167 return(KERN_INVALID_ADDRESS
);
4169 if (tmp_entry
->in_transition
) {
4170 entry
->needs_wakeup
= TRUE
;
4171 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4176 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(base_addr
));
4182 * Throw away the vm_map_copy object
4184 vm_map_copy_discard(copy
);
4186 return(KERN_SUCCESS
);
4187 }/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
		dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
/*
 *	Routine:	vm_map_copy_overwrite_unaligned
 *
 *	Description:
 *		Physically copy unaligned data
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible, however vm_fault_copy copies
 *		within one memory object, so we have to find the smaller of
 *		"amount left", "source object data size" and "target object
 *		data size".  With unaligned data we don't need to split regions,
 *		therefore the source (copy) object should be one map entry; the
 *		target range may be split over multiple map entries, however.
 *		In any event we are pessimistic about these assumptions.
 *
 *	Assumptions:
 *		dst_map is locked on entry and is returned locked on success,
 *		unlocked on error.
 */
4225 vm_map_copy_overwrite_unaligned(
4227 vm_map_entry_t entry
,
4231 vm_map_entry_t copy_entry
= vm_map_copy_first_entry(copy
);
4232 vm_map_version_t version
;
4233 vm_object_t dst_object
;
4234 vm_object_offset_t dst_offset
;
4235 vm_object_offset_t src_offset
;
4236 vm_object_offset_t entry_offset
;
4237 vm_offset_t entry_end
;
4242 kern_return_t kr
= KERN_SUCCESS
;
4244 vm_map_lock_write_to_read(dst_map
);
4246 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4247 amount_left
= copy
->size
;
4249 * unaligned so we never clipped this entry, we need the offset into
4250 * the vm_object not just the data.
4252 while (amount_left
> 0) {
4254 if (entry
== vm_map_to_entry(dst_map
)) {
4255 vm_map_unlock_read(dst_map
);
4256 return KERN_INVALID_ADDRESS
;
4259 /* "start" must be within the current map entry */
4260 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4262 dst_offset
= start
- entry
->vme_start
;
4264 dst_size
= entry
->vme_end
- start
;
4266 src_size
= copy_entry
->vme_end
-
4267 (copy_entry
->vme_start
+ src_offset
);
4269 if (dst_size
< src_size
) {
4271 * we can only copy dst_size bytes before
4272 * we have to get the next destination entry
4274 copy_size
= dst_size
;
4277 * we can only copy src_size bytes before
4278 * we have to get the next source copy entry
4280 copy_size
= src_size
;
4283 if (copy_size
> amount_left
) {
4284 copy_size
= amount_left
;
4287 * Entry needs copy, create a shadow shadow object for
4288 * Copy on write region.
4290 if (entry
->needs_copy
&&
4291 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4293 if (vm_map_lock_read_to_write(dst_map
)) {
4294 vm_map_lock_read(dst_map
);
4297 vm_object_shadow(&entry
->object
.vm_object
,
4299 (vm_size_t
)(entry
->vme_end
4300 - entry
->vme_start
));
4301 entry
->needs_copy
= FALSE
;
4302 vm_map_lock_write_to_read(dst_map
);
4304 dst_object
= entry
->object
.vm_object
;
4306 * unlike with the virtual (aligned) copy we're going
4307 * to fault on it therefore we need a target object.
4309 if (dst_object
== VM_OBJECT_NULL
) {
4310 if (vm_map_lock_read_to_write(dst_map
)) {
4311 vm_map_lock_read(dst_map
);
4314 dst_object
= vm_object_allocate((vm_size_t
)
4315 entry
->vme_end
- entry
->vme_start
);
4316 entry
->object
.vm_object
= dst_object
;
4318 vm_map_lock_write_to_read(dst_map
);
4321 * Take an object reference and unlock map. The "entry" may
4322 * disappear or change when the map is unlocked.
4324 vm_object_reference(dst_object
);
4325 version
.main_timestamp
= dst_map
->timestamp
;
4326 entry_offset
= entry
->offset
;
4327 entry_end
= entry
->vme_end
;
4328 vm_map_unlock_read(dst_map
);
4330 * Copy as much as possible in one pass
4333 copy_entry
->object
.vm_object
,
4334 copy_entry
->offset
+ src_offset
,
4337 entry_offset
+ dst_offset
,
4343 src_offset
+= copy_size
;
4344 amount_left
-= copy_size
;
4346 * Release the object reference
4348 vm_object_deallocate(dst_object
);
4350 * If a hard error occurred, return it now
4352 if (kr
!= KERN_SUCCESS
)
4355 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4356 || amount_left
== 0)
4359 * all done with this copy entry, dispose.
4361 vm_map_copy_entry_unlink(copy
, copy_entry
);
4362 vm_object_deallocate(copy_entry
->object
.vm_object
);
4363 vm_map_copy_entry_dispose(copy
, copy_entry
);
4365 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4366 == vm_map_copy_to_entry(copy
) && amount_left
) {
4368 * not finished copying but run out of source
4370 return KERN_INVALID_ADDRESS
;
4375 if (amount_left
== 0)
4376 return KERN_SUCCESS
;
4378 vm_map_lock_read(dst_map
);
4379 if (version
.main_timestamp
== dst_map
->timestamp
) {
4380 if (start
== entry_end
) {
4382 * destination region is split. Use the version
4383 * information to avoid a lookup in the normal
4386 entry
= entry
->vme_next
;
4388 * should be contiguous. Fail if we encounter
4389 * a hole in the destination.
4391 if (start
!= entry
->vme_start
) {
4392 vm_map_unlock_read(dst_map
);
4393 return KERN_INVALID_ADDRESS
;
4398 * Map version check failed.
4399 * we must lookup the entry because somebody
4400 * might have changed the map behind our backs.
4403 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4405 vm_map_unlock_read(dst_map
);
4406 return KERN_INVALID_ADDRESS
;
4412 vm_map_unlock_read(dst_map
);
4414 return KERN_SUCCESS
;
4415 }/* vm_map_copy_overwrite_unaligned */
/*
 *	Routine:	vm_map_copy_overwrite_aligned
 *
 *	Description:
 *		Does all the vm_trickery possible for whole pages.
 *
 *		If there are no permanent objects in the destination,
 *		and the source and destination map entry zones match,
 *		and the destination map entry is not shared,
 *		then the map entries can be deleted and replaced
 *		with those from the copy.  The following code is the
 *		basic idea of what to do, but there are lots of annoying
 *		little details about getting protection and inheritance
 *		right.  Should add protection, inheritance, and sharing checks
 *		to the above pass and make sure that no wiring is involved.
 */
4437 vm_map_copy_overwrite_aligned(
4439 vm_map_entry_t tmp_entry
,
4445 vm_map_entry_t copy_entry
;
4446 vm_size_t copy_size
;
4448 vm_map_entry_t entry
;
4450 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4451 != vm_map_copy_to_entry(copy
))
4453 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4456 if (entry
== vm_map_to_entry(dst_map
)) {
4457 vm_map_unlock(dst_map
);
4458 return KERN_INVALID_ADDRESS
;
4460 size
= (entry
->vme_end
- entry
->vme_start
);
4462 * Make sure that no holes popped up in the
4463 * address map, and that the protection is
4464 * still valid, in case the map was unlocked
4468 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4469 && !entry
->needs_copy
)) {
4470 vm_map_unlock(dst_map
);
4471 return(KERN_INVALID_ADDRESS
);
4473 assert(entry
!= vm_map_to_entry(dst_map
));
4476 * Check protection again
4479 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4480 vm_map_unlock(dst_map
);
4481 return(KERN_PROTECTION_FAILURE
);
4485 * Adjust to source size first
4488 if (copy_size
< size
) {
4489 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4494 * Adjust to destination size
4497 if (size
< copy_size
) {
4498 vm_map_copy_clip_end(copy
, copy_entry
,
4499 copy_entry
->vme_start
+ size
);
4503 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4504 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4505 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4508 * If the destination contains temporary unshared memory,
4509 * we can perform the copy by throwing it away and
4510 * installing the source data.
4513 object
= entry
->object
.vm_object
;
4514 if ((!entry
->is_shared
&&
4515 ((object
== VM_OBJECT_NULL
) ||
4516 (object
->internal
&& !object
->true_share
))) ||
4517 entry
->needs_copy
) {
4518 vm_object_t old_object
= entry
->object
.vm_object
;
4519 vm_object_offset_t old_offset
= entry
->offset
;
4520 vm_object_offset_t offset
;
4523 * Ensure that the source and destination aren't
4526 if (old_object
== copy_entry
->object
.vm_object
&&
4527 old_offset
== copy_entry
->offset
) {
4528 vm_map_copy_entry_unlink(copy
, copy_entry
);
4529 vm_map_copy_entry_dispose(copy
, copy_entry
);
4531 if (old_object
!= VM_OBJECT_NULL
)
4532 vm_object_deallocate(old_object
);
4534 start
= tmp_entry
->vme_end
;
4535 tmp_entry
= tmp_entry
->vme_next
;
4539 if (old_object
!= VM_OBJECT_NULL
) {
4540 if(entry
->is_sub_map
) {
4541 if(entry
->use_pmap
) {
4543 pmap_unnest(dst_map
->pmap
,
4546 - entry
->vme_start
);
4548 if(dst_map
->mapped
) {
4549 /* clean up parent */
4551 vm_map_submap_pmap_clean(
4552 dst_map
, entry
->vme_start
,
4554 entry
->object
.sub_map
,
4558 vm_map_submap_pmap_clean(
4559 dst_map
, entry
->vme_start
,
4561 entry
->object
.sub_map
,
4565 entry
->object
.sub_map
);
4567 if(dst_map
->mapped
) {
4568 vm_object_pmap_protect(
4569 entry
->object
.vm_object
,
4577 pmap_remove(dst_map
->pmap
,
4581 vm_object_deallocate(old_object
);
4585 entry
->is_sub_map
= FALSE
;
4586 entry
->object
= copy_entry
->object
;
4587 object
= entry
->object
.vm_object
;
4588 entry
->needs_copy
= copy_entry
->needs_copy
;
4589 entry
->wired_count
= 0;
4590 entry
->user_wired_count
= 0;
4591 offset
= entry
->offset
= copy_entry
->offset
;
4593 vm_map_copy_entry_unlink(copy
, copy_entry
);
4594 vm_map_copy_entry_dispose(copy
, copy_entry
);
4595 #if BAD_OPTIMIZATION
4597 * if we turn this optimization back on
4598 * we need to revisit our use of pmap mappings
4599 * large copies will cause us to run out and panic
4600 * this optimization only saved on average 2 us per page if ALL
4601 * the pages in the source were currently mapped
4602 * and ALL the pages in the dest were touched, if there were fewer
4603 * than 2/3 of the pages touched, this optimization actually cost more cycles
4607 * Try to aggressively enter physical mappings
4608 * (but avoid uninstantiated objects)
4610 if (object
!= VM_OBJECT_NULL
) {
4611 vm_offset_t va
= entry
->vme_start
;
4613 while (va
< entry
->vme_end
) {
4614 register vm_page_t m
;
4618 * Look for the page in the top object
4620 prot
= entry
->protection
;
4621 vm_object_lock(object
);
4622 vm_object_paging_begin(object
);
4624 if ((m
= vm_page_lookup(object
,offset
)) !=
4625 VM_PAGE_NULL
&& !m
->busy
&&
4627 (!m
->unusual
|| (!m
->error
&&
4628 !m
->restart
&& !m
->absent
&&
4629 (prot
& m
->page_lock
) == 0))) {
4632 vm_object_unlock(object
);
4635 * Honor COW obligations
4637 if (entry
->needs_copy
)
4638 prot
&= ~VM_PROT_WRITE
;
4639 /* It is our policy to require */
4640 /* explicit sync from anyone */
4641 /* writing code and then */
4642 /* a pc to execute it. */
4645 PMAP_ENTER(pmap
, va
, m
, prot
,
4646 VM_WIMG_USE_DEFAULT
, FALSE
);
4648 vm_object_lock(object
);
4649 vm_page_lock_queues();
4650 if (!m
->active
&& !m
->inactive
)
4651 vm_page_activate(m
);
4652 vm_page_unlock_queues();
4653 PAGE_WAKEUP_DONE(m
);
4655 vm_object_paging_end(object
);
4656 vm_object_unlock(object
);
4658 offset
+= PAGE_SIZE_64
;
4660 } /* end while (va < entry->vme_end) */
4661 } /* end if (object) */
4664 * Set up for the next iteration. The map
4665 * has not been unlocked, so the next
4666 * address should be at the end of this
4667 * entry, and the next map entry should be
4668 * the one following it.
4671 start
= tmp_entry
->vme_end
;
4672 tmp_entry
= tmp_entry
->vme_next
;
4674 vm_map_version_t version
;
4675 vm_object_t dst_object
= entry
->object
.vm_object
;
4676 vm_object_offset_t dst_offset
= entry
->offset
;
4680 * Take an object reference, and record
4681 * the map version information so that the
4682 * map can be safely unlocked.
4685 vm_object_reference(dst_object
);
4687 /* account for unlock bumping up timestamp */
4688 version
.main_timestamp
= dst_map
->timestamp
+ 1;
4690 vm_map_unlock(dst_map
);
4693 * Copy as much as possible in one pass
4698 copy_entry
->object
.vm_object
,
4708 * Release the object reference
4711 vm_object_deallocate(dst_object
);
4714 * If a hard error occurred, return it now
4717 if (r
!= KERN_SUCCESS
)
4720 if (copy_size
!= 0) {
4722 * Dispose of the copied region
4725 vm_map_copy_clip_end(copy
, copy_entry
,
4726 copy_entry
->vme_start
+ copy_size
);
4727 vm_map_copy_entry_unlink(copy
, copy_entry
);
4728 vm_object_deallocate(copy_entry
->object
.vm_object
);
4729 vm_map_copy_entry_dispose(copy
, copy_entry
);
4733 * Pick up in the destination map where we left off.
4735 * Use the version information to avoid a lookup
4736 * in the normal case.
4740 vm_map_lock(dst_map
);
4741 if (version
.main_timestamp
== dst_map
->timestamp
) {
4742 /* We can safely use saved tmp_entry value */
4744 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4745 tmp_entry
= tmp_entry
->vme_next
;
4747 /* Must do lookup of tmp_entry */
4749 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4750 vm_map_unlock(dst_map
);
4751 return(KERN_INVALID_ADDRESS
);
4753 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4758 return(KERN_SUCCESS
);
4759 }/* vm_map_copy_overwrite_aligned */
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map.  The original space may be optionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result)
{
	kern_return_t	kr;
	vm_map_copy_t	copy;
	int		flags;
	vm_size_t	kalloc_size = sizeof(struct vm_map_copy) + len;

	copy = (vm_map_copy_t) kalloc(kalloc_size);
	if (copy == VM_MAP_COPY_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	copy->type = VM_MAP_COPY_KERNEL_BUFFER;
	copy->size = len;
	copy->offset = 0;
	copy->cpy_kdata = (vm_offset_t) (copy + 1);
	copy->cpy_kalloc_size = kalloc_size;

	if (src_map == kernel_map) {
		bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
		flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	} else {
		kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
		if (kr != KERN_SUCCESS) {
			kfree((vm_offset_t)copy, kalloc_size);
			return kr;
		}
		flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
			VM_MAP_REMOVE_INTERRUPTIBLE;
	}
	if (src_destroy) {
		(void) vm_map_remove(src_map, trunc_page(src_addr),
				     round_page(src_addr + len),
				     flags);
	}
	*copy_result = copy;
	return KERN_SUCCESS;
}
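/*
 * Layout note: the vm_map_copy_t header and the copied data share one
 * kalloc() allocation; cpy_kdata points just past the header ((copy + 1))
 * and cpy_kalloc_size records the total size, so vm_map_copy_discard()
 * can release the whole thing with a single kfree().
 */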
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout_kernel_buffer(
	vm_map_t	map,
	vm_offset_t	*addr,	/* IN/OUT */
	vm_map_copy_t	copy,
	boolean_t	overwrite)
{
	kern_return_t	kr = KERN_SUCCESS;
	thread_act_t	thr_act = current_act();

	if (!overwrite) {

		/*
		 * Allocate space in the target map for the data
		 */
		kr = vm_map_enter(map,
				  addr,
				  round_page(copy->size),
				  (vm_offset_t) 0,
				  TRUE,
				  VM_OBJECT_NULL,
				  (vm_object_offset_t) 0,
				  FALSE,
				  VM_PROT_DEFAULT,
				  VM_PROT_ALL,
				  VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS)
			return(kr);
	}

	/*
	 * Copyout the data from the kernel buffer to the target map.
	 */
	if (thr_act->map == map) {

		/*
		 * If the target map is the current map, just do
		 * the copy.
		 */
		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}
	}
	else {
		vm_map_t oldmap;

		/*
		 * If the target map is another map, assume the
		 * target's address space identity for the duration
		 * of the copy.
		 */
		vm_map_reference(map);
		oldmap = vm_map_switch(map);

		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
				copy->size)) {
			return(KERN_INVALID_ADDRESS);
		}

		(void) vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}

	kfree((vm_offset_t)copy, copy->cpy_kalloc_size);

	return(kr);
}
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *
 *		The copy chain is destroyed.
 *
 *		The arguments are evaluated multiple times.
 */
#define	vm_map_copy_insert(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
	 ->vme_next = (VMCI_where->vme_next);				\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
	 ->vme_prev = VMCI_where;					\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free);		\
	zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy);		\
MACRO_END
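/*
 * The macro splices every entry of the copy chain into the map's entry
 * list immediately after "where", adjusts the map's entry count, and then
 * frees the now-empty copy header back to vm_map_copy_zone -- which is
 * why the copy chain is described above as destroyed.
 */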
/*
 *	Routine:	vm_map_copyout
 *
 *	Description:
 *		Copy out a copy chain ("copy") into newly-allocated
 *		space in the destination map.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
4937 register vm_map_t dst_map
,
4938 vm_offset_t
*dst_addr
, /* OUT */
4939 register vm_map_copy_t copy
)
4942 vm_size_t adjustment
;
4944 vm_object_offset_t vm_copy_start
;
4945 vm_map_entry_t last
;
4947 vm_map_entry_t entry
;
4950 * Check for null copy object.
4953 if (copy
== VM_MAP_COPY_NULL
) {
4955 return(KERN_SUCCESS
);
4959 * Check for special copy object, created
4960 * by vm_map_copyin_object.
4963 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
4964 vm_object_t object
= copy
->cpy_object
;
4966 vm_object_offset_t offset
;
4968 offset
= trunc_page_64(copy
->offset
);
4969 size
= round_page(copy
->size
+
4970 (vm_size_t
)(copy
->offset
- offset
));
4972 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
4973 (vm_offset_t
) 0, TRUE
,
4974 object
, offset
, FALSE
,
4975 VM_PROT_DEFAULT
, VM_PROT_ALL
,
4976 VM_INHERIT_DEFAULT
);
4977 if (kr
!= KERN_SUCCESS
)
4979 /* Account for non-pagealigned copy object */
4980 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
4981 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
4982 return(KERN_SUCCESS
);
4986 * Check for special kernel buffer allocated
4987 * by new_ipc_kmsg_copyin.
4990 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
4991 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
4996 * Find space for the data
4999 vm_copy_start
= trunc_page_64(copy
->offset
);
5000 size
= round_page((vm_size_t
)copy
->offset
+ copy
->size
)
5005 vm_map_lock(dst_map
);
5006 assert(first_free_is_valid(dst_map
));
5007 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
5008 vm_map_min(dst_map
) : last
->vme_end
;
5011 vm_map_entry_t next
= last
->vme_next
;
5012 vm_offset_t end
= start
+ size
;
5014 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
5015 if (dst_map
->wait_for_space
) {
5016 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
5017 assert_wait((event_t
) dst_map
,
5018 THREAD_INTERRUPTIBLE
);
5019 vm_map_unlock(dst_map
);
5020 thread_block((void (*)(void))0);
5024 vm_map_unlock(dst_map
);
5025 return(KERN_NO_SPACE
);
5028 if ((next
== vm_map_to_entry(dst_map
)) ||
5029 (next
->vme_start
>= end
))
5033 start
= last
->vme_end
;
5037 * Since we're going to just drop the map
5038 * entries from the copy into the destination
5039 * map, they must come from the same pool.
5042 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5044 * Mismatches occur when dealing with the default
5048 vm_map_entry_t next
, new;
5051 * Find the zone that the copies were allocated from
5053 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5055 : vm_map_kentry_zone
;
5056 entry
= vm_map_copy_first_entry(copy
);
5059 * Reinitialize the copy so that vm_map_copy_entry_link
5062 copy
->cpy_hdr
.nentries
= 0;
5063 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5064 vm_map_copy_first_entry(copy
) =
5065 vm_map_copy_last_entry(copy
) =
5066 vm_map_copy_to_entry(copy
);
5071 while (entry
!= vm_map_copy_to_entry(copy
)) {
5072 new = vm_map_copy_entry_create(copy
);
5073 vm_map_entry_copy_full(new, entry
);
5074 new->use_pmap
= FALSE
; /* clr address space specifics */
5075 vm_map_copy_entry_link(copy
,
5076 vm_map_copy_last_entry(copy
),
5078 next
= entry
->vme_next
;
5079 zfree(old_zone
, (vm_offset_t
) entry
);
5085 * Adjust the addresses in the copy chain, and
5086 * reset the region attributes.
5089 adjustment
= start
- vm_copy_start
;
5090 for (entry
= vm_map_copy_first_entry(copy
);
5091 entry
!= vm_map_copy_to_entry(copy
);
5092 entry
= entry
->vme_next
) {
5093 entry
->vme_start
+= adjustment
;
5094 entry
->vme_end
+= adjustment
;
5096 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5097 entry
->protection
= VM_PROT_DEFAULT
;
5098 entry
->max_protection
= VM_PROT_ALL
;
5099 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5102 * If the entry is now wired,
5103 * map the pages into the destination map.
5105 if (entry
->wired_count
!= 0) {
5106 register vm_offset_t va
;
5107 vm_object_offset_t offset
;
5108 register vm_object_t object
;
5110 object
= entry
->object
.vm_object
;
5111 offset
= entry
->offset
;
5112 va
= entry
->vme_start
;
5114 pmap_pageable(dst_map
->pmap
,
5119 while (va
< entry
->vme_end
) {
5120 register vm_page_t m
;
5123 * Look up the page in the object.
5124 * Assert that the page will be found in the
5127 * the object was newly created by
5128 * vm_object_copy_slowly, and has
5129 * copies of all of the pages from
5132 * the object was moved from the old
5133 * map entry; because the old map
5134 * entry was wired, all of the pages
5135 * were in the top-level object.
5136 * (XXX not true if we wire pages for
5139 vm_object_lock(object
);
5140 vm_object_paging_begin(object
);
5142 m
= vm_page_lookup(object
, offset
);
5143 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5145 panic("vm_map_copyout: wiring 0x%x", m
);
5148 vm_object_unlock(object
);
5150 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5151 VM_WIMG_USE_DEFAULT
, TRUE
);
5153 vm_object_lock(object
);
5154 PAGE_WAKEUP_DONE(m
);
5155 /* the page is wired, so we don't have to activate */
5156 vm_object_paging_end(object
);
5157 vm_object_unlock(object
);
5159 offset
+= PAGE_SIZE_64
;
5163 else if (size
<= vm_map_aggressive_enter_max
) {
5165 register vm_offset_t va
;
5166 vm_object_offset_t offset
;
5167 register vm_object_t object
;
5170 object
= entry
->object
.vm_object
;
5171 if (object
!= VM_OBJECT_NULL
) {
5173 offset
= entry
->offset
;
5174 va
= entry
->vme_start
;
5175 while (va
< entry
->vme_end
) {
5176 register vm_page_t m
;
5179 * Look up the page in the object.
5180 * Assert that the page will be found
5181 * in the top object if at all...
5183 vm_object_lock(object
);
5184 vm_object_paging_begin(object
);
5186 if (((m
= vm_page_lookup(object
,
5189 !m
->busy
&& !m
->fictitious
&&
5190 !m
->absent
&& !m
->error
) {
5192 vm_object_unlock(object
);
5194 /* honor cow obligations */
5195 prot
= entry
->protection
;
5196 if (entry
->needs_copy
)
5197 prot
&= ~VM_PROT_WRITE
;
5199 PMAP_ENTER(dst_map
->pmap
, va
,
5201 VM_WIMG_USE_DEFAULT
,
5204 vm_object_lock(object
);
5205 vm_page_lock_queues();
5206 if (!m
->active
&& !m
->inactive
)
5207 vm_page_activate(m
);
5208 vm_page_unlock_queues();
5209 PAGE_WAKEUP_DONE(m
);
5211 vm_object_paging_end(object
);
5212 vm_object_unlock(object
);
5214 offset
+= PAGE_SIZE_64
;
5222 * Correct the page alignment for the result
5225 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5228 * Update the hints and the map size
5231 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5233 dst_map
->size
+= size
;
5239 vm_map_copy_insert(dst_map
, last
, copy
);
5241 vm_map_unlock(dst_map
);
5244 * XXX If wiring_required, call vm_map_pageable
5247 return(KERN_SUCCESS
);
5250 boolean_t vm_map_aggressive_enter
; /* not used yet */
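
/*
 * Illustrative sketch (an assumption, not part of the original source, and
 * kept out of the build): the usual pairing of vm_map_copyin_common and
 * vm_map_copyout, moving a region from one map into another and letting the
 * copy object be consumed on success.  All parameter names are placeholders.
 */
#if 0
static kern_return_t
example_move_region(
    vm_map_t    src_map,
    vm_offset_t src_addr,
    vm_size_t   len,
    vm_map_t    dst_map,
    vm_offset_t *dst_addr)      /* OUT */
{
    vm_map_copy_t   copy;
    kern_return_t   kr;

    /* Capture the source region: no src_destroy, no src_volatile,
     * no use_maxprot. */
    kr = vm_map_copyin_common(src_map, src_addr, len,
                              FALSE, FALSE, &copy, FALSE);
    if (kr != KERN_SUCCESS)
        return kr;

    /* Drop the copy chain into newly allocated space in dst_map.  On
     * success the copy object is consumed; on failure we must discard
     * it ourselves. */
    kr = vm_map_copyout(dst_map, dst_addr, copy);
    if (kr != KERN_SUCCESS)
        vm_map_copy_discard(copy);
    return kr;
}
#endif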
/*
 *  Routine:    vm_map_copyin
 *
 *  Description:
 *      Copy the specified region (src_addr, len) from the
 *      source address space (src_map), possibly removing
 *      the region from the source address space (src_destroy).
 *
 *  Returns:
 *      A vm_map_copy_t object (copy_result), suitable for
 *      insertion into another address space (using vm_map_copyout),
 *      copying over another address space region (using
 *      vm_map_copy_overwrite).  If the copy is unused, it
 *      should be destroyed (using vm_map_copy_discard).
 *
 *  In/out conditions:
 *      The source map should not be locked on entry.
 */

typedef struct submap_map {
    vm_map_t    parent_map;
    vm_offset_t base_start;
    vm_offset_t base_end;
    struct submap_map *next;
} submap_map_t;
kern_return_t
vm_map_copyin_common(
    vm_map_t        src_map,
    vm_offset_t     src_addr,
    vm_size_t       len,
    boolean_t       src_destroy,
    boolean_t       src_volatile,
    vm_map_copy_t   *copy_result,   /* OUT */
    boolean_t       use_maxprot)
{
    extern int      msg_ool_size_small;

    vm_map_entry_t  tmp_entry;      /* Result of last map lookup --
                                     * in multi-level lookup, this
                                     * entry contains the actual
                                     * vaddr of interest */
    vm_map_entry_t  new_entry = VM_MAP_ENTRY_NULL;  /* Map entry for copy */
    vm_offset_t     src_start;      /* Start of current entry --
                                     * where copy is taking place now
                                     */
    vm_offset_t     src_end;        /* End of entire region to be
                                     * copied */
    vm_offset_t     base_start;     /* submap fields to save offsets */
                                    /* in original map */
    vm_offset_t     base_end;
    vm_map_t        base_map = src_map;
    vm_map_entry_t  base_entry;
    boolean_t       map_share = FALSE;
    submap_map_t    *parent_maps = NULL;

    register
    vm_map_copy_t   copy;           /* Resulting copy */
    vm_offset_t     copy_addr;
    /*
     *  Check for copies of zero bytes.
     */

    if (len == 0) {
        *copy_result = VM_MAP_COPY_NULL;
        return(KERN_SUCCESS);
    }

    /*
     *  If the copy is sufficiently small, use a kernel buffer instead
     *  of making a virtual copy.  The theory being that the cost of
     *  setting up VM (and taking C-O-W faults) dominates the copy costs
     *  for small regions.
     */
    if ((len < msg_ool_size_small) && !use_maxprot)
        return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
                                           src_destroy, copy_result);

    /*
     *  Compute start and end of region
     */

    src_start = trunc_page(src_addr);
    src_end = round_page(src_addr + len);

    XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);

    /*
     *  Check that the end address doesn't overflow
     */

    if (src_end <= src_start)
        if ((src_end < src_start) || (src_start != 0))
            return(KERN_INVALID_ADDRESS);

    /*
     *  Allocate a header element for the list.
     *
     *  Use the start and end in the header to
     *  remember the endpoints prior to rounding.
     */

    copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
    vm_map_copy_first_entry(copy) =
        vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
    copy->type = VM_MAP_COPY_ENTRY_LIST;
    copy->cpy_hdr.nentries = 0;
    copy->cpy_hdr.entries_pageable = TRUE;

    copy->offset = src_addr;
    copy->size = len;

    new_entry = vm_map_copy_entry_create(copy);
#define RETURN(x)                                               \
    MACRO_BEGIN                                                 \
    vm_map_unlock(src_map);                                     \
    if(src_map != base_map)                                     \
        vm_map_deallocate(src_map);                             \
    if (new_entry != VM_MAP_ENTRY_NULL)                         \
        vm_map_copy_entry_dispose(copy,new_entry);              \
    vm_map_copy_discard(copy);                                  \
    {                                                           \
        submap_map_t *ptr;                                      \
                                                                \
        for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
            parent_maps=parent_maps->next;                      \
            if (ptr->parent_map != base_map)                    \
                vm_map_deallocate(ptr->parent_map);             \
            kfree((vm_offset_t)ptr, sizeof(submap_map_t));      \
        }                                                       \
    }                                                           \
    MACRO_RETURN(x);                                            \
    MACRO_END
    /*
     *  Find the beginning of the region.
     */

    vm_map_lock(src_map);

    if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
        RETURN(KERN_INVALID_ADDRESS);
    if(!tmp_entry->is_sub_map) {
        vm_map_clip_start(src_map, tmp_entry, src_start);
    }
    /* set for later submap fix-up */
    copy_addr = src_start;
5407 * Go through entries until we get to the end.
5412 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5413 vm_size_t src_size
; /* Size of source
5414 * map entry (in both
5419 vm_object_t src_object
; /* Object to copy */
5420 vm_object_offset_t src_offset
;
5422 boolean_t src_needs_copy
; /* Should source map
5424 * for copy-on-write?
5427 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5429 boolean_t was_wired
; /* Was source wired? */
5430 vm_map_version_t version
; /* Version before locks
5431 * dropped to make copy
5433 kern_return_t result
; /* Return value from
5434 * copy_strategically.
5436 while(tmp_entry
->is_sub_map
) {
5437 vm_size_t submap_len
;
5440 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5441 ptr
->next
= parent_maps
;
5443 ptr
->parent_map
= src_map
;
5444 ptr
->base_start
= src_start
;
5445 ptr
->base_end
= src_end
;
5446 submap_len
= tmp_entry
->vme_end
- src_start
;
5447 if(submap_len
> (src_end
-src_start
))
5448 submap_len
= src_end
-src_start
;
5449 ptr
->base_start
+= submap_len
;
5451 src_start
-= tmp_entry
->vme_start
;
5452 src_start
+= tmp_entry
->offset
;
5453 src_end
= src_start
+ submap_len
;
5454 src_map
= tmp_entry
->object
.sub_map
;
5455 vm_map_lock(src_map
);
5456 /* keep an outstanding reference for all maps in */
5457 /* the parents tree except the base map */
5458 vm_map_reference(src_map
);
5459 vm_map_unlock(ptr
->parent_map
);
5460 if (!vm_map_lookup_entry(
5461 src_map
, src_start
, &tmp_entry
))
5462 RETURN(KERN_INVALID_ADDRESS
);
5464 if(!tmp_entry
->is_sub_map
)
5465 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5466 src_entry
= tmp_entry
;
5468 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5469 ((tmp_entry
->object
.vm_object
->wimg_bits
!= VM_WIMG_DEFAULT
) ||
5470 (tmp_entry
->object
.vm_object
->phys_contiguous
))) {
5471 /* This is not, cannot be supported for now */
5472 /* we need a description of the caching mode */
5473 /* reflected in the object before we can */
5474 /* support copyin, and then the support will */
5475 /* be for direct copy */
5476 RETURN(KERN_PROTECTION_FAILURE
);
5479 * Create a new address map entry to hold the result.
5480 * Fill in the fields from the appropriate source entries.
5481 * We must unlock the source map to do this if we need
5482 * to allocate a map entry.
5484 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5485 version
.main_timestamp
= src_map
->timestamp
;
5486 vm_map_unlock(src_map
);
5488 new_entry
= vm_map_copy_entry_create(copy
);
5490 vm_map_lock(src_map
);
5491 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5492 if (!vm_map_lookup_entry(src_map
, src_start
,
5494 RETURN(KERN_INVALID_ADDRESS
);
5496 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5497 continue; /* restart w/ new tmp_entry */
5502 * Verify that the region can be read.
5504 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5506 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5507 RETURN(KERN_PROTECTION_FAILURE
);
5510 * Clip against the endpoints of the entire region.
5513 vm_map_clip_end(src_map
, src_entry
, src_end
);
5515 src_size
= src_entry
->vme_end
- src_start
;
5516 src_object
= src_entry
->object
.vm_object
;
5517 src_offset
= src_entry
->offset
;
5518 was_wired
= (src_entry
->wired_count
!= 0);
5520 vm_map_entry_copy(new_entry
, src_entry
);
5521 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5524 * Attempt non-blocking copy-on-write optimizations.
5528 (src_object
== VM_OBJECT_NULL
||
5529 (src_object
->internal
&& !src_object
->true_share
5532 * If we are destroying the source, and the object
5533 * is internal, we can move the object reference
5534 * from the source to the copy. The copy is
5535 * copy-on-write only if the source is.
5536 * We make another reference to the object, because
5537 * destroying the source entry will deallocate it.
5539 vm_object_reference(src_object
);
5542 * Copy is always unwired. vm_map_copy_entry
5543 * set its wired count to zero.
5546 goto CopySuccessful
;
5551 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5552 src_object
, new_entry
, new_entry
->object
.vm_object
,
5555 vm_object_copy_quickly(
5556 &new_entry
->object
.vm_object
,
5560 &new_entry_needs_copy
)) {
5562 new_entry
->needs_copy
= new_entry_needs_copy
;
5565 * Handle copy-on-write obligations
5568 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5569 if (tmp_entry
->is_shared
||
5570 tmp_entry
->object
.vm_object
->true_share
||
5572 vm_map_unlock(src_map
);
5573 new_entry
->object
.vm_object
=
5574 vm_object_copy_delayed(
5578 /* dec ref gained in copy_quickly */
5579 vm_object_lock(src_object
);
5580 src_object
->ref_count
--;
5581 assert(src_object
->ref_count
> 0);
5582 vm_object_res_deallocate(src_object
);
5583 vm_object_unlock(src_object
);
5584 vm_map_lock(src_map
);
5586 * it turns out that we have
5587 * finished our copy. No matter
5588 * what the state of the map
5589 * we will lock it again here
5590 * knowing that if there is
5591 * additional data to copy
5592 * it will be checked at
5593 * the top of the loop
5595 * Don't do timestamp check
5599 vm_object_pmap_protect(
5603 (src_entry
->is_shared
?
5606 src_entry
->vme_start
,
5607 src_entry
->protection
&
5610 tmp_entry
->needs_copy
= TRUE
;
5615 * The map has never been unlocked, so it's safe
5616 * to move to the next entry rather than doing
5620 goto CopySuccessful
;
5623 new_entry
->needs_copy
= FALSE
;
5626 * Take an object reference, so that we may
5627 * release the map lock(s).
5630 assert(src_object
!= VM_OBJECT_NULL
);
5631 vm_object_reference(src_object
);
5634 * Record the timestamp for later verification.
5638 version
.main_timestamp
= src_map
->timestamp
;
5639 vm_map_unlock(src_map
); /* Increments timestamp once! */
5646 vm_object_lock(src_object
);
5647 result
= vm_object_copy_slowly(
5652 &new_entry
->object
.vm_object
);
5653 new_entry
->offset
= 0;
5654 new_entry
->needs_copy
= FALSE
;
5656 result
= vm_object_copy_strategically(src_object
,
5659 &new_entry
->object
.vm_object
,
5661 &new_entry_needs_copy
);
5663 new_entry
->needs_copy
= new_entry_needs_copy
;
5667 if (result
!= KERN_SUCCESS
&&
5668 result
!= KERN_MEMORY_RESTART_COPY
) {
5669 vm_map_lock(src_map
);
5674 * Throw away the extra reference
5677 vm_object_deallocate(src_object
);
5680 * Verify that the map has not substantially
5681 * changed while the copy was being made.
5684 vm_map_lock(src_map
);
5686 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5687 goto VerificationSuccessful
;
5690 * Simple version comparison failed.
5692 * Retry the lookup and verify that the
5693 * same object/offset are still present.
5695 * [Note: a memory manager that colludes with
5696 * the calling task can detect that we have
5697 * cheated. While the map was unlocked, the
5698 * mapping could have been changed and restored.]
5701 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5702 RETURN(KERN_INVALID_ADDRESS
);
5705 src_entry
= tmp_entry
;
5706 vm_map_clip_start(src_map
, src_entry
, src_start
);
5708 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5710 src_entry
->max_protection
& VM_PROT_READ
== 0)
5711 goto VerificationFailed
;
5713 if (src_entry
->vme_end
< new_entry
->vme_end
)
5714 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5716 if ((src_entry
->object
.vm_object
!= src_object
) ||
5717 (src_entry
->offset
!= src_offset
) ) {
5720 * Verification failed.
5722 * Start over with this top-level entry.
5725 VerificationFailed
: ;
5727 vm_object_deallocate(new_entry
->object
.vm_object
);
5728 tmp_entry
= src_entry
;
5733 * Verification succeeded.
5736 VerificationSuccessful
: ;
5738 if (result
== KERN_MEMORY_RESTART_COPY
)
5748 * Link in the new copy entry.
5751 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5755 * Determine whether the entire region
5758 src_start
= new_entry
->vme_end
;
5759 new_entry
= VM_MAP_ENTRY_NULL
;
5760 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5761 if (src_map
!= base_map
) {
5765 assert(ptr
!= NULL
);
5766 parent_maps
= parent_maps
->next
;
5767 vm_map_unlock(src_map
);
5768 vm_map_deallocate(src_map
);
5769 vm_map_lock(ptr
->parent_map
);
5770 src_map
= ptr
->parent_map
;
5771 src_start
= ptr
->base_start
;
5772 src_end
= ptr
->base_end
;
5773 if ((src_end
> src_start
) &&
5774 !vm_map_lookup_entry(
5775 src_map
, src_start
, &tmp_entry
))
5776 RETURN(KERN_INVALID_ADDRESS
);
5777 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5778 if(parent_maps
== NULL
)
5780 src_entry
= tmp_entry
->vme_prev
;
5784 if ((src_start
>= src_end
) && (src_end
!= 0))
5788 * Verify that there are no gaps in the region
5791 tmp_entry
= src_entry
->vme_next
;
5792 if ((tmp_entry
->vme_start
!= src_start
) ||
5793 (tmp_entry
== vm_map_to_entry(src_map
)))
5794 RETURN(KERN_INVALID_ADDRESS
);
    /*
     *  If the source should be destroyed, do it now, since the
     *  copy was successful.
     */
    if (src_destroy) {
        (void) vm_map_delete(src_map,
                             trunc_page(src_addr),
                             src_end,
                             (src_map == kernel_map) ?
                                VM_MAP_REMOVE_KUNWIRE :
                                VM_MAP_NO_FLAGS);
    }

    vm_map_unlock(src_map);

    /* Fix-up start and end points in copy.  This is necessary */
    /* when the various entries in the copy object were picked */
    /* up from different sub-maps */

    tmp_entry = vm_map_copy_first_entry(copy);
    while (tmp_entry != vm_map_copy_to_entry(copy)) {
        tmp_entry->vme_end = copy_addr +
            (tmp_entry->vme_end - tmp_entry->vme_start);
        tmp_entry->vme_start = copy_addr;
        copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
        tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
    }

    *copy_result = copy;
    return(KERN_SUCCESS);
}
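
/*
 * Orientation note (an assumption about vm/vm_map.h, not verified here): the
 * vm_map_copyin() form used elsewhere in the kernel is conventionally a thin
 * wrapper over vm_map_copyin_common with src_volatile and use_maxprot fixed
 * to FALSE, roughly as sketched below.
 */
#if 0
#define example_vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
    vm_map_copyin_common((src_map), (src_addr), (len), (src_destroy),   \
                         FALSE, (copy_result), FALSE)
#endif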
/*
 *  vm_map_copyin_object:
 *
 *  Create a copy object from an object.
 *  Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
    vm_object_t         object,
    vm_object_offset_t  offset,         /* offset of region in object */
    vm_object_size_t    size,           /* size of region in object */
    vm_map_copy_t       *copy_result)   /* OUT */
{
    vm_map_copy_t   copy;               /* Resulting copy */

    /*
     *  We drop the object into a special copy object
     *  that contains the object directly.
     */

    copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
    copy->type = VM_MAP_COPY_OBJECT;
    copy->cpy_object = object;
    copy->cpy_index = 0;
    copy->offset = offset;
    copy->size = size;

    *copy_result = copy;
    return(KERN_SUCCESS);
}
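
/*
 * Illustrative sketch (an assumption, not part of the original source, and
 * guarded out of the build): wrapping an existing, referenced VM object in a
 * copy object so it can later be mapped with vm_map_copyout.  The object is
 * a placeholder supplied by the hypothetical caller, who donates a reference.
 */
#if 0
static kern_return_t
example_wrap_object(
    vm_object_t         object,         /* caller donates a reference */
    vm_object_offset_t  offset,
    vm_object_size_t    size,
    vm_map_copy_t       *copy_result)   /* OUT */
{
    /* The copy object simply records (object, offset, size); the
     * VM_MAP_COPY_OBJECT case in vm_map_copyout maps it later. */
    return vm_map_copyin_object(object, offset, size, copy_result);
}
#endif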
void
vm_map_fork_share(
    vm_map_t        old_map,
    vm_map_entry_t  old_entry,
    vm_map_t        new_map)
{
    vm_object_t     object;
    vm_map_entry_t  new_entry;
    kern_return_t   result;
5874 * New sharing code. New map entry
5875 * references original object. Internal
5876 * objects use asynchronous copy algorithm for
5877 * future copies. First make sure we have
5878 * the right object. If we need a shadow,
5879 * or someone else already has one, then
5880 * make a new shadow and share it.
5883 object
= old_entry
->object
.vm_object
;
5884 if (old_entry
->is_sub_map
) {
5885 assert(old_entry
->wired_count
== 0);
5887 if(old_entry
->use_pmap
) {
5888 result
= pmap_nest(new_map
->pmap
,
5889 (old_entry
->object
.sub_map
)->pmap
,
5890 old_entry
->vme_start
,
5891 old_entry
->vme_end
- old_entry
->vme_start
);
5893 panic("vm_map_fork_share: pmap_nest failed!");
5896 } else if (object
== VM_OBJECT_NULL
) {
5897 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5898 old_entry
->vme_start
));
5899 old_entry
->offset
= 0;
5900 old_entry
->object
.vm_object
= object
;
5901 assert(!old_entry
->needs_copy
);
5902 } else if (object
->copy_strategy
!=
5903 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5906 * We are already using an asymmetric
5907 * copy, and therefore we already have
5911 assert(! old_entry
->needs_copy
);
5913 else if (old_entry
->needs_copy
|| /* case 1 */
5914 object
->shadowed
|| /* case 2 */
5915 (!object
->true_share
&& /* case 3 */
5916 !old_entry
->is_shared
&&
5918 (vm_size_t
)(old_entry
->vme_end
-
5919 old_entry
->vme_start
)))) {
5922 * We need to create a shadow.
5923 * There are three cases here.
5924 * In the first case, we need to
5925 * complete a deferred symmetrical
5926 * copy that we participated in.
5927 * In the second and third cases,
5928 * we need to create the shadow so
5929 * that changes that we make to the
5930 * object do not interfere with
5931 * any symmetrical copies which
 * have occurred (case 2) or which
5933 * might occur (case 3).
5935 * The first case is when we had
5936 * deferred shadow object creation
5937 * via the entry->needs_copy mechanism.
5938 * This mechanism only works when
5939 * only one entry points to the source
5940 * object, and we are about to create
5941 * a second entry pointing to the
5942 * same object. The problem is that
5943 * there is no way of mapping from
5944 * an object to the entries pointing
5945 * to it. (Deferred shadow creation
 * works with one entry because it occurs
5947 * at fault time, and we walk from the
5948 * entry to the object when handling
5951 * The second case is when the object
5952 * to be shared has already been copied
5953 * with a symmetric copy, but we point
5954 * directly to the object without
5955 * needs_copy set in our entry. (This
5956 * can happen because different ranges
5957 * of an object can be pointed to by
5958 * different entries. In particular,
5959 * a single entry pointing to an object
5960 * can be split by a call to vm_inherit,
5961 * which, combined with task_create, can
5962 * result in the different entries
5963 * having different needs_copy values.)
5964 * The shadowed flag in the object allows
5965 * us to detect this case. The problem
5966 * with this case is that if this object
5967 * has or will have shadows, then we
5968 * must not perform an asymmetric copy
5969 * of this object, since such a copy
5970 * allows the object to be changed, which
5971 * will break the previous symmetrical
5972 * copies (which rely upon the object
5973 * not changing). In a sense, the shadowed
5974 * flag says "don't change this object".
5975 * We fix this by creating a shadow
5976 * object for this object, and sharing
5977 * that. This works because we are free
5978 * to change the shadow object (and thus
5979 * to use an asymmetric copy strategy);
5980 * this is also semantically correct,
5981 * since this object is temporary, and
5982 * therefore a copy of the object is
5983 * as good as the object itself. (This
5984 * is not true for permanent objects,
5985 * since the pager needs to see changes,
5986 * which won't happen if the changes
5987 * are made to a copy.)
5989 * The third case is when the object
5990 * to be shared has parts sticking
5991 * outside of the entry we're working
5992 * with, and thus may in the future
5993 * be subject to a symmetrical copy.
5994 * (This is a preemptive version of
5998 assert(!(object
->shadowed
&& old_entry
->is_shared
));
5999 vm_object_shadow(&old_entry
->object
.vm_object
,
6001 (vm_size_t
) (old_entry
->vme_end
-
6002 old_entry
->vme_start
));
6005 * If we're making a shadow for other than
6006 * copy on write reasons, then we have
6007 * to remove write permission.
6010 if (!old_entry
->needs_copy
&&
6011 (old_entry
->protection
& VM_PROT_WRITE
)) {
6012 if(old_map
->mapped
) {
6013 vm_object_pmap_protect(
6014 old_entry
->object
.vm_object
,
6016 (old_entry
->vme_end
-
6017 old_entry
->vme_start
),
6019 old_entry
->vme_start
,
6020 old_entry
->protection
& ~VM_PROT_WRITE
);
6022 pmap_protect(old_map
->pmap
,
6023 old_entry
->vme_start
,
6025 old_entry
->protection
& ~VM_PROT_WRITE
);
6029 old_entry
->needs_copy
= FALSE
;
6030 object
= old_entry
->object
.vm_object
;
6034 * If object was using a symmetric copy strategy,
6035 * change its copy strategy to the default
6036 * asymmetric copy strategy, which is copy_delay
6037 * in the non-norma case and copy_call in the
6038 * norma case. Bump the reference count for the
6042 if(old_entry
->is_sub_map
) {
6043 vm_map_lock(old_entry
->object
.sub_map
);
6044 vm_map_reference(old_entry
->object
.sub_map
);
6045 vm_map_unlock(old_entry
->object
.sub_map
);
6047 vm_object_lock(object
);
6048 object
->ref_count
++;
6049 vm_object_res_reference(object
);
6050 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6051 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6053 vm_object_unlock(object
);
6057 * Clone the entry, using object ref from above.
6058 * Mark both entries as shared.
6061 new_entry
= vm_map_entry_create(new_map
);
6062 vm_map_entry_copy(new_entry
, old_entry
);
6063 old_entry
->is_shared
= TRUE
;
6064 new_entry
->is_shared
= TRUE
;
6067 * Insert the entry into the new map -- we
6068 * know we're inserting at the end of the new
6072 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6075 * Update the physical map
6078 if (old_entry
->is_sub_map
) {
6079 /* Bill Angell pmap support goes here */
6081 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6082 old_entry
->vme_end
- old_entry
->vme_start
,
6083 old_entry
->vme_start
);
boolean_t
vm_map_fork_copy(
    vm_map_t        old_map,
    vm_map_entry_t  *old_entry_p,
    vm_map_t        new_map)
{
    vm_map_entry_t old_entry = *old_entry_p;
;
6094 vm_size_t entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6095 vm_offset_t start
= old_entry
->vme_start
;
6097 vm_map_entry_t last
= vm_map_last_entry(new_map
);
6099 vm_map_unlock(old_map
);
6101 * Use maxprot version of copyin because we
6102 * care about whether this memory can ever
6103 * be accessed, not just whether it's accessible
6106 if (vm_map_copyin_maxprot(old_map
, start
, entry_size
, FALSE
, ©
)
6109 * The map might have changed while it
6110 * was unlocked, check it again. Skip
6111 * any blank space or permanently
6112 * unreadable region.
6114 vm_map_lock(old_map
);
6115 if (!vm_map_lookup_entry(old_map
, start
, &last
) ||
6116 last
->max_protection
& VM_PROT_READ
==
6118 last
= last
->vme_next
;
6120 *old_entry_p
= last
;
6123 * XXX For some error returns, want to
6124 * XXX skip to the next element. Note
6125 * that INVALID_ADDRESS and
6126 * PROTECTION_FAILURE are handled above.
6133 * Insert the copy into the new map
6136 vm_map_copy_insert(new_map
, last
, copy
);
6139 * Pick up the traversal at the end of
6140 * the copied region.
6143 vm_map_lock(old_map
);
6144 start
+= entry_size
;
6145 if (! vm_map_lookup_entry(old_map
, start
, &last
)) {
6146 last
= last
->vme_next
;
6148 vm_map_clip_start(old_map
, last
, start
);
6150 *old_entry_p
= last
;
/*
 *  vm_map_fork:
 *
 *  Create and return a new map based on the old
 *  map, according to the inheritance values on the
 *  regions in that map.
 *
 *  The source map must not be locked.
 */
vm_map_t
vm_map_fork(
    vm_map_t    old_map)
{
    pmap_t          new_pmap = pmap_create((vm_size_t) 0);
    vm_map_t        new_map;
    vm_map_entry_t  old_entry;
6171 vm_size_t new_size
= 0, entry_size
;
6172 vm_map_entry_t new_entry
;
6173 boolean_t src_needs_copy
;
6174 boolean_t new_entry_needs_copy
;
6176 vm_map_reference_swap(old_map
);
6177 vm_map_lock(old_map
);
6179 new_map
= vm_map_create(new_pmap
,
6180 old_map
->min_offset
,
6181 old_map
->max_offset
,
6182 old_map
->hdr
.entries_pageable
);
6185 old_entry
= vm_map_first_entry(old_map
);
6186 old_entry
!= vm_map_to_entry(old_map
);
6189 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6191 switch (old_entry
->inheritance
) {
6192 case VM_INHERIT_NONE
:
6195 case VM_INHERIT_SHARE
:
6196 vm_map_fork_share(old_map
, old_entry
, new_map
);
6197 new_size
+= entry_size
;
6200 case VM_INHERIT_COPY
:
6203 * Inline the copy_quickly case;
6204 * upon failure, fall back on call
6205 * to vm_map_fork_copy.
6208 if(old_entry
->is_sub_map
)
6210 if ((old_entry
->wired_count
!= 0) ||
6211 ((old_entry
->object
.vm_object
!= NULL
) &&
6212 (old_entry
->object
.vm_object
->true_share
))) {
6213 goto slow_vm_map_fork_copy
;
6216 new_entry
= vm_map_entry_create(new_map
);
6217 vm_map_entry_copy(new_entry
, old_entry
);
6218 /* clear address space specifics */
6219 new_entry
->use_pmap
= FALSE
;
6221 if (! vm_object_copy_quickly(
6222 &new_entry
->object
.vm_object
,
6224 (old_entry
->vme_end
-
6225 old_entry
->vme_start
),
6227 &new_entry_needs_copy
)) {
6228 vm_map_entry_dispose(new_map
, new_entry
);
6229 goto slow_vm_map_fork_copy
;
6233 * Handle copy-on-write obligations
6236 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6237 vm_object_pmap_protect(
6238 old_entry
->object
.vm_object
,
6240 (old_entry
->vme_end
-
6241 old_entry
->vme_start
),
6242 ((old_entry
->is_shared
6246 old_entry
->vme_start
,
6247 old_entry
->protection
& ~VM_PROT_WRITE
);
6249 old_entry
->needs_copy
= TRUE
;
6251 new_entry
->needs_copy
= new_entry_needs_copy
;
6254 * Insert the entry at the end
6258 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6260 new_size
+= entry_size
;
6263 slow_vm_map_fork_copy
:
6264 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6265 new_size
+= entry_size
;
6269 old_entry
= old_entry
->vme_next
;
6272 new_map
->size
= new_size
;
6273 vm_map_unlock(old_map
);
6274 vm_map_deallocate(old_map
);
/*
 *  vm_map_lookup_locked:
 *
 *  Finds the VM object, offset, and
 *  protection for a given virtual address in the
 *  specified map, assuming a page fault of the
 *  type specified.
 *
 *  Returns the (object, offset, protection) for
 *  this address, whether it is wired down, and whether
 *  this map has the only reference to the data in question.
 *  In order to later verify this lookup, a "version"
 *  is returned.
 *
 *  The map MUST be locked by the caller and WILL be
 *  locked on exit.  In order to guarantee the
 *  existence of the returned object, it is returned
 *  locked.
 *
 *  If a lookup is requested with "write protection"
 *  specified, the map may be changed to perform virtual
 *  copying operations, although the data referenced will
 *  remain the same.
 */
kern_return_t
vm_map_lookup_locked(
    vm_map_t                *var_map,       /* IN/OUT */
    register vm_offset_t    vaddr,
    register vm_prot_t      fault_type,
    vm_map_version_t        *out_version,   /* OUT */
    vm_object_t             *object,        /* OUT */
    vm_object_offset_t      *offset,        /* OUT */
    vm_prot_t               *out_prot,      /* OUT */
    boolean_t               *wired,         /* OUT */
    int                     *behavior,      /* OUT */
    vm_object_offset_t      *lo_offset,     /* OUT */
    vm_object_offset_t      *hi_offset,     /* OUT */
    vm_map_t                *pmap_map)
{
6319 vm_map_entry_t entry
;
6320 register vm_map_t map
= *var_map
;
6321 vm_map_t old_map
= *var_map
;
6322 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6323 vm_offset_t cow_parent_vaddr
;
6324 vm_offset_t old_start
;
6325 vm_offset_t old_end
;
6326 register vm_prot_t prot
;
6332 * If the map has an interesting hint, try it before calling
6333 * full blown lookup routine.
6336 mutex_lock(&map
->s_lock
);
6338 mutex_unlock(&map
->s_lock
);
6340 if ((entry
== vm_map_to_entry(map
)) ||
6341 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6342 vm_map_entry_t tmp_entry
;
6345 * Entry was either not a valid hint, or the vaddr
6346 * was not contained in the entry, so do a full lookup.
6348 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6349 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6350 vm_map_unlock(cow_sub_map_parent
);
6351 if((*pmap_map
!= map
)
6352 && (*pmap_map
!= cow_sub_map_parent
))
6353 vm_map_unlock(*pmap_map
);
6354 return KERN_INVALID_ADDRESS
;
6359 if(map
== old_map
) {
6360 old_start
= entry
->vme_start
;
6361 old_end
= entry
->vme_end
;
6365 * Handle submaps. Drop lock on upper map, submap is
6370 if (entry
->is_sub_map
) {
6371 vm_offset_t local_vaddr
;
6372 vm_offset_t end_delta
;
6373 vm_offset_t start_delta
;
6374 vm_offset_t object_start_delta
;
6375 vm_map_entry_t submap_entry
;
6376 boolean_t mapped_needs_copy
=FALSE
;
6378 local_vaddr
= vaddr
;
6380 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6381 /* if pmap_map equals map we unlock below */
6382 if ((*pmap_map
!= map
) &&
6383 (*pmap_map
!= cow_sub_map_parent
))
6384 vm_map_unlock(*pmap_map
);
6385 *pmap_map
= entry
->object
.sub_map
;
6388 if(entry
->needs_copy
) {
6389 if (!mapped_needs_copy
) {
6390 if (vm_map_lock_read_to_write(map
)) {
6391 vm_map_lock_read(map
);
6392 if(*pmap_map
== entry
->object
.sub_map
)
6396 vm_map_lock_read(entry
->object
.sub_map
);
6397 cow_sub_map_parent
= map
;
6398 /* reset base to map before cow object */
6399 /* this is the map which will accept */
6400 /* the new cow object */
6401 old_start
= entry
->vme_start
;
6402 old_end
= entry
->vme_end
;
6403 cow_parent_vaddr
= vaddr
;
6404 mapped_needs_copy
= TRUE
;
6406 vm_map_lock_read(entry
->object
.sub_map
);
6407 if((cow_sub_map_parent
!= map
) &&
6412 vm_map_lock_read(entry
->object
.sub_map
);
6413 /* leave map locked if it is a target */
6414 /* cow sub_map above otherwise, just */
6415 /* follow the maps down to the object */
6416 /* here we unlock knowing we are not */
6417 /* revisiting the map. */
6418 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6419 vm_map_unlock_read(map
);
6422 *var_map
= map
= entry
->object
.sub_map
;
6424 /* calculate the offset in the submap for vaddr */
6425 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6428 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6429 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6430 vm_map_unlock(cow_sub_map_parent
);
6432 if((*pmap_map
!= map
)
6433 && (*pmap_map
!= cow_sub_map_parent
)) {
6434 vm_map_unlock(*pmap_map
);
6437 return KERN_INVALID_ADDRESS
;
6439 /* find the attenuated shadow of the underlying object */
6440 /* on our target map */
6442 /* in english the submap object may extend beyond the */
6443 /* region mapped by the entry or, may only fill a portion */
6444 /* of it. For our purposes, we only care if the object */
6445 /* doesn't fill. In this case the area which will */
6446 /* ultimately be clipped in the top map will only need */
6447 /* to be as big as the portion of the underlying entry */
6448 /* which is mapped */
6449 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6450 submap_entry
->vme_start
- entry
->offset
: 0;
6453 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6454 submap_entry
->vme_end
?
6455 0 : (entry
->offset
+
6456 (old_end
- old_start
))
6457 - submap_entry
->vme_end
;
6459 old_start
+= start_delta
;
6460 old_end
-= end_delta
;
6462 if(submap_entry
->is_sub_map
) {
6463 entry
= submap_entry
;
6464 vaddr
= local_vaddr
;
6465 goto submap_recurse
;
6468 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6470 vm_object_t copy_object
;
6471 vm_offset_t local_start
;
6472 vm_offset_t local_end
;
6473 boolean_t copied_slowly
= FALSE
;
6475 if (vm_map_lock_read_to_write(map
)) {
6476 vm_map_lock_read(map
);
6477 old_start
-= start_delta
;
6478 old_end
+= end_delta
;
6483 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6484 submap_entry
->object
.vm_object
=
6487 (submap_entry
->vme_end
6488 - submap_entry
->vme_start
));
6489 submap_entry
->offset
= 0;
6491 local_start
= local_vaddr
-
6492 (cow_parent_vaddr
- old_start
);
6493 local_end
= local_vaddr
+
6494 (old_end
- cow_parent_vaddr
);
6495 vm_map_clip_start(map
, submap_entry
, local_start
);
6496 vm_map_clip_end(map
, submap_entry
, local_end
);
6498 /* This is the COW case, lets connect */
6499 /* an entry in our space to the underlying */
6500 /* object in the submap, bypassing the */
6504 if(submap_entry
->wired_count
!= 0) {
6506 submap_entry
->object
.vm_object
);
6507 vm_object_copy_slowly(
6508 submap_entry
->object
.vm_object
,
6509 submap_entry
->offset
,
6510 submap_entry
->vme_end
-
6511 submap_entry
->vme_start
,
6514 copied_slowly
= TRUE
;
6517 /* set up shadow object */
6518 copy_object
= submap_entry
->object
.vm_object
;
6519 vm_object_reference(copy_object
);
6520 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6521 submap_entry
->needs_copy
= TRUE
;
6522 vm_object_pmap_protect(
6523 submap_entry
->object
.vm_object
,
6524 submap_entry
->offset
,
6525 submap_entry
->vme_end
-
6526 submap_entry
->vme_start
,
6527 (submap_entry
->is_shared
6529 PMAP_NULL
: map
->pmap
,
6530 submap_entry
->vme_start
,
6531 submap_entry
->protection
&
            /* This works differently than the */
6537 /* normal submap case. We go back */
6538 /* to the parent of the cow map and*/
6539 /* clip out the target portion of */
6540 /* the sub_map, substituting the */
6541 /* new copy object, */
6544 local_start
= old_start
;
6545 local_end
= old_end
;
6546 map
= cow_sub_map_parent
;
6547 *var_map
= cow_sub_map_parent
;
6548 vaddr
= cow_parent_vaddr
;
6549 cow_sub_map_parent
= NULL
;
6551 if(!vm_map_lookup_entry(map
,
6553 vm_object_deallocate(
6555 vm_map_lock_write_to_read(map
);
6556 return KERN_INVALID_ADDRESS
;
6559 /* clip out the portion of space */
6560 /* mapped by the sub map which */
6561 /* corresponds to the underlying */
6563 vm_map_clip_start(map
, entry
, local_start
);
6564 vm_map_clip_end(map
, entry
, local_end
);
6567 /* substitute copy object for */
6568 /* shared map entry */
6569 vm_map_deallocate(entry
->object
.sub_map
);
6570 entry
->is_sub_map
= FALSE
;
6571 entry
->object
.vm_object
= copy_object
;
6573 entry
->protection
|= VM_PROT_WRITE
;
6574 entry
->max_protection
|= VM_PROT_WRITE
;
6577 entry
->needs_copy
= FALSE
;
6578 entry
->is_shared
= FALSE
;
6580 entry
->offset
= submap_entry
->offset
;
6581 entry
->needs_copy
= TRUE
;
6582 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6583 entry
->inheritance
= VM_INHERIT_COPY
;
6585 entry
->is_shared
= TRUE
;
6587 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6588 entry
->inheritance
= VM_INHERIT_COPY
;
6590 vm_map_lock_write_to_read(map
);
6592 if((cow_sub_map_parent
)
6593 && (cow_sub_map_parent
!= *pmap_map
)
6594 && (cow_sub_map_parent
!= map
)) {
6595 vm_map_unlock(cow_sub_map_parent
);
6597 entry
= submap_entry
;
6598 vaddr
= local_vaddr
;
6603 * Check whether this task is allowed to have
6607 prot
= entry
->protection
;
6608 if ((fault_type
& (prot
)) != fault_type
) {
6609 if (*pmap_map
!= map
) {
6610 vm_map_unlock(*pmap_map
);
6613 return KERN_PROTECTION_FAILURE
;
6617 * If this page is not pageable, we have to get
6618 * it for all possible accesses.
6621 if (*wired
= (entry
->wired_count
!= 0))
6622 prot
= fault_type
= entry
->protection
;
6625 * If the entry was copy-on-write, we either ...
6628 if (entry
->needs_copy
) {
6630 * If we want to write the page, we may as well
6631 * handle that now since we've got the map locked.
6633 * If we don't need to write the page, we just
6634 * demote the permissions allowed.
6637 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6639 * Make a new object, and place it in the
6640 * object chain. Note that no new references
6641 * have appeared -- one just moved from the
6642 * map to the new object.
6645 if (vm_map_lock_read_to_write(map
)) {
6646 vm_map_lock_read(map
);
6649 vm_object_shadow(&entry
->object
.vm_object
,
6651 (vm_size_t
) (entry
->vme_end
-
6654 entry
->object
.vm_object
->shadowed
= TRUE
;
6655 entry
->needs_copy
= FALSE
;
6656 vm_map_lock_write_to_read(map
);
6660 * We're attempting to read a copy-on-write
6661 * page -- don't allow writes.
6664 prot
&= (~VM_PROT_WRITE
);
6669 * Create an object if necessary.
6671 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6673 if (vm_map_lock_read_to_write(map
)) {
6674 vm_map_lock_read(map
);
6678 entry
->object
.vm_object
= vm_object_allocate(
6679 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6681 vm_map_lock_write_to_read(map
);
6685 * Return the object/offset from this entry. If the entry
6686 * was copy-on-write or empty, it has been fixed up. Also
6687 * return the protection.
6690 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6691 *object
= entry
->object
.vm_object
;
6693 *behavior
= entry
->behavior
;
6694 *lo_offset
= entry
->offset
;
6695 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6698 * Lock the object to prevent it from disappearing
6701 vm_object_lock(*object
);
6704 * Save the version number
6707 out_version
->main_timestamp
= map
->timestamp
;
    return KERN_SUCCESS;
}

/*
 *  vm_map_verify:
 *
 *  Verifies that the map in question has not changed
 *  since the given version.  If successful, the map
 *  will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
    register vm_map_t           map,
    register vm_map_version_t   *version)   /* REF */
{
    boolean_t   result;

    vm_map_lock_read(map);
    result = (map->timestamp == version->main_timestamp);

    if (!result)
        vm_map_unlock_read(map);

    return(result);
}

/*
 *  vm_map_verify_done:
 *
 *  Releases locks acquired by a vm_map_verify.
 *
 *  This is now a macro in vm/vm_map.h.  It does a
 *  vm_map_unlock_read on the map.
 */
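
/*
 * Illustrative sketch (an assumption, not part of the original source, and
 * guarded out of the build): the lookup/verify protocol.  A fault handler
 * records the version returned by vm_map_lookup_locked, drops the map lock
 * while it works, and later revalidates with vm_map_verify before relying on
 * the earlier lookup; vm_map_verify_done releases the read lock taken by a
 * successful verify.
 */
#if 0
static void
example_verify_protocol(vm_map_t map, vm_map_version_t *version)
{
    if (vm_map_verify(map, version)) {
        /* Map is unchanged and read-locked; the earlier
         * (object, offset, protection) result is still valid. */
        vm_map_verify_done(map, version);
    } else {
        /* Timestamp moved on: redo the lookup from scratch. */
    }
}
#endif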
/*
 *  vm_region:
 *
 *  User call to obtain information about a region in
 *  a task's address map.  Currently, only one flavor is
 *  supported.
 *
 *  XXX The reserved and behavior fields cannot be filled
 *      in until the vm merge from the IK is completed, and
 *      vm_reserve is implemented.
 *
 *  XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region(
    vm_map_t        map,
    vm_offset_t     *address,       /* IN/OUT */
6764 vm_size_t
*size
, /* OUT */
6765 vm_region_flavor_t flavor
, /* IN */
6766 vm_region_info_t info
, /* OUT */
6767 mach_msg_type_number_t
*count
, /* IN/OUT */
6768 ipc_port_t
*object_name
) /* OUT */
6770 vm_map_entry_t tmp_entry
;
6772 vm_map_entry_t entry
;
6775 vm_region_basic_info_t basic
;
6776 vm_region_extended_info_t extended
;
6777 vm_region_top_info_t top
;
6779 if (map
== VM_MAP_NULL
)
6780 return(KERN_INVALID_ARGUMENT
);
6784 case VM_REGION_BASIC_INFO
:
6786 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6787 return(KERN_INVALID_ARGUMENT
);
6789 basic
= (vm_region_basic_info_t
) info
;
6790 *count
= VM_REGION_BASIC_INFO_COUNT
;
6792 vm_map_lock_read(map
);
6795 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6796 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6797 vm_map_unlock_read(map
);
6798 return(KERN_INVALID_ADDRESS
);
6804 start
= entry
->vme_start
;
6806 basic
->offset
= entry
->offset
;
6807 basic
->protection
= entry
->protection
;
6808 basic
->inheritance
= entry
->inheritance
;
6809 basic
->max_protection
= entry
->max_protection
;
6810 basic
->behavior
= entry
->behavior
;
6811 basic
->user_wired_count
= entry
->user_wired_count
;
6812 basic
->reserved
= entry
->is_sub_map
;
6814 *size
= (entry
->vme_end
- start
);
6816 if (object_name
) *object_name
= IP_NULL
;
6817 if (entry
->is_sub_map
) {
6818 basic
->shared
= FALSE
;
6820 basic
->shared
= entry
->is_shared
;
6823 vm_map_unlock_read(map
);
6824 return(KERN_SUCCESS
);
6826 case VM_REGION_EXTENDED_INFO
:
6829 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6830 return(KERN_INVALID_ARGUMENT
);
6832 extended
= (vm_region_extended_info_t
) info
;
6833 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6835 vm_map_lock_read(map
);
6838 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6839 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6840 vm_map_unlock_read(map
);
6841 return(KERN_INVALID_ADDRESS
);
6846 start
= entry
->vme_start
;
6848 extended
->protection
= entry
->protection
;
6849 extended
->user_tag
= entry
->alias
;
6850 extended
->pages_resident
= 0;
6851 extended
->pages_swapped_out
= 0;
6852 extended
->pages_shared_now_private
= 0;
6853 extended
->pages_dirtied
= 0;
6854 extended
->external_pager
= 0;
6855 extended
->shadow_depth
= 0;
6857 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6859 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6860 extended
->share_mode
= SM_PRIVATE
;
6863 *object_name
= IP_NULL
;
6865 *size
= (entry
->vme_end
- start
);
6867 vm_map_unlock_read(map
);
6868 return(KERN_SUCCESS
);
6870 case VM_REGION_TOP_INFO
:
6873 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6874 return(KERN_INVALID_ARGUMENT
);
6876 top
= (vm_region_top_info_t
) info
;
6877 *count
= VM_REGION_TOP_INFO_COUNT
;
6879 vm_map_lock_read(map
);
6882 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6883 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6884 vm_map_unlock_read(map
);
6885 return(KERN_INVALID_ADDRESS
);
6891 start
= entry
->vme_start
;
6893 top
->private_pages_resident
= 0;
6894 top
->shared_pages_resident
= 0;
6896 vm_region_top_walk(entry
, top
);
6899 *object_name
= IP_NULL
;
6901 *size
= (entry
->vme_end
- start
);
6903 vm_map_unlock_read(map
);
6904 return(KERN_SUCCESS
);
    default:
        return(KERN_INVALID_ARGUMENT);
    }
}
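
/*
 * Illustrative sketch (an assumption, not part of the original source, and
 * guarded out of the build): querying the basic attributes of the region
 * containing an address with the in-kernel vm_region entry point and the
 * VM_REGION_BASIC_INFO flavor.  All names here are placeholders.
 */
#if 0
static kern_return_t
example_region_basic(vm_map_t map, vm_offset_t addr)
{
    vm_offset_t                     address = addr;     /* IN/OUT */
    vm_size_t                       size;
    vm_region_basic_info_data_t     info;
    mach_msg_type_number_t          count = VM_REGION_BASIC_INFO_COUNT;
    ipc_port_t                      object_name;

    /* On success, address/size describe the containing region and info
     * holds its protection, inheritance and wiring summary. */
    return vm_region(map, &address, &size, VM_REGION_BASIC_INFO,
                     (vm_region_info_t) &info, &count, &object_name);
}
#endif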
/*
 *  vm_region_recurse: A form of vm_region which follows the
 *  submaps in a target map
 */
kern_return_t
vm_region_recurse(
    vm_map_t        map,
    vm_offset_t     *address,       /* IN/OUT */
6921 vm_size_t
*size
, /* OUT */
6922 natural_t
*nesting_depth
, /* IN/OUT */
6923 vm_region_recurse_info_t info
, /* IN/OUT */
6924 mach_msg_type_number_t
*count
) /* IN/OUT */
6926 vm_map_entry_t tmp_entry
;
6928 vm_map_entry_t entry
;
6932 unsigned int recurse_count
;
6935 vm_map_entry_t base_entry
;
6936 vm_offset_t base_next
;
6937 vm_offset_t base_addr
;
6938 vm_offset_t baddr_start_delta
;
6939 vm_region_submap_info_t submap_info
;
6940 vm_region_extended_info_data_t extended
;
6942 if (map
== VM_MAP_NULL
)
6943 return(KERN_INVALID_ARGUMENT
);
6945 submap_info
= (vm_region_submap_info_t
) info
;
6946 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6948 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6949 return(KERN_INVALID_ARGUMENT
);
6953 recurse_count
= *nesting_depth
;
6955 LOOKUP_NEXT_BASE_ENTRY
:
6956 vm_map_lock_read(map
);
6957 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6958 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6959 vm_map_unlock_read(map
);
6960 return(KERN_INVALID_ADDRESS
);
6965 *size
= entry
->vme_end
- entry
->vme_start
;
6966 start
= entry
->vme_start
;
6968 baddr_start_delta
= *address
- start
;
6969 base_next
= entry
->vme_end
;
6972 while(entry
->is_sub_map
&& recurse_count
) {
6974 vm_map_lock_read(entry
->object
.sub_map
);
6977 if(entry
== base_entry
) {
6978 start
= entry
->offset
;
6979 start
+= *address
- entry
->vme_start
;
6982 submap
= entry
->object
.sub_map
;
6983 vm_map_unlock_read(map
);
6986 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6987 if ((entry
= tmp_entry
->vme_next
)
6988 == vm_map_to_entry(map
)) {
6989 vm_map_unlock_read(map
);
6994 goto LOOKUP_NEXT_BASE_ENTRY
;
7000 if(start
<= entry
->vme_start
) {
7001 vm_offset_t old_start
= start
;
7002 if(baddr_start_delta
) {
7003 base_addr
+= (baddr_start_delta
);
7004 *size
-= baddr_start_delta
;
7005 baddr_start_delta
= 0;
7008 (base_addr
+= (entry
->vme_start
- start
))) {
7009 vm_map_unlock_read(map
);
7014 goto LOOKUP_NEXT_BASE_ENTRY
;
7016 *size
-= entry
->vme_start
- start
;
7017 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7018 *size
= entry
->vme_end
- entry
->vme_start
;
7022 if(baddr_start_delta
) {
7023 if((start
- entry
->vme_start
)
7024 < baddr_start_delta
) {
7025 base_addr
+= start
- entry
->vme_start
;
7026 *size
-= start
- entry
->vme_start
;
7028 base_addr
+= baddr_start_delta
;
7029 *size
+= baddr_start_delta
;
7031 baddr_start_delta
= 0;
7033 base_addr
+= entry
->vme_start
;
7034 if(base_addr
>= base_next
) {
7035 vm_map_unlock_read(map
);
7040 goto LOOKUP_NEXT_BASE_ENTRY
;
7042 if (*size
> (entry
->vme_end
- start
))
7043 *size
= entry
->vme_end
- start
;
7045 start
= entry
->vme_start
- start
;
7048 start
+= entry
->offset
;
7051 *nesting_depth
-= recurse_count
;
7052 if(entry
!= base_entry
) {
7053 start
= entry
->vme_start
+ (start
- entry
->offset
);
7057 submap_info
->user_tag
= entry
->alias
;
7058 submap_info
->offset
= entry
->offset
;
7059 submap_info
->protection
= entry
->protection
;
7060 submap_info
->inheritance
= entry
->inheritance
;
7061 submap_info
->max_protection
= entry
->max_protection
;
7062 submap_info
->behavior
= entry
->behavior
;
7063 submap_info
->user_wired_count
= entry
->user_wired_count
;
7064 submap_info
->is_submap
= entry
->is_sub_map
;
7065 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7066 *address
= base_addr
;
7069 extended
.pages_resident
= 0;
7070 extended
.pages_swapped_out
= 0;
7071 extended
.pages_shared_now_private
= 0;
7072 extended
.pages_dirtied
= 0;
7073 extended
.external_pager
= 0;
7074 extended
.shadow_depth
= 0;
7076 if(!entry
->is_sub_map
) {
7077 vm_region_walk(entry
, &extended
, entry
->offset
,
7078 entry
->vme_end
- start
, map
, start
);
7079 submap_info
->share_mode
= extended
.share_mode
;
7080 if (extended
.external_pager
&& extended
.ref_count
== 2
7081 && extended
.share_mode
== SM_SHARED
)
7082 submap_info
->share_mode
= SM_PRIVATE
;
7083 submap_info
->ref_count
= extended
.ref_count
;
7086 submap_info
->share_mode
= SM_TRUESHARED
;
7088 submap_info
->share_mode
= SM_PRIVATE
;
7089 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7092 submap_info
->pages_resident
= extended
.pages_resident
;
7093 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7094 submap_info
->pages_shared_now_private
=
7095 extended
.pages_shared_now_private
;
7096 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7097 submap_info
->external_pager
= extended
.external_pager
;
7098 submap_info
->shadow_depth
= extended
.shadow_depth
;
7100 vm_map_unlock_read(map
);
7101 return(KERN_SUCCESS
);
/*
 *  TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *  Goes away after regular vm_region_recurse function migrates to
 *  64 bits
 *  vm_region_recurse: A form of vm_region which follows the
 *  submaps in a target map
 */
kern_return_t
vm_region_recurse_64(
    vm_map_t        map,
    vm_offset_t     *address,       /* IN/OUT */
7117 vm_size_t
*size
, /* OUT */
7118 natural_t
*nesting_depth
, /* IN/OUT */
7119 vm_region_recurse_info_t info
, /* IN/OUT */
7120 mach_msg_type_number_t
*count
) /* IN/OUT */
7122 vm_map_entry_t tmp_entry
;
7124 vm_map_entry_t entry
;
7128 unsigned int recurse_count
;
7131 vm_map_entry_t base_entry
;
7132 vm_offset_t base_next
;
7133 vm_offset_t base_addr
;
7134 vm_offset_t baddr_start_delta
;
7135 vm_region_submap_info_64_t submap_info
;
7136 vm_region_extended_info_data_t extended
;
7138 if (map
== VM_MAP_NULL
)
7139 return(KERN_INVALID_ARGUMENT
);
7141 submap_info
= (vm_region_submap_info_64_t
) info
;
7142 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
7144 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
7145 return(KERN_INVALID_ARGUMENT
);
7149 recurse_count
= *nesting_depth
;
7151 LOOKUP_NEXT_BASE_ENTRY
:
7152 vm_map_lock_read(map
);
7153 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7154 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7155 vm_map_unlock_read(map
);
7156 return(KERN_INVALID_ADDRESS
);
7161 *size
= entry
->vme_end
- entry
->vme_start
;
7162 start
= entry
->vme_start
;
7164 baddr_start_delta
= *address
- start
;
7165 base_next
= entry
->vme_end
;
7168 while(entry
->is_sub_map
&& recurse_count
) {
7170 vm_map_lock_read(entry
->object
.sub_map
);
7173 if(entry
== base_entry
) {
7174 start
= entry
->offset
;
7175 start
+= *address
- entry
->vme_start
;
7178 submap
= entry
->object
.sub_map
;
7179 vm_map_unlock_read(map
);
7182 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7183 if ((entry
= tmp_entry
->vme_next
)
7184 == vm_map_to_entry(map
)) {
7185 vm_map_unlock_read(map
);
7190 goto LOOKUP_NEXT_BASE_ENTRY
;
7196 if(start
<= entry
->vme_start
) {
7197 vm_offset_t old_start
= start
;
7198 if(baddr_start_delta
) {
7199 base_addr
+= (baddr_start_delta
);
7200 *size
-= baddr_start_delta
;
7201 baddr_start_delta
= 0;
7204 (base_addr
+= (entry
->vme_start
- start
))) {
7205 vm_map_unlock_read(map
);
7210 goto LOOKUP_NEXT_BASE_ENTRY
;
7212 *size
-= entry
->vme_start
- start
;
7213 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7214 *size
= entry
->vme_end
- entry
->vme_start
;
7218 if(baddr_start_delta
) {
7219 if((start
- entry
->vme_start
)
7220 < baddr_start_delta
) {
7221 base_addr
+= start
- entry
->vme_start
;
7222 *size
-= start
- entry
->vme_start
;
7224 base_addr
+= baddr_start_delta
;
7225 *size
+= baddr_start_delta
;
7227 baddr_start_delta
= 0;
7229 base_addr
+= entry
->vme_start
;
7230 if(base_addr
>= base_next
) {
7231 vm_map_unlock_read(map
);
7236 goto LOOKUP_NEXT_BASE_ENTRY
;
7238 if (*size
> (entry
->vme_end
- start
))
7239 *size
= entry
->vme_end
- start
;
7241 start
= entry
->vme_start
- start
;
7244 start
+= entry
->offset
;
7247 *nesting_depth
-= recurse_count
;
7248 if(entry
!= base_entry
) {
7249 start
= entry
->vme_start
+ (start
- entry
->offset
);
7253 submap_info
->user_tag
= entry
->alias
;
7254 submap_info
->offset
= entry
->offset
;
7255 submap_info
->protection
= entry
->protection
;
7256 submap_info
->inheritance
= entry
->inheritance
;
7257 submap_info
->max_protection
= entry
->max_protection
;
7258 submap_info
->behavior
= entry
->behavior
;
7259 submap_info
->user_wired_count
= entry
->user_wired_count
;
7260 submap_info
->is_submap
= entry
->is_sub_map
;
7261 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7262 *address
= base_addr
;
7265 extended
.pages_resident
= 0;
7266 extended
.pages_swapped_out
= 0;
7267 extended
.pages_shared_now_private
= 0;
7268 extended
.pages_dirtied
= 0;
7269 extended
.external_pager
= 0;
7270 extended
.shadow_depth
= 0;
7272 if(!entry
->is_sub_map
) {
7273 vm_region_walk(entry
, &extended
, entry
->offset
,
7274 entry
->vme_end
- start
, map
, start
);
7275 submap_info
->share_mode
= extended
.share_mode
;
7276 if (extended
.external_pager
&& extended
.ref_count
== 2
7277 && extended
.share_mode
== SM_SHARED
)
7278 submap_info
->share_mode
= SM_PRIVATE
;
7279 submap_info
->ref_count
= extended
.ref_count
;
7282 submap_info
->share_mode
= SM_TRUESHARED
;
7284 submap_info
->share_mode
= SM_PRIVATE
;
7285 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7288 submap_info
->pages_resident
= extended
.pages_resident
;
7289 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7290 submap_info
->pages_shared_now_private
=
7291 extended
.pages_shared_now_private
;
7292 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7293 submap_info
->external_pager
= extended
.external_pager
;
7294 submap_info
->shadow_depth
= extended
.shadow_depth
;
7296 vm_map_unlock_read(map
);
7297 return(KERN_SUCCESS
);
/*
 *  TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *  Goes away after regular vm_region function migrates to
 *  64 bits
 */
kern_return_t
vm_region_64(
    vm_map_t        map,
    vm_offset_t     *address,       /* IN/OUT */
7312 vm_size_t
*size
, /* OUT */
7313 vm_region_flavor_t flavor
, /* IN */
7314 vm_region_info_t info
, /* OUT */
7315 mach_msg_type_number_t
*count
, /* IN/OUT */
7316 ipc_port_t
*object_name
) /* OUT */
7318 vm_map_entry_t tmp_entry
;
7320 vm_map_entry_t entry
;
7323 vm_region_basic_info_64_t basic
;
7324 vm_region_extended_info_t extended
;
7325 vm_region_top_info_t top
;
7327 if (map
== VM_MAP_NULL
)
7328 return(KERN_INVALID_ARGUMENT
);
7332 case VM_REGION_BASIC_INFO
:
7334 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
7335 return(KERN_INVALID_ARGUMENT
);
7337 basic
= (vm_region_basic_info_64_t
) info
;
7338 *count
= VM_REGION_BASIC_INFO_COUNT
;
7340 vm_map_lock_read(map
);
7343 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7344 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7345 vm_map_unlock_read(map
);
7346 return(KERN_INVALID_ADDRESS
);
7352 start
= entry
->vme_start
;
7354 basic
->offset
= entry
->offset
;
7355 basic
->protection
= entry
->protection
;
7356 basic
->inheritance
= entry
->inheritance
;
7357 basic
->max_protection
= entry
->max_protection
;
7358 basic
->behavior
= entry
->behavior
;
7359 basic
->user_wired_count
= entry
->user_wired_count
;
7360 basic
->reserved
= entry
->is_sub_map
;
7362 *size
= (entry
->vme_end
- start
);
7364 if (object_name
) *object_name
= IP_NULL
;
7365 if (entry
->is_sub_map
) {
7366 basic
->shared
= FALSE
;
7368 basic
->shared
= entry
->is_shared
;
7371 vm_map_unlock_read(map
);
7372 return(KERN_SUCCESS
);
7374 case VM_REGION_EXTENDED_INFO
:
7377 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
7378 return(KERN_INVALID_ARGUMENT
);
7380 extended
= (vm_region_extended_info_t
) info
;
7381 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
7383 vm_map_lock_read(map
);
7386 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7387 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7388 vm_map_unlock_read(map
);
7389 return(KERN_INVALID_ADDRESS
);
7394 start
= entry
->vme_start
;
7396 extended
->protection
= entry
->protection
;
7397 extended
->user_tag
= entry
->alias
;
7398 extended
->pages_resident
= 0;
7399 extended
->pages_swapped_out
= 0;
7400 extended
->pages_shared_now_private
= 0;
7401 extended
->pages_dirtied
= 0;
7402 extended
->external_pager
= 0;
7403 extended
->shadow_depth
= 0;
7405 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
7407 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
7408 extended
->share_mode
= SM_PRIVATE
;
7411 *object_name
= IP_NULL
;
7413 *size
= (entry
->vme_end
- start
);
7415 vm_map_unlock_read(map
);
7416 return(KERN_SUCCESS
);
7418 case VM_REGION_TOP_INFO
:
7421 if (*count
< VM_REGION_TOP_INFO_COUNT
)
7422 return(KERN_INVALID_ARGUMENT
);
7424 top
= (vm_region_top_info_t
) info
;
7425 *count
= VM_REGION_TOP_INFO_COUNT
;
7427 vm_map_lock_read(map
);
7430 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7431 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7432 vm_map_unlock_read(map
);
7433 return(KERN_INVALID_ADDRESS
);
7439 start
= entry
->vme_start
;
7441 top
->private_pages_resident
= 0;
7442 top
->shared_pages_resident
= 0;
7444 vm_region_top_walk(entry
, top
);
7447 *object_name
= IP_NULL
;
7449 *size
= (entry
->vme_end
- start
);
7451 vm_map_unlock_read(map
);
7452 return(KERN_SUCCESS
);
7455 return(KERN_INVALID_ARGUMENT
);
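/*
 *	Example (sketch): a kernel-internal caller asking vm_region_64 for
 *	VM_REGION_BASIC_INFO.  The wrapper, its name and its locals are
 *	hypothetical and not part of this file; only the vm_region_64 call
 *	itself reflects the interface above.
 */
#if 0
static kern_return_t
example_region_basic(vm_map_t map, vm_offset_t *addr, vm_size_t *len)
{
	struct vm_region_basic_info_64	info;
	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t			name;

	/* On success, *addr/*len describe the entry containing *addr. */
	return vm_region_64(map, addr, len, VM_REGION_BASIC_INFO,
			    (vm_region_info_t) &info, &count, &name);
}
#endif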
void
vm_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
{
	register struct vm_object *obj, *tmp_obj;
	register int		   ref_count;

	if (entry->object.vm_object == 0 || entry->is_sub_map) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	if (obj->shadow) {
		if (ref_count == 1)
			top->private_pages_resident = obj->resident_page_count;
		else
			top->shared_pages_resident = obj->resident_page_count;
		top->ref_count = ref_count;
		top->share_mode = SM_COW;

		while (tmp_obj = obj->shadow) {
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);
			obj = tmp_obj;

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
				ref_count--;

			top->shared_pages_resident += obj->resident_page_count;
			top->ref_count += ref_count - 1;
		}
	} else {
		if (entry->needs_copy) {
			top->share_mode = SM_COW;
			top->shared_pages_resident = obj->resident_page_count;
		}
		if (ref_count == 1 ||
		   (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
			top->share_mode = SM_PRIVATE;
			top->private_pages_resident = obj->resident_page_count;
		} else {
			top->share_mode = SM_SHARED;
			top->shared_pages_resident = obj->resident_page_count;
		}
		top->ref_count = ref_count;
	}
	top->obj_id = (int)obj;

	vm_object_unlock(obj);
}
void
vm_region_walk(
	vm_map_entry_t			entry,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	vm_offset_t			range,
	vm_map_t			map,
	vm_offset_t			va)
{
	register struct vm_object *obj, *tmp_obj;
	register vm_offset_t	last_offset;
	register int		i;
	register int		ref_count;
	void vm_region_look_for_page();

	if ((entry->object.vm_object == 0) ||
		(entry->is_sub_map) ||
		(entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
		vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

	if (extended->shadow_depth || entry->needs_copy)
		extended->share_mode = SM_COW;
	else {
		if (ref_count == 1)
			extended->share_mode = SM_PRIVATE;
		else {
			if (obj->true_share)
				extended->share_mode = SM_TRUESHARED;
			else
				extended->share_mode = SM_SHARED;
		}
	}
	extended->ref_count = ref_count - extended->shadow_depth;

	for (i = 0; i < extended->shadow_depth; i++) {
		if ((tmp_obj = obj->shadow) == 0)
			break;
		vm_object_lock(tmp_obj);
		vm_object_unlock(obj);

		if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
			ref_count--;

		extended->ref_count += ref_count;
		obj = tmp_obj;
	}
	vm_object_unlock(obj);

	if (extended->share_mode == SM_SHARED) {
		register vm_map_entry_t	cur;
		register vm_map_entry_t	last;
		int	my_refs;

		obj = entry->object.vm_object;
		last = vm_map_to_entry(map);
		my_refs = 0;

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;
		for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
			my_refs += vm_region_count_obj_refs(cur, obj);

		if (my_refs == ref_count)
			extended->share_mode = SM_PRIVATE_ALIASED;
		else if (my_refs > 1)
			extended->share_mode = SM_SHARED_ALIASED;
	}
}
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
	vm_object_t			object,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	int				max_refcnt,
	int				depth,
	vm_map_t			map,
	vm_offset_t			va)
{
	register vm_page_t	p;
	register vm_object_t	shadow;
	register int		ref_count;
	vm_object_t		caller_object;

	shadow = object->shadow;
	caller_object = object;

	while (TRUE) {

		if ( !(object->pager_trusted) && !(object->internal))
			extended->external_pager = 1;

		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			if (shadow && (max_refcnt == 1))
				extended->pages_shared_now_private++;

			if (p->dirty || pmap_is_modified(p->phys_addr))
				extended->pages_dirtied++;
			extended->pages_resident++;

			if (object != caller_object)
				vm_object_unlock(object);

			return;
		}
		if (object->existence_map) {
			if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

				extended->pages_swapped_out++;

				if (object != caller_object)
					vm_object_unlock(object);

				return;
			}
		}
		if (shadow) {
			vm_object_lock(shadow);

			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
				ref_count--;

			if (++depth > extended->shadow_depth)
				extended->shadow_depth = depth;

			if (ref_count > max_refcnt)
				max_refcnt = ref_count;

			if (object != caller_object)
				vm_object_unlock(object);

			object = shadow;
			shadow = object->shadow;
			offset = offset + object->shadow_offset;
			continue;
		}
		if (object != caller_object)
			vm_object_unlock(object);
		break;
	}
}
int
vm_region_count_obj_refs(
	vm_map_entry_t	entry,
	vm_object_t	object)
{
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

	if (entry->is_sub_map)
		ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			if (tmp_obj = chk_obj->shadow)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}
/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(
	vm_map_t	map,
	vm_offset_t	start)
{
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;
	vm_map_entry_t	next_entry;

	vm_map_lock(map);
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

		(prev_entry->vme_end == this_entry->vme_start) &&

		(prev_entry->is_shared == FALSE) &&
		(prev_entry->is_sub_map == FALSE) &&

		(this_entry->is_shared == FALSE) &&
		(this_entry->is_sub_map == FALSE) &&

		(prev_entry->inheritance == this_entry->inheritance) &&
		(prev_entry->protection == this_entry->protection) &&
		(prev_entry->max_protection == this_entry->max_protection) &&
		(prev_entry->behavior == this_entry->behavior) &&
		(prev_entry->wired_count == this_entry->wired_count) &&
		(prev_entry->user_wired_count == this_entry->user_wired_count)&&
		(prev_entry->in_transition == FALSE) &&
		(this_entry->in_transition == FALSE) &&

		(prev_entry->needs_copy == this_entry->needs_copy) &&

		(prev_entry->object.vm_object == this_entry->object.vm_object)&&
		((prev_entry->offset +
			(prev_entry->vme_end - prev_entry->vme_start))
			== this_entry->offset)
	) {
		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->vme_end = this_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
		counter(c_vm_map_simplified_lower++);
	}
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

		(next_entry->vme_start == this_entry->vme_end) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->inheritance == this_entry->inheritance) &&
		(next_entry->protection == this_entry->protection) &&
		(next_entry->max_protection == this_entry->max_protection) &&
		(next_entry->behavior == this_entry->behavior) &&
		(next_entry->wired_count == this_entry->wired_count) &&
		(next_entry->user_wired_count == this_entry->user_wired_count)&&
		(this_entry->in_transition == FALSE) &&
		(next_entry->in_transition == FALSE) &&

		(next_entry->needs_copy == this_entry->needs_copy) &&

		(next_entry->object.vm_object == this_entry->object.vm_object)&&
		((this_entry->offset +
			(this_entry->vme_end - this_entry->vme_start))
			== next_entry->offset)
	) {
		vm_map_entry_unlink(map, next_entry);
		this_entry->vme_end = next_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(next_entry->object.vm_object);
		vm_map_entry_dispose(map, next_entry);
		counter(c_vm_map_simplified_upper++);
	}
	counter(c_vm_map_simplify_called++);
	vm_map_unlock(map);
}
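/*
 *	Example (sketch): a hypothetical internal caller letting the map try
 *	to coalesce entries around a boundary it just created.  The helper
 *	below is illustrative only; vm_map_simplify takes and drops the map
 *	lock itself, so the map must be unlocked when it is called.
 */
#if 0
static void
example_simplify_boundary(vm_map_t map, vm_offset_t boundary)
{
	vm_map_simplify(map, boundary);	/* merges with prev/next if compatible */
}
#endif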
/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cachability etc. for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here,
 *		everything else in the pmap module. If any non-volatile
 *		information must be kept, the pmap module should handle
 *		it itself. [This assumes that attributes do not
 *		need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
	vm_map_t	map,
	vm_offset_t	address,
	vm_size_t	size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)		/* IN/OUT */
{
	kern_return_t	ret;
	vm_size_t	sync_size;
	vm_offset_t	start;
	vm_map_entry_t	entry;

	if (address < vm_map_min(map) ||
	    (address + size) > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	vm_map_lock(map);

	if (attribute != MATTR_CACHE) {
		/* If we don't have to find physical addresses, we */
		/* don't have to do an explicit traversal here.    */
		ret = pmap_attribute(map->pmap,
				address, size, attribute, value);
		vm_map_unlock(map);
		return ret;
	}

	/* Get the starting address */
	start = trunc_page(address);
	/* Figure how much memory we need to flush (in page increments) */
	sync_size = round_page(start + size) - start;

	ret = KERN_SUCCESS;		/* Assume it all worked */

	while (sync_size) {
		if (vm_map_lookup_entry(map, start, &entry)) {
			vm_size_t	sub_size;

			if ((entry->vme_end - start) > sync_size) {
				sub_size = sync_size;
				sync_size = 0;
			} else {
				sub_size = entry->vme_end - start;
				sync_size -= sub_size;
			}
			if (entry->is_sub_map) {
				vm_map_machine_attribute(
					entry->object.sub_map,
					(start - entry->vme_start)
						+ entry->offset,
					sub_size,
					attribute, value);
			} else if (entry->object.vm_object) {
				vm_page_t		m;
				vm_object_t		object;
				vm_object_t		base_object;
				vm_object_offset_t	offset;
				vm_object_offset_t	base_offset;
				vm_size_t		range;

				range = sub_size;
				offset = (start - entry->vme_start)
						+ entry->offset;
				base_offset = offset;
				object = entry->object.vm_object;
				base_object = object;

				while (range) {
					m = vm_page_lookup(object, offset);
					if (m && !m->fictitious) {
						pmap_attribute_cache_sync(
							m->phys_addr,
							PAGE_SIZE,
							attribute, value);
					} else if (object->shadow) {
						offset = offset +
							object->shadow_offset;
						object = object->shadow;
						continue;
					}
					range -= PAGE_SIZE;
					/* Bump to the next page */
					base_offset += PAGE_SIZE;
					offset = base_offset;
					object = base_object;
				}
			}
			start += sub_size;
		} else {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}
	}

	vm_map_unlock(map);

	return ret;
}
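/*
 *	Example (sketch): a hypothetical caller flushing the data cache for
 *	a user range through vm_map_machine_attribute.  MATTR_CACHE and
 *	MATTR_VAL_CACHE_FLUSH are the standard Mach attribute names; the
 *	helper itself is illustrative only.
 */
#if 0
static kern_return_t
example_cache_flush(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return vm_map_machine_attribute(map, addr, len, MATTR_CACHE, &value);
}
#endif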
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_behavior_t	new_behavior)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t	temp_entry;

	XPR(XPR_VM_MAP,
		"vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
		(integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		break;
	case VM_BEHAVIOR_WILLNEED:
	case VM_BEHAVIOR_DONTNEED:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	/*
	 *	The entire address range must be valid for the map.
	 *	Note that vm_map_range_check() does a
	 *	vm_map_lookup_entry() internally and returns the
	 *	entry containing the start of the address range if
	 *	the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
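/*
 *	Example (sketch): a hypothetical caller hinting that a range will be
 *	touched sequentially, so pageins for it can be clustered accordingly.
 */
#if 0
static kern_return_t
example_hint_sequential(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_behavior_set(map, start, end, VM_BEHAVIOR_SEQUENTIAL);
}
#endif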
#include <mach_kdb.h>
#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
		struct vm_map_links	*links);

extern void vm_map_header_print(
		struct vm_map_header	*header);

extern void vm_map_entry_print(
		vm_map_entry_t		entry);

extern void vm_follow_entry(
		vm_map_entry_t		entry);

extern void vm_follow_map(
		vm_map_t		map);

/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
	struct vm_map_links	*links)
{
	iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
		links->prev,
		links->next,
		links->start,
		links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
	struct vm_map_header	*header)
{
	vm_map_links_print(&header->links);
	iprintf("nentries=0x%x, %sentries_pageable\n",
		header->nentries,
		(header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
	vm_map_entry_t	entry)
{
	extern int db_indent;
	int shadows;

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n", shadows);

	db_indent -= 2;
}

/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
	register vm_map_entry_t	entry)
{
	extern int db_indent;
	static char *inheritance_name[4] = { "share", "copy", "none", "?"};
	static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	vm_map_links_print(&entry->links);

	iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
		entry->vme_start,
		entry->vme_end,
		entry->protection,
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->wired_count,
		entry->user_wired_count);
	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap=0x%x, offset=0x%x\n",
			entry->object.sub_map,
			entry->offset);
	} else {
		iprintf("object=0x%x, offset=0x%x, ",
			entry->object.vm_object,
			entry->offset);
		printf("%sis_shared, %sneeds_copy\n",
			(entry->is_shared ? "" : "!"),
			(entry->needs_copy ? "" : "!"));
	}

	db_indent -= 2;
}

/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
	vm_map_t	map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);
	}

	db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
	register vm_map_t	map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;
	char *swstate;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	vm_map_header_print(&map->hdr);

	iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
		map->pmap,
		map->size,
		map->ref_count,
		map->hint,
		map->first_free);

	iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),
		map->timestamp);

#if	TASK_SWAPPER
	switch (map->sw_state) {
	case MAP_SW_IN:
		swstate = "SW_IN";
		break;
	case MAP_SW_OUT:
		swstate = "SW_OUT";
		break;
	default:
		swstate = "????";
		break;
	}
	iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);
	}

	db_indent -= 2;
}

/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
	vm_map_copy_t	copy)
{
	extern int db_indent;
	vm_map_entry_t	entry;

	printf("copy object 0x%x\n", copy);

	db_indent += 2;

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");
		break;

	case VM_MAP_COPY_OBJECT:
		printf("[object]");
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");
		break;

	default:
		printf("[bad type]");
		break;
	}
	printf(", offset=0x%x", copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);
		}
		break;

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
		break;
	}

	db_indent -= 2;
}

/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
	vm_map_t	map)
{
	vm_map_entry_t	entry;
	vm_size_t	total;

	total = 0;
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}

	return total;
}

#endif	/* MACH_KDB */
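/*
 *	Example (sketch): under MACH_KDB these printers are normally invoked
 *	from ddb; a hypothetical helper could dump the current task map
 *	directly.  Illustrative only.
 */
#if 0
static void
example_dump_current_map(void)
{
	vm_map_print(current_map());
}
#endif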
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
	vm_map_t		map,
	vm_map_entry_t		insp_entry,
	vm_offset_t		start,
	vm_offset_t		end,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
{
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);
	return new_entry;
}
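/*
 *	Example (sketch): a hypothetical internal caller inserting a fresh
 *	entry after insp_entry.  The map must already be locked and the
 *	object reference is donated to the new entry, as the routine above
 *	assumes; the helper itself is illustrative only.
 */
#if 0
static vm_map_entry_t
example_insert(vm_map_t map, vm_map_entry_t insp_entry,
	       vm_offset_t start, vm_offset_t end, vm_object_t object)
{
	return vm_map_entry_insert(map, insp_entry, start, end,
				   object, (vm_object_offset_t) 0,
				   FALSE,		/* needs_copy */
				   FALSE,		/* is_shared */
				   FALSE,		/* in_transition */
				   VM_PROT_DEFAULT, VM_PROT_ALL,
				   VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT, 0);
}
#endif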
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
	vm_map_t		map,
	vm_offset_t		addr,
	vm_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	boolean_t		pageable)
{
	kern_return_t		result;
	vm_size_t		mapped_size;
	vm_size_t		tmp_size;
	vm_map_entry_t		src_entry;	/* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_offset_t		map_address;
	vm_offset_t		src_start;	/* start of entry to map */
	vm_offset_t		src_end;	/* end of region to be mapped */
	vm_object_t		object;
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == round_page(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/*
	 *	Compute start and end of region.
	 */
	src_start = trunc_page(addr);
	src_end = round_page(src_start + size);

	/*
	 *	Initialize map_header.
	 */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	map_address = 0;
	mapped_size = 0;
	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, need to loop on them.
	 */
	vm_map_lock(map);
	while (mapped_size != size) {
		vm_size_t	entry_size;

		/*
		 *	Find the beginning of the region.
		 */
		if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_size_t)(src_entry->vme_end -
					 src_entry->vme_start);

		if (src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
		} else {
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 *	the right object.
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,
						 &src_entry->offset,
						 entry_size);

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					if (map->mapped) {
						vm_object_pmap_protect(
							src_entry->object.vm_object,
							src_entry->offset,
							entry_size,
							PMAP_NULL,
							src_entry->vme_start,
							src_entry->protection &
							~VM_PROT_WRITE);
					} else {
						pmap_protect(vm_map_pmap(map),
							src_entry->vme_start,
							src_entry->vme_end,
							src_entry->protection &
							~VM_PROT_WRITE);
					}
				}

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;
			}

			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
			    MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			}
			vm_object_unlock(object);
		}

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE; /* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/*
		 *	The new region has to be copied now if required.
		 */
	RestartCopy:
		if (!copy) {
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
		} else if (src_entry->wired_count == 0 &&
			vm_object_copy_quickly(&new_entry->object.vm_object,
					       offset,
					       (new_entry->vme_end -
						new_entry->vme_start),
					       &src_needs_copy,
					       &new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/*
			 *	Handle copy_on_write semantics.
			 */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
						       offset,
						       entry_size,
						       ((src_entry->is_shared
							 || map->mapped) ?
							PMAP_NULL : map->pmap),
						       src_entry->vme_start,
						       src_entry->protection &
						       ~VM_PROT_WRITE);

				src_entry->needs_copy = TRUE;
			}
			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

		} else {
			new_entry->is_shared = FALSE;

			/*
			 *	The map can be safely unlocked since we
			 *	already hold a reference on the object.
			 *
			 *	Record the timestamp of the map for later
			 *	verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);	/* Increments timestamp once! */

			/*
			 *	Perform the copy.
			 */
			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
						object,
						offset,
						entry_size,
						THREAD_UNINT,
						&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
			} else {
				result = vm_object_copy_strategically(
						object,
						offset,
						entry_size,
						&new_entry->object.vm_object,
						&new_entry->offset,
						&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;
			}

			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);
				break;
			}

			/*
			 *	Verify that the map has not substantially
			 *	changed while the copy was being made.
			 */

			vm_map_lock(map);
			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 *	Simple version comparison failed.
				 *
				 *	Retry the lookup and verify that the
				 *	same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
						     object.vm_object);
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;
				continue;
			}

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);
				goto RestartCopy;
			}
		}

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	} /* end while */

	vm_map_unlock(map);
	if (result != KERN_SUCCESS) {
		/*
		 *	Free all allocated elements.
		 */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
		}
	}
	return result;
}
/*
 *	Map portion of a task's address space.
 *	Mapped region must not overlap more than
 *	one vm memory object. Protections and
 *	inheritance attributes remain the same
 *	as in the original task and are out parameters.
 *	Source and Target task can be identical
 *	Other attributes are identical as for vm_map()
 */
kern_return_t
vm_remap(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	kern_return_t		result;
	vm_map_entry_t		entry;
	vm_map_entry_t		insp_entry;
	vm_map_entry_t		new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
			break;
		/*FALL THRU*/
	default:
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);

	result = vm_remap_extract(src_map, memory_address,
				  size, copy, &map_header,
				  cur_protection,
				  max_protection,
				  inheritance,
				  target_map->hdr.entries_pageable);

	if (result != KERN_SUCCESS) {
		return result;
	}

	/*
	 *	Allocate/check a range of free virtual address
	 *	space for the target
	 */
	*address = trunc_page(*address);
	vm_map_lock(target_map);
	result = vm_remap_range_allocate(target_map, address, size,
					 mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			insp_entry = entry;
		} else {
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
			} else {
				vm_map_deallocate(entry->object.sub_map);
			}
			_vm_map_entry_dispose(&map_header, entry);
		}
	}

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	}
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
	return result;
}
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		returns the address and the map entry just before the allocated
 *		range
 *
 *	Map must be locked.
 */

kern_return_t
vm_remap_range_allocate(
	vm_map_t	map,
	vm_offset_t	*address,	/* IN/OUT */
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_entry_t	*map_entry)	/* OUT */
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

 StartAgain: ;

	start = *address;

	if (anywhere) {
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			return(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				return(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						vm_map_lock(map);
						goto StartAgain;
					}
				}

				return(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t	temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		if ((start & mask) != 0)
			return(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			return(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		ending address
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			return(KERN_NO_SPACE);
	}
	*map_entry = entry;
	return(KERN_SUCCESS);
}
/*
 *	Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
	vm_map_t	map)
{
	int		mycpu;
	thread_act_t	thr_act = current_act();
	vm_map_t	oldmap = thr_act->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thr_act, map, mycpu);

	mp_enable_preemption();
	return(oldmap);
}
/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map. The space must already exist in the
 *		destination map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_write_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if (thr_act->map == map) {
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map. The space must already exist in the
 *		kernel map.
 *	NOTE:  This routine should only be called by threads
 *	which can block on a page fault. i.e. kernel mode user
 *	threads.
 *
 */
kern_return_t
vm_map_read_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if (thr_act->map == map) {
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
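/*
 *	Example (sketch): hypothetical helpers moving a small buffer between
 *	the kernel and a (possibly non-current) user map, relying on the
 *	routines above to switch address-space identity when necessary.
 *	Illustrative only.
 */
#if 0
static kern_return_t
example_copy_buf_out(vm_map_t user_map, void *kbuf, vm_offset_t uaddr,
		     vm_size_t len)
{
	return vm_map_write_user(user_map, (vm_offset_t) kbuf, uaddr, len);
}

static kern_return_t
example_copy_buf_in(vm_map_t user_map, vm_offset_t uaddr, void *kbuf,
		    vm_size_t len)
{
	return vm_map_read_user(user_map, uaddr, (vm_offset_t) kbuf, len);
}
#endif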
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */
kern_return_t
vm_region_clone(
	ipc_port_t	src_region,
	ipc_port_t	dst_region)
{
	vm_named_entry_t	src_object;
	vm_named_entry_t	dst_object;
	vm_map_t		src_map;
	vm_map_t		dst_map;
	vm_offset_t		addr;
	vm_offset_t		max_off;
	vm_map_entry_t		entry;
	vm_map_entry_t		new_entry;
	vm_map_entry_t		insert_point;

	src_object = (vm_named_entry_t)src_region->ip_kobject;
	dst_object = (vm_named_entry_t)dst_region->ip_kobject;
	if ((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
		return KERN_INVALID_ARGUMENT;
	}
	src_map = (vm_map_t)src_object->backing.map;
	dst_map = (vm_map_t)dst_object->backing.map;
	/* destination map is assumed to be unavailable to any other */
	/* activity.  i.e. it is new */
	vm_map_lock(src_map);
	if ((src_map->min_offset != dst_map->min_offset)
	    || (src_map->max_offset != dst_map->max_offset)) {
		vm_map_unlock(src_map);
		return KERN_INVALID_ARGUMENT;
	}
	addr = src_map->min_offset;
	vm_map_lookup_entry(dst_map, addr, &entry);
	if (entry == vm_map_to_entry(dst_map)) {
		entry = entry->vme_next;
	}
	if (entry == vm_map_to_entry(dst_map)) {
		max_off = src_map->max_offset;
	} else {
		max_off = entry->vme_start;
	}
	vm_map_lookup_entry(src_map, addr, &entry);
	if (entry == vm_map_to_entry(src_map)) {
		entry = entry->vme_next;
	}
	vm_map_lookup_entry(dst_map, addr, &insert_point);
	while ((entry != vm_map_to_entry(src_map)) &&
	       (entry->vme_end <= max_off)) {
		addr = entry->vme_start;
		new_entry = vm_map_entry_create(dst_map);
		vm_map_entry_copy(new_entry, entry);
		vm_map_entry_link(dst_map, insert_point, new_entry);
		insert_point = new_entry;
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (new_entry->is_sub_map) {
				vm_map_reference(new_entry->object.sub_map);
			} else {
				vm_object_reference(
					new_entry->object.vm_object);
			}
		}
		dst_map->size += new_entry->vme_end - new_entry->vme_start;
		entry = entry->vme_next;
	}
	vm_map_unlock(src_map);
	return KERN_SUCCESS;
}
/*
 *	Export routines to other components for the things we access locally through
 *	macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
	return (current_map_fast());
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	vm_map_lock(map);

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) {
		vm_map_unlock(map);
		return (FALSE);
	}

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		vm_map_unlock(map);
		return (FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {
			vm_map_unlock(map);
			return (FALSE);
		}

		/*
		 *	No holes allowed!
		 */
		if (start < entry->vme_start) {
			vm_map_unlock(map);
			return (FALSE);
		}

		/*
		 *	Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection) {
			vm_map_unlock(map);
			return (FALSE);
		}

		/* go to next entry */
		start = entry->vme_end;
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return (TRUE);
}
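/*
 *	Example (sketch): a hypothetical caller verifying that an entire user
 *	range is mapped readable before handing it to a device.
 */
#if 0
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_check_protection(map, start, end, VM_PROT_READ);
}
#endif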
/*
 *	This routine is obsolete, but included for backward
 *	compatibility for older drivers.
 */
void
kernel_vm_map_reference(
	vm_map_t	map)
{
	vm_map_reference(map);
}

/*
 *	vm_map_reference:
 *
 *	Most code internal to the osfmk will go through a
 *	macro defining this.  This is always here for the
 *	use of other kernel components.
 */
#undef vm_map_reference
void
vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
#endif
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
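/*
 *	Example (sketch): the reference taken by vm_map_reference must
 *	eventually be dropped with vm_map_deallocate; a hypothetical caller
 *	holding a map across a blocking operation would bracket it like this.
 */
#if 0
static void
example_hold_map(vm_map_t map)
{
	vm_map_reference(map);		/* keep the map alive */
	/* ... potentially blocking work against the map ... */
	vm_map_deallocate(map);		/* may destroy the map if last ref */
}
#endif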
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
	register vm_map_t	map)
{
	unsigned int		ref;

	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	if (ref > 0) {
		vm_map_res_deallocate(map);
		mutex_unlock(&map->s_lock);
		return;
	}
	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

#if	TASK_SWAPPER
	/*
	 *	The map residence count isn't decremented here because
	 *	the vm_map_delete below will traverse the entire map,
	 *	deleting entries, and the residence counts on objects
	 *	and sharing maps will go away then.
	 */
#endif

	vm_map_destroy(map