/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
/*
 *	Internal prototypes.
 */
extern boolean_t	vm_map_range_check(vm_map_t map, vm_offset_t start,
				vm_offset_t end, vm_map_entry_t *entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header *map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header *map_header,
				vm_map_entry_t entry);

extern void		vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
				vm_offset_t end_addr, vm_object_t object,
				vm_object_offset_t offset,
				vm_prot_t protection);

extern void		_vm_map_clip_end(struct vm_map_header *map_header,
				vm_map_entry_t entry, vm_offset_t end);

extern void		vm_map_entry_delete(vm_map_t map,
				vm_map_entry_t entry);

extern kern_return_t	vm_map_delete(vm_map_t map, vm_offset_t start,
				vm_offset_t end, int flags);

extern void		vm_map_copy_steal_pages(vm_map_copy_t copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(vm_map_t dst_map,
				vm_map_entry_t entry, vm_map_copy_t copy,
				vm_offset_t start);

extern kern_return_t	vm_map_copy_overwrite_aligned(vm_map_t dst_map,
				vm_map_entry_t tmp_entry, vm_map_copy_t copy,
				vm_offset_t start, pmap_t pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(vm_map_t src_map,
				vm_offset_t src_addr, vm_size_t len,
				boolean_t src_destroy,
				vm_map_copy_t *copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(vm_map_t map,
				vm_offset_t *addr,		/* IN/OUT */
				vm_map_copy_t copy, boolean_t overwrite);

extern void		vm_map_fork_share(vm_map_t old_map,
				vm_map_entry_t old_entry, vm_map_t new_map);

extern boolean_t	vm_map_fork_copy(vm_map_t old_map,
				vm_map_entry_t *old_entry_p,
				vm_map_t new_map);

extern kern_return_t	vm_remap_range_allocate(vm_map_t map,
				vm_offset_t *address,		/* IN/OUT */
				vm_size_t size, vm_offset_t mask,
				boolean_t anywhere,
				vm_map_entry_t *map_entry);	/* OUT */

extern void		_vm_map_clip_start(struct vm_map_header *map_header,
				vm_map_entry_t entry, vm_offset_t start);

void			vm_region_top_walk(vm_map_entry_t entry,
				vm_region_top_info_t top);

void			vm_region_walk(vm_map_entry_t entry,
				vm_region_extended_info_t extended,
				vm_object_offset_t offset, vm_offset_t range,
				vm_map_t map, vm_offset_t va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN                                     \
                *(NEW) = *(OLD);                \
                (NEW)->is_shared = FALSE;	\
                (NEW)->needs_wakeup = FALSE;    \
                (NEW)->in_transition = FALSE;   \
                (NEW)->wired_count = 0;         \
                (NEW)->user_wired_count = 0;    \
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)		(*(NEW) = *(OLD))
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;

/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */

vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;		/* to init kentry_data_size */

#define NO_COALESCE_LIMIT	(1024 * 128)

/*
 *	Threshold for aggressive (eager) page map entering for vm copyout
 *	operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
					PAGE_SIZE, "maps");
	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					1024*1024, PAGE_SIZE*5,
					"non-kernel map entries");
	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					kentry_data_size, kentry_data_size,
					"kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
					16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}

void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world.  This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max,
	boolean_t	pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->size = 0;
	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
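/*
 * Illustrative use only (not part of this file): a task address map is
 * typically built along the lines of
 *
 *	new_map = vm_map_create(pmap_create(0),
 *				round_page(VM_MIN_ADDRESS),
 *				trunc_page(VM_MAX_ADDRESS), TRUE);
 *
 * The particular pmap constructor and address bounds above are assumptions
 * for the sake of the example; callers supply whatever pmap and limits fit
 * the address space being created.
 */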
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	    _vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	    _vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
	    zone = vm_map_entry_zone;
	else
	    zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(map, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
	    zone = vm_map_entry_zone;
	else
	    zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}

boolean_t first_free_is_valid(vm_map_t map);	/* forward */
boolean_t first_free_check = FALSE;
boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
	       (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free) 				\
MACRO_BEGIN								\
	vm_map_t	UFF_map; 					\
	vm_map_entry_t	UFF_first_free; 				\
	vm_map_entry_t	UFF_next_entry; 				\
	UFF_map = (map); 						\
	UFF_first_free = (new_first_free);				\
	UFF_next_entry = UFF_first_free->vme_next; 			\
	while (trunc_page(UFF_next_entry->vme_start) == 		\
	       trunc_page(UFF_first_free->vme_end) || 			\
	       (trunc_page(UFF_next_entry->vme_start) == 		\
		trunc_page(UFF_first_free->vme_start) && 		\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {		\
		UFF_first_free = UFF_next_entry; 			\
		UFF_next_entry = UFF_first_free->vme_next; 		\
		if (UFF_first_free == vm_map_to_entry(UFF_map)) 	\
			break; 						\
	} 								\
	UFF_map->first_free = UFF_first_free; 				\
	assert(first_free_is_valid(UFF_map)); 				\
MACRO_END
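/*
 * Worked example (illustrative only): if the map contains contiguous
 * entries covering [A,B) and [B,C) followed by a hole before the next
 * entry, then UPDATE_FIRST_FREE(map, entry_for_[A,B)) walks forward over
 * the contiguous [B,C) entry and leaves map->first_free pointing at the
 * [B,C) entry, i.e. the entry just before the hole.
 */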
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)			\
MACRO_BEGIN								\
	vm_map_t VMEL_map; 						\
	vm_map_entry_t VMEL_entry; 					\
	VMEL_map = (map); 						\
	VMEL_entry = (entry); 						\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);		\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)			\
MACRO_BEGIN								\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
MACRO_END

#define vm_map_entry_unlink(map, entry)					\
MACRO_BEGIN								\
	vm_map_t VMEU_map; 						\
	vm_map_entry_t VMEU_entry; 					\
	vm_map_entry_t VMEU_first_free; 				\
	VMEU_map = (map); 						\
	VMEU_entry = (entry); 						\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;			\
	else								\
		VMEU_first_free = VMEU_map->first_free;			\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);		\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);			\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)				\
MACRO_BEGIN								\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev; 		\
	(entry)->vme_prev->vme_next = (entry)->vme_next; 		\
MACRO_END
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}

/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}

/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_unlock(map);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
#if	TASK_SWAPPER
/*
 * vm_map_swapin/vm_map_swapout
 *
 * Swap a map in and out, either referencing or releasing its resources.
 * These functions are internal use only; however, they must be exported
 * because they may be called from macros, which are exported.
 *
 * In the case of swapout, there could be races on the residence count,
 * so if the residence count is up, we return, assuming that a
 * vm_map_deallocate() call in the near future will bring us back.
 *
 * Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 * Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 * Because vm_map_swapin() is potentially an expensive operation, it
 * should be used with caution.
 *
 * Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock);
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
	    	/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
	    	/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
			    	/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
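/*
 * Typical use elsewhere in this module (sketch only; the real callers
 * below show the exact patterns):
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->vme_next;
 */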
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	map->size += size;

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
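/*
 * Illustrative caller (cf. kmem_alloc): on KERN_SUCCESS the map comes back
 * locked, so the caller typically fills in the entry's object and then
 * unlocks.  The variable names here are assumptions for the example only:
 *
 *	kr = vm_map_find_space(map, &addr, size, mask, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		entry->object.vm_object = object;
 *		vm_map_unlock(map);
 *	}
 */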
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page not found in the object the scan ends.
 *
 *	Returns:
 *		Nothing.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	unsigned int		cache_attr;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
				map, addr, object, offset);
		}
		m->busy = TRUE;

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_addr);
			m->no_isync = FALSE;
		}

		cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, cache_attr, FALSE);

		vm_object_lock(object);

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);

		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}
	}

	return(result);

 BailOut: ;
	vm_map_unlock(map);
	return(result);

#undef	RETURN
}
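/*
 * Note (editorial): vm_map_enter() is the allocation primitive behind the
 * vm_map()/vm_allocate() style interfaces; "flags" carries VM_FLAGS_ANYWHERE
 * plus an alias tag extracted above with VM_GET_FLAGS_ALIAS().
 */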
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr) 			\
MACRO_BEGIN 								\
	vm_map_t VMCS_map; 						\
	vm_map_entry_t VMCS_entry; 					\
	vm_offset_t VMCS_startaddr; 					\
	VMCS_map = (map); 						\
	VMCS_entry = (entry); 						\
	VMCS_startaddr = (startaddr); 					\
	if (VMCS_startaddr > VMCS_entry->vme_start) { 			\
		if(entry->use_pmap) { 					\
			vm_offset_t	pmap_base_addr; 		\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, pmap_base_addr, 		\
				0x10000000); 				\
			entry->use_pmap = FALSE; 			\
		} else if(entry->object.vm_object 			\
			&& !entry->is_sub_map 				\
			&& entry->object.vm_object->phys_contiguous) {	\
			pmap_remove(map->pmap, 				\
				entry->vme_start, entry->vme_end); 	\
		} 							\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} 								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); 		\
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr) 			\
MACRO_BEGIN 								\
	vm_map_t VMCS_map; 						\
	vm_map_entry_t VMCS_entry; 					\
	vm_offset_t VMCS_startaddr; 					\
	VMCS_map = (map); 						\
	VMCS_entry = (entry); 						\
	VMCS_startaddr = (startaddr); 					\
	if (VMCS_startaddr > VMCS_entry->vme_start) { 			\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	} 								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); 		\
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr) \
	MACRO_BEGIN \
	if ((startaddr) > (entry)->vme_start) \
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr) 				\
MACRO_BEGIN 								\
	vm_map_t VMCE_map; 						\
	vm_map_entry_t VMCE_entry; 					\
	vm_offset_t VMCE_endaddr; 					\
	VMCE_map = (map); 						\
	VMCE_entry = (entry); 						\
	VMCE_endaddr = (endaddr); 					\
	if (VMCE_endaddr < VMCE_entry->vme_end) { 			\
		if(entry->use_pmap) { 					\
			vm_offset_t	pmap_base_addr; 		\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start; \
			pmap_unnest(map->pmap, pmap_base_addr, 		\
				0x10000000); 				\
			entry->use_pmap = FALSE; 			\
		} else if(entry->object.vm_object 			\
			&& !entry->is_sub_map 				\
			&& entry->object.vm_object->phys_contiguous) {	\
			pmap_remove(map->pmap, 				\
				entry->vme_start, entry->vme_end); 	\
		} 							\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} 								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); 		\
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr) 				\
MACRO_BEGIN 								\
	vm_map_t VMCE_map; 						\
	vm_map_entry_t VMCE_entry; 					\
	vm_offset_t VMCE_endaddr; 					\
	VMCE_map = (map); 						\
	VMCE_entry = (entry); 						\
	VMCE_endaddr = (endaddr); 					\
	if (VMCE_endaddr < VMCE_entry->vme_end) { 			\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	} 								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); 		\
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr) \
	MACRO_BEGIN \
	if ((endaddr) < (entry)->vme_end) \
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
	MACRO_END

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses are wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_offset_t		offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	submap->mapped = TRUE;

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
#ifndef i386
		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					   start, end - start);
			if(result)
				panic("pmap_nest failed!");
			entry->use_pmap = TRUE;
		}
#endif
#ifdef i386
		pmap_remove(map->pmap, start, end);
#endif
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t		current;
	register vm_offset_t		prev;
	vm_map_entry_t			entry;
	vm_prot_t			new_max;
	boolean_t			clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 * If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	current = entry;
	if (clip)
		vm_map_clip_start(map, entry, start);

	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
					new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap).  This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;

				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, pmap_base_addr,
					(pmap_end_addr - pmap_base_addr) + 1);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						current->vme_start,
						current->vme_end,
						current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						current->vme_end,
						current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
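/*
 * Illustrative call (not from this file): making a region read-only without
 * touching its maximum protection:
 *
 *	kr = vm_map_protect(map, trunc_page(addr),
 *			    round_page(addr + len),
 *			    VM_PROT_READ, FALSE);
 *
 * addr/len above are placeholders standing in for the caller's range.
 */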
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		temp_entry = temp_entry->vme_next;
		entry = temp_entry;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
		}
		entry = entry->vme_next;
	}

	entry = temp_entry;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_offset_t		pmap_addr)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	vm_map_t		pmap_map;
	register vm_offset_t	s,e;
	kern_return_t		rc;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	wait_interrupt_t	interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;
	vm_size_t		size;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
	if (start == end) {
		/* We wired what the caller asked for, zero pages */
		vm_map_unlock(map);
		return KERN_SUCCESS;
	}

	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	s=start;
	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start? start: entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}
			/*
			 * User wiring is interruptible
			 */
			wait_result = vm_map_entry_wait(map,
					(user_wire) ? THREAD_ABORTSAFE :
						      THREAD_UNINT);
			if (user_wire && wait_result == THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			vm_map_lock(map);
			/*
			 * Cannot avoid a lookup here.  reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * The worst that can happen is that it may not exist
			 * anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything up to the previous
				 * entry.  let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}

		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_start;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;

			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
					/* ppc implementation requires that */
					/* submaps pmap address ranges line */
					/* up with parent map */
					pmap_addr = sub_start;
				} else {
					pmap = map->pmap;
					pmap_addr = start;
				}
				if (entry->wired_count) {
					if (entry->wired_count
						>= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					if(user_wire &&
					   entry->user_wired_count
						>= MAX_WIRE_COUNT) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
						   entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					     (entry->user_wired_count == 0))
						entry->wired_count++;
					entry = entry->vme_next;
					continue;

				} else {
					vm_object_t		object;
					vm_object_offset_t	offset_hi;
					vm_object_offset_t	offset_lo;
					vm_object_offset_t	offset;
					vm_prot_t		prot;
					boolean_t		wired;
					vm_behavior_t		behavior;
					vm_map_entry_t		local_entry;
					vm_map_version_t	version;
					vm_map_t		lookup_map;

					/* call vm_map_lookup_locked to */
					/* cause any needs copy to be   */
					/* evaluated */
					local_start = entry->vme_start;
					lookup_map = map;
					vm_map_lock_write_to_read(map);
					if(vm_map_lookup_locked(
						&lookup_map, local_start,
						access_type,
						&version, &object,
						&offset, &prot, &wired,
						&behavior, &offset_lo,
						&offset_hi, &pmap_map)) {

						vm_map_unlock(lookup_map);
						vm_map_unwire(map, start,
						   entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(pmap_map != lookup_map)
						vm_map_unlock(pmap_map);
					vm_map_unlock_read(lookup_map);
					vm_map_lock(map);
					vm_object_unlock(object);

					if (!vm_map_lookup_entry(map,
						local_start, &local_entry)) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
						   entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					/* did we have a change of type? */
					if (!local_entry->is_sub_map) {
						last_timestamp = map->timestamp;
						continue;
					}
					entry = local_entry;
					if (user_wire)
						entry->user_wired_count++;
					if((!user_wire) ||
					     (entry->user_wired_count == 1))
						entry->wired_count++;

					entry->in_transition = TRUE;

					vm_map_unlock(map);
					rc = vm_map_wire_nested(
						entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
					vm_map_lock(map);
				}
			} else {
				local_start = entry->vme_start;
				if (user_wire)
					entry->user_wired_count++;
				if((!user_wire) ||
				     (entry->user_wired_count == 1))
					entry->wired_count++;
				vm_map_unlock(map);
				rc = vm_map_wire_nested(entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap, pmap_addr);
				vm_map_lock(map);
			}
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, local_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");
			entry = first_entry;

			last_timestamp = map->timestamp;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < e)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				if (rc != KERN_SUCCESS) {/* from vm_*_wire */
					if (user_wire)
						entry->user_wired_count--;
					if ((!user_wire) ||
					    (entry->user_wired_count == 0))
						entry->wired_count--;
				}
				entry = entry->vme_next;
			}
			if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
				vm_map_unlock(map);
				if (need_wakeup)
					vm_map_entry_wakeup(map);
				/*
				 * undo everything up to the previous entry.
				 */
				(void)vm_map_unwire(map, start, s, user_wire);
				return rc;
			}
			continue;
		}

		/*
		 * If this entry is already wired then increment
		 * the appropriate wire reference count.
		 */
		if (entry->wired_count) {
			/* sanity check: wired_count is a short */
			if (entry->wired_count >= MAX_WIRE_COUNT)
				panic("vm_map_wire: too many wirings");

			if (user_wire &&
			    entry->user_wired_count >= MAX_WIRE_COUNT) {
				vm_map_unlock(map);
				vm_map_unwire(map, start,
						entry->vme_start, user_wire);
				return(KERN_FAILURE);
			}
			/*
			 * entry is already wired down, get our reference
			 * after clipping to our range.
			 */
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			if (user_wire)
				entry->user_wired_count++;
			if ((!user_wire) || (entry->user_wired_count == 1))
				entry->wired_count++;

			entry = entry->vme_next;
			continue;
		}

		/*
		 * Unwired entry or wire request transmitted via submap
		 */

		/*
		 * Perform actions of vm_map_lookup that need the write
		 * lock on the map: create a shadow object for a
		 * copy-on-write region, or an object for a zero-fill
		 * region.
		 */
		size = entry->vme_end - entry->vme_start;
		/*
		 * If wiring a copy-on-write page, we need to copy it now
		 * even if we're only (currently) requesting read access.
		 * This is aggressive, but once it's wired we can't move it.
		 */
		if (entry->needs_copy) {
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset, size);
			entry->needs_copy = FALSE;
		} else if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(size);
			entry->offset = (vm_object_offset_t)0;
		}

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		s = entry->vme_start;

		/*
		 * Check for holes and protection mismatch.
		 * Holes: Next entry should be contiguous unless this
		 *	  is the end of the region.
		 * Protection: Access requested must be allowed, unless
		 *	wiring is by protection class
		 */
		if ((((entry->vme_end < end) &&
		     ((entry->vme_next == vm_map_to_entry(map)) ||
		      (entry->vme_next->vme_start > entry->vme_end))) ||
		     ((entry->protection & access_type) != access_type))) {
			/*
			 * Found a hole or protection problem.
			 * Unwire the region we wired so far.
			 */
			if (start != entry->vme_start) {
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
			} else {
				vm_map_unlock(map);
			}
			return((entry->protection&access_type) != access_type?
				KERN_PROTECTION_FAILURE: KERN_INVALID_ADDRESS);
		}

		assert(entry->wired_count == 0 && entry->user_wired_count == 0);

		if (user_wire)
			entry->user_wired_count++;
		if ((!user_wire) || (entry->user_wired_count == 1))
			entry->wired_count++;

		entry->in_transition = TRUE;

		/*
		 * This entry might get split once we unlock the map.
		 * In vm_fault_wire(), we need the current range as
		 * defined by this entry.  In order for this to work
		 * along with a simultaneous clip operation, we make a
		 * temporary copy of this entry and use that for the
		 * wiring.  Note that the underlying objects do not
		 * change during a clip.
		 */
		tmp_entry = *entry;

		/*
		 * The in_transition state guarantees that the entry
		 * (or entries for this range, if split occurred) will be
		 * there when the map lock is acquired for the second time.
		 */
		vm_map_unlock(map);

		if (!user_wire && cur_thread != THREAD_NULL)
			interruptible_state = thread_interrupt_level(THREAD_UNINT);

		if(map_pmap)
			rc = vm_fault_wire(map,
					&tmp_entry, map_pmap, pmap_addr);
		else
			rc = vm_fault_wire(map,
					&tmp_entry, map->pmap,
					tmp_entry.vme_start);

		if (!user_wire && cur_thread != THREAD_NULL)
			thread_interrupt_level(interruptible_state);

		vm_map_lock(map);

		if (last_timestamp+1 != map->timestamp) {
			/*
			 * Find the entry again.  It could have been clipped
			 * after we unlocked the map.
			 */
			if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
						 &first_entry))
				panic("vm_map_wire: re-lookup failed");

			entry = first_entry;
		}

		last_timestamp = map->timestamp;

		while ((entry != vm_map_to_entry(map)) &&
		       (entry->vme_start < tmp_entry.vme_end)) {
			assert(entry->in_transition);
			entry->in_transition = FALSE;
			if (entry->needs_wakeup) {
				entry->needs_wakeup = FALSE;
				need_wakeup = TRUE;
			}
			if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
				if (user_wire)
					entry->user_wired_count--;
				if ((!user_wire) ||
				    (entry->user_wired_count == 0))
					entry->wired_count--;
			}
			entry = entry->vme_next;
		}

		if (rc != KERN_SUCCESS) {		/* from vm_*_wire */
			vm_map_unlock(map);
			if (need_wakeup)
				vm_map_entry_wakeup(map);
			/*
			 * undo everything up to the previous entry.
			 */
			(void)vm_map_unwire(map, start, s, user_wire);
			return rc;
		}
	} /* end while loop through map entries */
	vm_map_unlock(map);

	/*
	 * wake up anybody waiting on entries we wired.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return(KERN_SUCCESS);
}
kern_return_t
vm_map_wire(
    register vm_map_t       map,
    register vm_offset_t    start,
    register vm_offset_t    end,
    register vm_prot_t      access_type,
    boolean_t               user_wire)
{
    kern_return_t   kret;

    /*
     * The calls to mapping_prealloc and mapping_relpre
     * (along with the VM_MAP_RANGE_CHECK to ensure a
     * reasonable range was passed in) are
     * currently necessary because
     * we haven't enabled kernel pre-emption
     * and/or the pmap_enter cannot purge and re-use
     * existing mappings.
     */
    VM_MAP_RANGE_CHECK(map, start, end);
    mapping_prealloc(end - start);
    kret = vm_map_wire_nested(map, start, end, access_type,
                  user_wire, (pmap_t)NULL, 0);
    mapping_relpre();
    return kret;
}
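
/*
 * Illustrative (non-build) sketch of how a kernel client might use the
 * wiring interfaces above: wire a page-aligned range of a task's map for
 * I/O-style access and unwire it afterwards.  The helper name, map handle
 * and address values are assumptions for the example, not part of this
 * file; only vm_map_wire()/vm_map_unwire() as declared here are relied on.
 */
#if 0   /* example only */
static kern_return_t
example_wire_range(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
    kern_return_t kr;

    /* wire for read/write; FALSE => kernel (not user) wiring */
    kr = vm_map_wire(map, trunc_page(addr), round_page(addr + size),
             VM_PROT_READ | VM_PROT_WRITE, FALSE);
    if (kr != KERN_SUCCESS)
        return kr;

    /* ... access the wired memory ... */

    /* release the kernel wiring taken above */
    return vm_map_unwire(map, trunc_page(addr), round_page(addr + size),
                 FALSE);
}
#endif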
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	map as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and in-transition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
    register vm_map_t       map,
    register vm_offset_t    start,
    register vm_offset_t    end,
    boolean_t               user_wire,
    pmap_t                  map_pmap,
    vm_offset_t             pmap_addr)
{
    register vm_map_entry_t entry;
    struct vm_map_entry     *first_entry, tmp_entry;
    boolean_t               need_wakeup;
    boolean_t               main_map = FALSE;
    unsigned int            last_timestamp;

    vm_map_lock(map);
    if(map_pmap == NULL)
        main_map = TRUE;
    last_timestamp = map->timestamp;

    VM_MAP_RANGE_CHECK(map, start, end);
    assert(page_aligned(start));
    assert(page_aligned(end));

    if (vm_map_lookup_entry(map, start, &first_entry)) {
        entry = first_entry;
        /* vm_map_clip_start will be done later. */
    } else {
        /* Start address is not in map. */
        vm_map_unlock(map);
        return(KERN_INVALID_ADDRESS);
    }

    need_wakeup = FALSE;
    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
        if (entry->in_transition) {
            /*
             * 1)
             * Another thread is wiring down this entry. Note
             * that if it is not for the other thread we would
             * be unwiring an unwired entry.  This is not
             * permitted.  If we wait, we will be unwiring memory
             * we did not wire.
             *
             * 2)
             * Another thread is unwiring this entry.  We did not
             * have a reference to it, because if we did, this
             * entry will not be getting unwired now.
             */
            if (!user_wire)
                panic("vm_map_unwire: in_transition entry");

            entry = entry->vme_next;
            continue;
        }

        if(entry->is_sub_map) {
            vm_offset_t sub_start;
            vm_offset_t sub_end;
            vm_offset_t local_end;
            pmap_t      pmap;

            vm_map_clip_start(map, entry, start);
            vm_map_clip_end(map, entry, end);

            sub_start = entry->offset;
            sub_end = entry->vme_end - entry->vme_start;
            sub_end += entry->offset;
            local_end = entry->vme_end;
            if(map_pmap == NULL) {
                if(entry->use_pmap) {
                    pmap = entry->object.sub_map->pmap;
                    pmap_addr = sub_start;
                } else {
                    pmap = map->pmap;
                    pmap_addr = start;
                }
                if (entry->wired_count == 0 ||
                    (user_wire && entry->user_wired_count == 0)) {
                    if (!user_wire)
                        panic("vm_map_unwire: entry is unwired");
                    entry = entry->vme_next;
                    continue;
                }

                /*
                 * Check for holes
                 * Holes: Next entry should be contiguous unless
                 * this is the end of the region.
                 */
                if (((entry->vme_end < end) &&
                     ((entry->vme_next == vm_map_to_entry(map)) ||
                      (entry->vme_next->vme_start
                            > entry->vme_end)))) {
                    if (!user_wire)
                        panic("vm_map_unwire: non-contiguous region");
/*
                    entry = entry->vme_next;
                    continue;
*/
                }

                if (!user_wire || (--entry->user_wired_count == 0))
                    entry->wired_count--;

                if (entry->wired_count != 0) {
                    entry = entry->vme_next;
                    continue;
                }

                entry->in_transition = TRUE;
                tmp_entry = *entry;/* see comment in vm_map_wire() */

                /*
                 * We can unlock the map now. The in_transition state
                 * guarantees existence of the entry.
                 */
                vm_map_unlock(map);
                vm_map_unwire_nested(entry->object.sub_map,
                    sub_start, sub_end, user_wire, pmap, pmap_addr);
                vm_map_lock(map);

                if (last_timestamp+1 != map->timestamp) {
                    /*
                     * Find the entry again.  It could have been
                     * clipped or deleted after we unlocked the map.
                     */
                    if (!vm_map_lookup_entry(map,
                            tmp_entry.vme_start,
                            &first_entry)) {
                        if (!user_wire)
                            panic("vm_map_unwire: re-lookup failed");
                        entry = first_entry->vme_next;
                    } else
                        entry = first_entry;
                }
                last_timestamp = map->timestamp;

                /*
                 * clear transition bit for all constituent entries
                 * that were in the original entry (saved in
                 * tmp_entry).  Also check for waiters.
                 */
                while ((entry != vm_map_to_entry(map)) &&
                       (entry->vme_start < tmp_entry.vme_end)) {
                    assert(entry->in_transition);
                    entry->in_transition = FALSE;
                    if (entry->needs_wakeup) {
                        entry->needs_wakeup = FALSE;
                        need_wakeup = TRUE;
                    }
                    entry = entry->vme_next;
                }
                continue;
            } else {
                vm_map_unlock(map);
                vm_map_unwire_nested(entry->object.sub_map,
                    sub_start, sub_end, user_wire, pmap, pmap_addr);
                vm_map_lock(map);

                if (last_timestamp+1 != map->timestamp) {
                    /*
                     * Find the entry again.  It could have been
                     * clipped or deleted after we unlocked the map.
                     */
                    if (!vm_map_lookup_entry(map,
                            tmp_entry.vme_start,
                            &first_entry)) {
                        if (!user_wire)
                            panic("vm_map_unwire: re-lookup failed");
                        entry = first_entry->vme_next;
                    } else
                        entry = first_entry;
                }
                last_timestamp = map->timestamp;
            }
            continue;
        }

        if ((entry->wired_count == 0) ||
            (user_wire && entry->user_wired_count == 0)) {
            if (!user_wire)
                panic("vm_map_unwire: entry is unwired");

            entry = entry->vme_next;
            continue;
        }

        assert(entry->wired_count > 0 &&
               (!user_wire || entry->user_wired_count > 0));

        vm_map_clip_start(map, entry, start);
        vm_map_clip_end(map, entry, end);

        /*
         * Check for holes
         * Holes: Next entry should be contiguous unless
         *        this is the end of the region.
         */
        if (((entry->vme_end < end) &&
             ((entry->vme_next == vm_map_to_entry(map)) ||
              (entry->vme_next->vme_start > entry->vme_end)))) {

            if (!user_wire)
                panic("vm_map_unwire: non-contiguous region");
            entry = entry->vme_next;
            continue;
        }

        if (!user_wire || (--entry->user_wired_count == 0))
            entry->wired_count--;

        if (entry->wired_count != 0) {
            entry = entry->vme_next;
            continue;
        }

        entry->in_transition = TRUE;
        tmp_entry = *entry;     /* see comment in vm_map_wire() */

        /*
         * We can unlock the map now. The in_transition state
         * guarantees existence of the entry.
         */
        vm_map_unlock(map);
        if(map_pmap) {
            vm_fault_unwire(map,
                &tmp_entry, FALSE, map_pmap, pmap_addr);
        } else {
            vm_fault_unwire(map,
                &tmp_entry, FALSE, map->pmap,
                tmp_entry.vme_start);
        }
        vm_map_lock(map);

        if (last_timestamp+1 != map->timestamp) {
            /*
             * Find the entry again.  It could have been clipped
             * or deleted after we unlocked the map.
             */
            if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
                        &first_entry)) {
                if (!user_wire)
                    panic("vm_map_unwire: re-lookup failed");
                entry = first_entry->vme_next;
            } else
                entry = first_entry;
        }
        last_timestamp = map->timestamp;

        /*
         * clear transition bit for all constituent entries that
         * were in the original entry (saved in tmp_entry).  Also
         * check for waiters.
         */
        while ((entry != vm_map_to_entry(map)) &&
               (entry->vme_start < tmp_entry.vme_end)) {
            assert(entry->in_transition);
            entry->in_transition = FALSE;
            if (entry->needs_wakeup) {
                entry->needs_wakeup = FALSE;
                need_wakeup = TRUE;
            }
            entry = entry->vme_next;
        }
    }
    vm_map_unlock(map);
    /*
     * wake up anybody waiting on entries that we have unwired.
     */
    if (need_wakeup)
        vm_map_entry_wakeup(map);
    return(KERN_SUCCESS);
}
kern_return_t
vm_map_unwire(
    register vm_map_t       map,
    register vm_offset_t    start,
    register vm_offset_t    end,
    boolean_t               user_wire)
{
    return vm_map_unwire_nested(map, start, end,
                    user_wire, (pmap_t)NULL, 0);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
    register vm_map_t       map,
    register vm_map_entry_t entry)
{
    register vm_offset_t    s, e;
    register vm_object_t    object;
    register vm_map_t       submap;
    extern vm_object_t      kernel_object;

    s = entry->vme_start;
    e = entry->vme_end;
    assert(page_aligned(s));
    assert(page_aligned(e));
    assert(entry->wired_count == 0);
    assert(entry->user_wired_count == 0);

    if (entry->is_sub_map) {
        object = NULL;
        submap = entry->object.sub_map;
    } else {
        submap = NULL;
        object = entry->object.vm_object;
    }

    vm_map_entry_unlink(map, entry);
    map->size -= e - s;

    vm_map_entry_dispose(map, entry);

    vm_map_unlock(map);
    /*
     * Deallocate the object only after removing all
     * pmap entries pointing to its pages.
     */
    if (submap)
        vm_map_deallocate(submap);
    else
        vm_object_deallocate(object);
}
void
vm_map_submap_pmap_clean(
    vm_map_t        map,
    vm_offset_t     start,
    vm_offset_t     end,
    vm_map_t        sub_map,
    vm_offset_t     offset)
{
    vm_offset_t     submap_start;
    vm_offset_t     submap_end;
    vm_size_t       remove_size;
    vm_map_entry_t  entry;

    submap_end = offset + (end - start);
    submap_start = offset;
    if(vm_map_lookup_entry(sub_map, offset, &entry)) {

        remove_size = (entry->vme_end - entry->vme_start);
        if(offset > entry->vme_start)
            remove_size -= offset - entry->vme_start;

        if(submap_end < entry->vme_end) {
            remove_size -=
                entry->vme_end - submap_end;
        }
        if(entry->is_sub_map) {
            vm_map_submap_pmap_clean(
                sub_map,
                start,
                start + remove_size,
                entry->object.sub_map,
                entry->offset);
        } else {
            if((map->mapped) && (map->ref_count)
                && (entry->object.vm_object != NULL)) {
                vm_object_pmap_protect(
                    entry->object.vm_object,
                    entry->offset,
                    remove_size,
                    PMAP_NULL,
                    entry->vme_start,
                    VM_PROT_NONE);
            } else {
                pmap_remove(map->pmap,
                    start, start + remove_size);
            }
        }
    }

    entry = entry->vme_next;

    while((entry != vm_map_to_entry(sub_map))
        && (entry->vme_start < submap_end)) {
        remove_size = (entry->vme_end - entry->vme_start);
        if(submap_end < entry->vme_end) {
            remove_size -= entry->vme_end - submap_end;
        }
        if(entry->is_sub_map) {
            vm_map_submap_pmap_clean(
                sub_map,
                (start + entry->vme_start) - offset,
                ((start + entry->vme_start) - offset) + remove_size,
                entry->object.sub_map,
                entry->offset);
        } else {
            if((map->mapped) && (map->ref_count)
                && (entry->object.vm_object != NULL)) {
                vm_object_pmap_protect(
                    entry->object.vm_object,
                    entry->offset,
                    remove_size,
                    PMAP_NULL,
                    entry->vme_start,
                    VM_PROT_NONE);
            } else {
                pmap_remove(map->pmap,
                    (start + entry->vme_start) - offset,
                    ((start + entry->vme_start)
                        - offset) + remove_size);
            }
        }
        entry = entry->vme_next;
    }
    return;
}
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings.  Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
kern_return_t
vm_map_delete(
    register vm_map_t       map,
    vm_offset_t             start,
    register vm_offset_t    end,
    int                     flags)
{
    vm_map_entry_t          entry, next;
    struct vm_map_entry     *first_entry, tmp_entry;
    register vm_offset_t    s, e;
    register vm_object_t    object;
    boolean_t               need_wakeup;
    unsigned int            last_timestamp = ~0; /* unlikely value */
    int                     interruptible;
    extern vm_map_t         kernel_map;

    interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
            THREAD_ABORTSAFE : THREAD_UNINT;

    /*
     * All our DMA I/O operations in IOKit are currently done by
     * wiring through the map entries of the task requesting the I/O.
     * Because of this, we must always wait for kernel wirings
     * to go away on the entries before deleting them.
     *
     * Any caller who wants to actually remove a kernel wiring
     * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
     * properly remove one wiring instead of blasting through
     * them all.
     */
    flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;

    /*
     * Find the start of the region, and clip it
     */
    if (vm_map_lookup_entry(map, start, &first_entry)) {
        entry = first_entry;
        vm_map_clip_start(map, entry, start);

        /*
         * Fix the lookup hint now, rather than each
         * time through the loop.
         */
        SAVE_HINT(map, entry->vme_prev);
    } else {
        entry = first_entry->vme_next;
    }

    need_wakeup = FALSE;
    /*
     * Step through all entries in this region
     */
    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {

        vm_map_clip_end(map, entry, end);
        if (entry->in_transition) {
            wait_result_t wait_result;

            /*
             * Another thread is wiring/unwiring this entry.
             * Let the other thread know we are waiting.
             */
            s = entry->vme_start;
            entry->needs_wakeup = TRUE;

            /*
             * wake up anybody waiting on entries that we have
             * already unwired/deleted.
             */
            if (need_wakeup) {
                vm_map_entry_wakeup(map);
                need_wakeup = FALSE;
            }

            wait_result = vm_map_entry_wait(map, interruptible);

            if (interruptible &&
                wait_result == THREAD_INTERRUPTED) {
                /*
                 * We do not clear the needs_wakeup flag,
                 * since we cannot tell if we were the only one.
                 */
                vm_map_unlock(map);
                return KERN_ABORTED;
            }

            /*
             * The entry could have been clipped or it
             * may not exist anymore.  Look it up again.
             */
            if (!vm_map_lookup_entry(map, s, &first_entry)) {
                assert((map != kernel_map) &&
                       (!entry->is_sub_map));
                /*
                 * User: use the next entry
                 */
                entry = first_entry->vme_next;
            } else {
                entry = first_entry;
                SAVE_HINT(map, entry->vme_prev);
            }
            last_timestamp = map->timestamp;
            continue;
        } /* end in_transition */

        if (entry->wired_count) {
            /*
             * Remove a kernel wiring if requested or if
             * there are user wirings.
             */
            if ((flags & VM_MAP_REMOVE_KUNWIRE) ||
                (entry->user_wired_count > 0))
                entry->wired_count--;

            /* remove all user wire references */
            entry->user_wired_count = 0;

            if (entry->wired_count != 0) {
                assert((map != kernel_map) &&
                       (!entry->is_sub_map));
                /*
                 * Cannot continue.  Typical case is when
                 * a user thread has physical io pending
                 * on this page.  Either wait for the
                 * kernel wiring to go away or return an
                 * error.
                 */
                if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
                    wait_result_t wait_result;

                    s = entry->vme_start;
                    entry->needs_wakeup = TRUE;
                    wait_result = vm_map_entry_wait(map,
                                interruptible);

                    if (interruptible &&
                        wait_result == THREAD_INTERRUPTED) {
                        /*
                         * We do not clear the
                         * needs_wakeup flag, since we
                         * cannot tell if we were the
                         * only one.
                         */
                        vm_map_unlock(map);
                        return KERN_ABORTED;
                    }

                    /*
                     * The entry could have been clipped or
                     * it may not exist anymore.  Look it
                     * up again.
                     */
                    if (!vm_map_lookup_entry(map, s,
                                &first_entry)) {
                        assert((map != kernel_map) &&
                               (!entry->is_sub_map));
                        /*
                         * User: use the next entry
                         */
                        entry = first_entry->vme_next;
                    } else {
                        entry = first_entry;
                        SAVE_HINT(map, entry->vme_prev);
                    }
                    last_timestamp = map->timestamp;
                    continue;
                } else {
                    return KERN_FAILURE;
                }
            }

            entry->in_transition = TRUE;
            /*
             * copy current entry.  see comment in vm_map_wire()
             */
            tmp_entry = *entry;
            s = entry->vme_start;
            e = entry->vme_end;

            /*
             * We can unlock the map now. The in_transition
             * state guarantees existence of the entry.
             */
            vm_map_unlock(map);
            vm_fault_unwire(map, &tmp_entry,
                tmp_entry.object.vm_object == kernel_object,
                map->pmap, tmp_entry.vme_start);
            vm_map_lock(map);

            if (last_timestamp+1 != map->timestamp) {
                /*
                 * Find the entry again.  It could have
                 * been clipped after we unlocked the map.
                 */
                if (!vm_map_lookup_entry(map, s, &first_entry)){
                    assert((map != kernel_map) &&
                           (!entry->is_sub_map));
                    first_entry = first_entry->vme_next;
                } else {
                    SAVE_HINT(map, entry->vme_prev);
                }
            } else {
                SAVE_HINT(map, entry->vme_prev);
                first_entry = entry;
            }

            last_timestamp = map->timestamp;

            entry = first_entry;
            while ((entry != vm_map_to_entry(map)) &&
                   (entry->vme_start < tmp_entry.vme_end)) {
                assert(entry->in_transition);
                entry->in_transition = FALSE;
                if (entry->needs_wakeup) {
                    entry->needs_wakeup = FALSE;
                    need_wakeup = TRUE;
                }
                entry = entry->vme_next;
            }
            /*
             * We have unwired the entry(s).  Go back and
             * delete them.
             */
            entry = first_entry;
            continue;
        }

        /* entry is unwired */
        assert(entry->wired_count == 0);
        assert(entry->user_wired_count == 0);

        if ((!entry->is_sub_map &&
            entry->object.vm_object != kernel_object) ||
            entry->is_sub_map) {
            if(entry->is_sub_map) {
                if(entry->use_pmap) {
                    pmap_unnest(map->pmap, entry->vme_start,
                        entry->vme_end - entry->vme_start);
                    if((map->mapped) && (map->ref_count)) {
                        /* clean up parent map/maps */
                        vm_map_submap_pmap_clean(
                            map, entry->vme_start,
                            entry->vme_end,
                            entry->object.sub_map,
                            entry->offset);
                    }
                } else {
                    vm_map_submap_pmap_clean(
                        map, entry->vme_start, entry->vme_end,
                        entry->object.sub_map,
                        entry->offset);
                }
            } else {
                if((map->mapped) && (map->ref_count)) {
                    vm_object_pmap_protect(
                        entry->object.vm_object,
                        entry->offset,
                        entry->vme_end - entry->vme_start,
                        PMAP_NULL,
                        entry->vme_start,
                        VM_PROT_NONE);
                } else {
                    pmap_remove(map->pmap,
                        entry->vme_start,
                        entry->vme_end);
                }
            }
        }

        next = entry->vme_next;
        s = next->vme_start;
        last_timestamp = map->timestamp;
        vm_map_entry_delete(map, entry);
        /* vm_map_entry_delete unlocks the map */
        vm_map_lock(map);
        entry = next;

        if(entry == vm_map_to_entry(map)) {
            break;
        }
        if (last_timestamp+1 != map->timestamp) {
            /*
             * we are responsible for deleting everything
             * from the given space; if someone has interfered
             * we pick up where we left off.  Back fills should
             * be all right for anyone except map_delete, and
             * we have to assume that the task has been fully
             * disabled before we get here
             */
            if (!vm_map_lookup_entry(map, s, &entry)){
                entry = entry->vme_next;
            } else {
                SAVE_HINT(map, entry->vme_prev);
            }
            /*
             * others can not only allocate behind us, we can
             * also see coalesce while we don't have the map lock
             */
            if(entry == vm_map_to_entry(map)) {
                break;
            }
            vm_map_clip_start(map, entry, s);
        }
        last_timestamp = map->timestamp;
    }

    if (map->wait_for_space)
        thread_wakeup((event_t) map);
    /*
     * wake up anybody waiting on entries that we have already deleted.
     */
    if (need_wakeup)
        vm_map_entry_wakeup(map);

    return KERN_SUCCESS;
}
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
    register vm_map_t       map,
    register vm_offset_t    start,
    register vm_offset_t    end,
    register boolean_t      flags)
{
    register kern_return_t  result;
    boolean_t               funnel_set = FALSE;
    funnel_t                *curflock;
    thread_t                cur_thread;

    cur_thread = current_thread();

    if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
        funnel_set = TRUE;
        curflock = cur_thread->funnel_lock;
        thread_funnel_set( curflock , FALSE);
    }
    vm_map_lock(map);
    VM_MAP_RANGE_CHECK(map, start, end);
    result = vm_map_delete(map, start, end, flags);
    vm_map_unlock(map);
    if (funnel_set) {
        thread_funnel_set( curflock, TRUE);
        funnel_set = FALSE;
    }
    return(result);
}
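
/*
 * Illustrative (non-build) sketch of the exported removal interface above.
 * A caller that has kernel-wired the range itself would pass
 * VM_MAP_REMOVE_KUNWIRE so that vm_map_delete drops that one wiring; an
 * ordinary unwired range can be removed with no flags (VM_MAP_NO_FLAGS is
 * assumed here to be the zero-flag constant from vm_map.h).  The helper
 * name and addresses are assumptions for the example only.
 */
#if 0   /* example only */
static kern_return_t
example_remove_range(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
    /* remove whole pages covering [addr, addr+size) */
    return vm_map_remove(map, trunc_page(addr),
                 round_page(addr + size), VM_MAP_NO_FLAGS);
}
#endif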
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
    vm_map_copy_t   copy)
{
    TR_DECL("vm_map_copy_discard");

/*  tr3("enter: copy 0x%x type %d", copy, copy->type);*/

    if (copy == VM_MAP_COPY_NULL)
        return;

    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        while (vm_map_copy_first_entry(copy) !=
               vm_map_copy_to_entry(copy)) {
            vm_map_entry_t  entry = vm_map_copy_first_entry(copy);

            vm_map_copy_entry_unlink(copy, entry);
            vm_object_deallocate(entry->object.vm_object);
            vm_map_copy_entry_dispose(copy, entry);
        }
        break;
    case VM_MAP_COPY_OBJECT:
        vm_object_deallocate(copy->cpy_object);
        break;
    case VM_MAP_COPY_KERNEL_BUFFER:
        /*
         * The vm_map_copy_t and possibly the data buffer were
         * allocated by a single call to kalloc(), i.e. the
         * vm_map_copy_t was not allocated out of the zone.
         */
        kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
        return;
    }
    zfree(vm_map_copy_zone, (vm_offset_t) copy);
}
/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *		Move the information in a map copy object to
 *		a new map copy object, leaving the old one
 *		empty.
 *
 *		This is used by kernel routines that need
 *		to look at out-of-line data (in copyin form)
 *		before deciding whether to return SUCCESS.
 *		If the routine returns FAILURE, the original
 *		copy object will be deallocated; therefore,
 *		these routines must make a copy of the copy
 *		object and leave the original empty so that
 *		deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
    vm_map_copy_t   copy)
{
    vm_map_copy_t   new_copy;

    if (copy == VM_MAP_COPY_NULL)
        return VM_MAP_COPY_NULL;

    /*
     * Allocate a new copy object, and copy the information
     * from the old one into it.
     */
    new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
    *new_copy = *copy;

    if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
        /*
         * The links in the entry chain must be
         * changed to point to the new copy object.
         */
        vm_map_copy_first_entry(copy)->vme_prev
            = vm_map_copy_to_entry(new_copy);
        vm_map_copy_last_entry(copy)->vme_next
            = vm_map_copy_to_entry(new_copy);
    }

    /*
     * Change the old copy object into one that contains
     * nothing to be deallocated.
     */
    copy->type = VM_MAP_COPY_OBJECT;
    copy->cpy_object = VM_OBJECT_NULL;

    /*
     * Return the new object.
     */
    return new_copy;
}
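
/*
 * Illustrative (non-build) sketch of the pattern described above: a routine
 * that must inspect out-of-line data before committing takes a private copy
 * of the copy object, so that a failure return (which deallocates the
 * original) cannot leave it holding a dangling reference.  The helper name
 * and the example_validate() check are assumptions for the example.
 */
#if 0   /* example only */
static kern_return_t
example_consume_copy(vm_map_copy_t copy)
{
    vm_map_copy_t private_copy;

    private_copy = vm_map_copy_copy(copy); /* original is now empty */

    if (!example_validate(private_copy)) {      /* hypothetical check */
        vm_map_copy_discard(private_copy);
        return KERN_FAILURE;
    }
    /* ... hand private_copy to vm_map_copyout() or similar ... */
    return KERN_SUCCESS;
}
#endif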
kern_return_t
vm_map_overwrite_submap_recurse(
    vm_map_t        dst_map,
    vm_offset_t     dst_addr,
    vm_size_t       dst_size)
{
    vm_offset_t     dst_end;
    vm_map_entry_t  tmp_entry;
    vm_map_entry_t  entry;
    kern_return_t   result;
    boolean_t       encountered_sub_map = FALSE;

    /*
     * Verify that the destination is all writeable
     * initially.  We have to trunc the destination
     * address and round the copy size or we'll end up
     * splitting entries in strange ways.
     */
    dst_end = round_page(dst_addr + dst_size);
    vm_map_lock(dst_map);

start_pass_1:
    if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
        vm_map_unlock(dst_map);
        return(KERN_INVALID_ADDRESS);
    }

    vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));

    for (entry = tmp_entry;;) {
        vm_map_entry_t  next;

        next = entry->vme_next;
        while(entry->is_sub_map) {
            vm_offset_t sub_start;
            vm_offset_t sub_end;
            vm_offset_t local_end;

            if (entry->in_transition) {
                /*
                 * Say that we are waiting, and wait for entry.
                 */
                entry->needs_wakeup = TRUE;
                vm_map_entry_wait(dst_map, THREAD_UNINT);

                goto start_pass_1;
            }

            encountered_sub_map = TRUE;
            sub_start = entry->offset;

            if(entry->vme_end < dst_end)
                sub_end = entry->vme_end;
            else
                sub_end = dst_end;
            sub_end -= entry->vme_start;
            sub_end += entry->offset;
            local_end = entry->vme_end;
            vm_map_unlock(dst_map);

            result = vm_map_overwrite_submap_recurse(
                entry->object.sub_map,
                sub_start,
                sub_end - sub_start);

            if(result != KERN_SUCCESS)
                return result;
            if (dst_end <= entry->vme_end)
                return KERN_SUCCESS;
            vm_map_lock(dst_map);
            if(!vm_map_lookup_entry(dst_map, local_end,
                        &entry)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
            }
            next = entry->vme_next;
        }

        if ( ! (entry->protection & VM_PROT_WRITE)) {
            vm_map_unlock(dst_map);
            return(KERN_PROTECTION_FAILURE);
        }

        /*
         * If the entry is in transition, we must wait
         * for it to exit that state.  Anything could happen
         * when we unlock the map, so start over.
         */
        if (entry->in_transition) {
            /*
             * Say that we are waiting, and wait for entry.
             */
            entry->needs_wakeup = TRUE;
            vm_map_entry_wait(dst_map, THREAD_UNINT);

            goto start_pass_1;
        }

        /*
         * our range is contained completely within this map entry
         */
        if (dst_end <= entry->vme_end) {
            vm_map_unlock(dst_map);
            return KERN_SUCCESS;
        }
        /*
         * check that range specified is contiguous region
         */
        if ((next == vm_map_to_entry(dst_map)) ||
            (next->vme_start != entry->vme_end)) {
            vm_map_unlock(dst_map);
            return(KERN_INVALID_ADDRESS);
        }

        /*
         * Check for permanent objects in the destination.
         */
        if ((entry->object.vm_object != VM_OBJECT_NULL) &&
            ((!entry->object.vm_object->internal) ||
             (entry->object.vm_object->true_share))) {
            if(encountered_sub_map) {
                vm_map_unlock(dst_map);
                return(KERN_FAILURE);
            }
        }

        entry = next;
    }/* for */
    vm_map_unlock(dst_map);
    return(KERN_SUCCESS);
}
/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */
kern_return_t
vm_map_copy_overwrite_nested(
    vm_map_t        dst_map,
    vm_offset_t     dst_addr,
    vm_map_copy_t   copy,
    boolean_t       interruptible,
    pmap_t          pmap)
{
    vm_offset_t     dst_end;
    vm_map_entry_t  tmp_entry;
    vm_map_entry_t  entry;
    kern_return_t   kr;
    boolean_t       aligned = TRUE;
    boolean_t       contains_permanent_objects = FALSE;
    boolean_t       encountered_sub_map = FALSE;
    vm_offset_t     base_addr;
    vm_size_t       copy_size;
    vm_size_t       total_size;

    /*
     * Check for null copy object.
     */
    if (copy == VM_MAP_COPY_NULL)
        return(KERN_SUCCESS);

    /*
     * Check for special kernel buffer allocated
     * by new_ipc_kmsg_copyin.
     */
    if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
        return(vm_map_copyout_kernel_buffer(
                    dst_map, &dst_addr,
                    copy, TRUE));
    }

    /*
     * Only works for entry lists at the moment.  Will
     * support page lists later.
     */
    assert(copy->type == VM_MAP_COPY_ENTRY_LIST);

    if (copy->size == 0) {
        vm_map_copy_discard(copy);
        return(KERN_SUCCESS);
    }

    /*
     * Verify that the destination is all writeable
     * initially.  We have to trunc the destination
     * address and round the copy size or we'll end up
     * splitting entries in strange ways.
     */
    if (!page_aligned(copy->size) ||
        !page_aligned (copy->offset) ||
        !page_aligned (dst_addr))
    {
        aligned = FALSE;
        dst_end = round_page(dst_addr + copy->size);
    } else {
        dst_end = dst_addr + copy->size;
    }

    vm_map_lock(dst_map);

start_pass_1:
    if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
        vm_map_unlock(dst_map);
        return(KERN_INVALID_ADDRESS);
    }
    vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
    for (entry = tmp_entry;;) {
        vm_map_entry_t  next = entry->vme_next;

        while(entry->is_sub_map) {
            vm_offset_t sub_start;
            vm_offset_t sub_end;
            vm_offset_t local_end;

            if (entry->in_transition) {
                /*
                 * Say that we are waiting, and wait for entry.
                 */
                entry->needs_wakeup = TRUE;
                vm_map_entry_wait(dst_map, THREAD_UNINT);

                goto start_pass_1;
            }

            local_end = entry->vme_end;
            if (!(entry->needs_copy)) {
                /* if needs_copy we are a COW submap */
                /* in such a case we just replace so */
                /* there is no need for the follow-  */
                /* ing check.                        */
                encountered_sub_map = TRUE;
                sub_start = entry->offset;

                if(entry->vme_end < dst_end)
                    sub_end = entry->vme_end;
                else
                    sub_end = dst_end;
                sub_end -= entry->vme_start;
                sub_end += entry->offset;
                vm_map_unlock(dst_map);

                kr = vm_map_overwrite_submap_recurse(
                    entry->object.sub_map,
                    sub_start,
                    sub_end - sub_start);
                if(kr != KERN_SUCCESS)
                    return kr;
                vm_map_lock(dst_map);
            }

            if (dst_end <= entry->vme_end)
                goto start_overwrite;
            if(!vm_map_lookup_entry(dst_map, local_end,
                        &entry)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
            }
            next = entry->vme_next;
        }

        if ( ! (entry->protection & VM_PROT_WRITE)) {
            vm_map_unlock(dst_map);
            return(KERN_PROTECTION_FAILURE);
        }

        /*
         * If the entry is in transition, we must wait
         * for it to exit that state.  Anything could happen
         * when we unlock the map, so start over.
         */
        if (entry->in_transition) {
            /*
             * Say that we are waiting, and wait for entry.
             */
            entry->needs_wakeup = TRUE;
            vm_map_entry_wait(dst_map, THREAD_UNINT);

            goto start_pass_1;
        }

        /*
         * our range is contained completely within this map entry
         */
        if (dst_end <= entry->vme_end)
            break;
        /*
         * check that range specified is contiguous region
         */
        if ((next == vm_map_to_entry(dst_map)) ||
            (next->vme_start != entry->vme_end)) {
            vm_map_unlock(dst_map);
            return(KERN_INVALID_ADDRESS);
        }

        /*
         * Check for permanent objects in the destination.
         */
        if ((entry->object.vm_object != VM_OBJECT_NULL) &&
            ((!entry->object.vm_object->internal) ||
             (entry->object.vm_object->true_share))) {
            contains_permanent_objects = TRUE;
        }

        entry = next;
    }/* for */

    /*
     * If there are permanent objects in the destination, then
     * the copy cannot be interrupted.
     */
    if (interruptible && contains_permanent_objects) {
        vm_map_unlock(dst_map);
        return(KERN_FAILURE);   /* XXX */
    }

start_overwrite:
    /*
     * Make a second pass, overwriting the data
     * At the beginning of each loop iteration,
     * the next entry to be overwritten is "tmp_entry"
     * (initially, the value returned from the lookup above),
     * and the starting address expected in that entry
     * is "start".
     */

    total_size = copy->size;
    if(encountered_sub_map) {
        copy_size = 0;
        /* re-calculate tmp_entry since we've had the map */
        /* unlocked */
        if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
            vm_map_unlock(dst_map);
            return(KERN_INVALID_ADDRESS);
        }
    } else {
        copy_size = copy->size;
    }

    base_addr = dst_addr;
    while(TRUE) {
        /* deconstruct the copy object and do in parts */
        /* only in sub_map, interruptible case */
        vm_map_entry_t  copy_entry;
        vm_map_entry_t  previous_prev;
        vm_map_entry_t  next_copy;
        int             nentries;
        int             remaining_entries;
        int             new_offset;

        for (entry = tmp_entry; copy_size == 0;) {
            vm_map_entry_t  next;

            next = entry->vme_next;

            /* tmp_entry and base address are moved along */
            /* each time we encounter a sub-map.  Otherwise */
            /* entry can outpace tmp_entry, and the copy_size */
            /* may reflect the distance between them. */
            /* If the current entry is found to be in transition */
            /* we will start over at the beginning or the last */
            /* encounter of a submap as dictated by base_addr; */
            /* we will zero copy_size accordingly. */
            if (entry->in_transition) {
                /*
                 * Say that we are waiting, and wait for entry.
                 */
                entry->needs_wakeup = TRUE;
                vm_map_entry_wait(dst_map, THREAD_UNINT);

                if(!vm_map_lookup_entry(dst_map, base_addr,
                            &tmp_entry)) {
                    vm_map_unlock(dst_map);
                    return(KERN_INVALID_ADDRESS);
                }
                copy_size = 0;
                entry = tmp_entry;
                continue;
            }
            if(entry->is_sub_map) {
                vm_offset_t sub_start;
                vm_offset_t sub_end;
                vm_offset_t local_end;

                if (entry->needs_copy) {
                    /* if this is a COW submap */
                    /* just back the range with an */
                    /* anonymous entry */
                    if(entry->vme_end < dst_end)
                        sub_end = entry->vme_end;
                    else
                        sub_end = dst_end;
                    if(entry->vme_start < base_addr)
                        sub_start = base_addr;
                    else
                        sub_start = entry->vme_start;
                    vm_map_clip_end(
                        dst_map, entry, sub_end);
                    vm_map_clip_start(
                        dst_map, entry, sub_start);
                    entry->is_sub_map = FALSE;
                    vm_map_deallocate(
                        entry->object.sub_map);
                    entry->object.sub_map = NULL;
                    entry->is_shared = FALSE;
                    entry->needs_copy = FALSE;
                    entry->offset = 0;
                    entry->protection = VM_PROT_ALL;
                    entry->max_protection = VM_PROT_ALL;
                    entry->wired_count = 0;
                    entry->user_wired_count = 0;
                    if(entry->inheritance
                        == VM_INHERIT_SHARE)
                        entry->inheritance = VM_INHERIT_COPY;
                    continue;
                }
                /* first take care of any non-sub_map */
                /* entries to send */
                if(base_addr < entry->vme_start) {
                    /* stuff to send */
                    copy_size =
                        entry->vme_start - base_addr;
                    break;
                }
                sub_start = entry->offset;

                if(entry->vme_end < dst_end)
                    sub_end = entry->vme_end;
                else
                    sub_end = dst_end;
                sub_end -= entry->vme_start;
                sub_end += entry->offset;
                local_end = entry->vme_end;
                vm_map_unlock(dst_map);
                copy_size = sub_end - sub_start;

                /* adjust the copy object */
                if (total_size > copy_size) {
                    vm_size_t local_size = 0;
                    vm_size_t entry_size;

                    nentries = 1;
                    new_offset = copy->offset;
                    copy_entry = vm_map_copy_first_entry(copy);
                    while(copy_entry !=
                        vm_map_copy_to_entry(copy)){
                        entry_size = copy_entry->vme_end -
                            copy_entry->vme_start;
                        if((local_size < copy_size) &&
                            ((local_size + entry_size)
                            >= copy_size)) {
                            vm_map_copy_clip_end(copy,
                                copy_entry,
                                copy_entry->vme_start +
                                (copy_size - local_size));
                            entry_size = copy_entry->vme_end -
                                copy_entry->vme_start;
                            local_size += entry_size;
                            new_offset += entry_size;
                        }
                        if(local_size >= copy_size) {
                            next_copy = copy_entry->vme_next;
                            copy_entry->vme_next =
                                vm_map_copy_to_entry(copy);
                            previous_prev =
                                copy->cpy_hdr.links.prev;
                            copy->cpy_hdr.links.prev = copy_entry;
                            copy->size = copy_size;
                            remaining_entries =
                                copy->cpy_hdr.nentries;
                            remaining_entries -= nentries;
                            copy->cpy_hdr.nentries = nentries;
                            break;
                        } else {
                            local_size += entry_size;
                            new_offset += entry_size;
                            nentries++;
                        }
                        copy_entry = copy_entry->vme_next;
                    }
                }

                if((entry->use_pmap) && (pmap == NULL)) {
                    kr = vm_map_copy_overwrite_nested(
                        entry->object.sub_map,
                        sub_start,
                        copy,
                        interruptible,
                        entry->object.sub_map->pmap);
                } else if (pmap != NULL) {
                    kr = vm_map_copy_overwrite_nested(
                        entry->object.sub_map,
                        sub_start,
                        copy,
                        interruptible, pmap);
                } else {
                    kr = vm_map_copy_overwrite_nested(
                        entry->object.sub_map,
                        sub_start,
                        copy,
                        interruptible,
                        dst_map->pmap);
                }
                if(kr != KERN_SUCCESS) {
                    if(next_copy != NULL) {
                        copy->cpy_hdr.nentries +=
                            remaining_entries;
                        copy->cpy_hdr.links.prev->vme_next =
                            next_copy;
                        copy->cpy_hdr.links.prev
                            = previous_prev;
                        copy->size = total_size;
                    }
                    return kr;
                }
                if (dst_end <= local_end) {
                    return(KERN_SUCCESS);
                }
                /* otherwise copy no longer exists, it was */
                /* destroyed after successful copy_overwrite */
                copy = (vm_map_copy_t)
                    zalloc(vm_map_copy_zone);
                vm_map_copy_first_entry(copy) =
                    vm_map_copy_last_entry(copy) =
                    vm_map_copy_to_entry(copy);
                copy->type = VM_MAP_COPY_ENTRY_LIST;
                copy->offset = new_offset;

                total_size -= copy_size;
                copy_size = 0;
                /* put back remainder of copy in container */
                if(next_copy != NULL) {
                    copy->cpy_hdr.nentries = remaining_entries;
                    copy->cpy_hdr.links.next = next_copy;
                    copy->cpy_hdr.links.prev = previous_prev;
                    copy->size = total_size;
                    next_copy->vme_prev =
                        vm_map_copy_to_entry(copy);
                    next_copy = NULL;
                }
                base_addr = local_end;
                vm_map_lock(dst_map);
                if(!vm_map_lookup_entry(dst_map,
                        local_end, &tmp_entry)) {
                    vm_map_unlock(dst_map);
                    return(KERN_INVALID_ADDRESS);
                }
                entry = tmp_entry;
                continue;
            }
            if (dst_end <= entry->vme_end) {
                copy_size = dst_end - base_addr;
                break;
            }

            if ((next == vm_map_to_entry(dst_map)) ||
                (next->vme_start != entry->vme_end)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
            }

            entry = next;
        }/* for */

        next_copy = NULL;
        nentries = 1;

        /* adjust the copy object */
        if (total_size > copy_size) {
            vm_size_t local_size = 0;
            vm_size_t entry_size;

            new_offset = copy->offset;
            copy_entry = vm_map_copy_first_entry(copy);
            while(copy_entry != vm_map_copy_to_entry(copy)) {
                entry_size = copy_entry->vme_end -
                    copy_entry->vme_start;
                if((local_size < copy_size) &&
                    ((local_size + entry_size)
                    >= copy_size)) {
                    vm_map_copy_clip_end(copy, copy_entry,
                        copy_entry->vme_start +
                        (copy_size - local_size));
                    entry_size = copy_entry->vme_end -
                        copy_entry->vme_start;
                    local_size += entry_size;
                    new_offset += entry_size;
                }
                if(local_size >= copy_size) {
                    next_copy = copy_entry->vme_next;
                    copy_entry->vme_next =
                        vm_map_copy_to_entry(copy);
                    previous_prev =
                        copy->cpy_hdr.links.prev;
                    copy->cpy_hdr.links.prev = copy_entry;
                    copy->size = copy_size;
                    remaining_entries =
                        copy->cpy_hdr.nentries;
                    remaining_entries -= nentries;
                    copy->cpy_hdr.nentries = nentries;
                    break;
                } else {
                    local_size += entry_size;
                    new_offset += entry_size;
                    nentries++;
                }
                copy_entry = copy_entry->vme_next;
            }
        }

        if (aligned) {
            pmap_t  local_pmap;

            if(pmap)
                local_pmap = pmap;
            else
                local_pmap = dst_map->pmap;

            if ((kr =  vm_map_copy_overwrite_aligned(
                dst_map, tmp_entry, copy,
                base_addr, local_pmap)) != KERN_SUCCESS) {
                if(next_copy != NULL) {
                    copy->cpy_hdr.nentries +=
                            remaining_entries;
                    copy->cpy_hdr.links.prev->vme_next =
                            next_copy;
                    copy->cpy_hdr.links.prev =
                            previous_prev;
                    copy->size += copy_size;
                }
                return kr;
            }
            vm_map_unlock(dst_map);
        } else {
            /*
             * if the copy and dst address are misaligned but the same
             * offset within the page we can copy_not_aligned the
             * misaligned parts and copy aligned the rest.  If they are
             * aligned but len is unaligned we simply need to copy
             * the end bit unaligned.  We'll need to split the misaligned
             * bits of the region in this case !
             */
            /* ALWAYS UNLOCKS THE dst_map MAP */
            if ((kr =  vm_map_copy_overwrite_unaligned( dst_map,
                tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
                if(next_copy != NULL) {
                    copy->cpy_hdr.nentries +=
                            remaining_entries;
                    copy->cpy_hdr.links.prev->vme_next =
                            next_copy;
                    copy->cpy_hdr.links.prev =
                            previous_prev;
                    copy->size += copy_size;
                }
                return kr;
            }
        }
        total_size -= copy_size;
        if(total_size == 0)
            break;
        base_addr += copy_size;
        copy_size = 0;
        copy->offset = new_offset;
        if(next_copy != NULL) {
            copy->cpy_hdr.nentries = remaining_entries;
            copy->cpy_hdr.links.next = next_copy;
            copy->cpy_hdr.links.prev = previous_prev;
            next_copy->vme_prev = vm_map_copy_to_entry(copy);
            copy->size = total_size;
        }
        vm_map_lock(dst_map);
        while(TRUE) {
            if (!vm_map_lookup_entry(dst_map,
                        base_addr, &tmp_entry)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
            }
            if (tmp_entry->in_transition) {
                entry->needs_wakeup = TRUE;
                vm_map_entry_wait(dst_map, THREAD_UNINT);
            } else {
                break;
            }
        }
        vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
    }/* while */

    /*
     * Throw away the vm_map_copy object
     */
    vm_map_copy_discard(copy);

    return(KERN_SUCCESS);
}/* vm_map_copy_overwrite */
kern_return_t
vm_map_copy_overwrite(
    vm_map_t        dst_map,
    vm_offset_t     dst_addr,
    vm_map_copy_t   copy,
    boolean_t       interruptible)
{
    return vm_map_copy_overwrite_nested(
        dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
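
/*
 * Illustrative (non-build) sketch of the copyin/overwrite pairing: data is
 * captured from a source map with vm_map_copyin() and then written over an
 * existing, writeable destination range with the routine above.  On success
 * the copy object is consumed; on failure the caller still owns it and must
 * discard it.  The helper name, maps and addresses are assumptions for the
 * example; vm_map_copyin() is assumed to take (map, addr, len, src_destroy,
 * &copy) as elsewhere in Mach.
 */
#if 0   /* example only */
static kern_return_t
example_overwrite(vm_map_t src_map, vm_offset_t src_addr,
          vm_map_t dst_map, vm_offset_t dst_addr, vm_size_t len)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, TRUE);
    if (kr != KERN_SUCCESS)
        vm_map_copy_discard(copy);  /* not consumed on failure */
    return kr;
}
#endif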
/*
 *	Routine:	vm_map_copy_overwrite_unaligned
 *
 *	Decription:
 *		Physically copy unaligned data
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible, however vm_fault_copy copies
 *		within 1 memory object so we have to find the smaller of "amount left"
 *		"source object data size" and "target object data size".  With
 *		unaligned data we don't need to split regions, therefore the source
 *		(copy) object should be one map entry, the target range may be split
 *		over multiple map entries however.  In any event we are pessimistic
 *		about these assumptions.
 *
 *	Assumptions:
 *		dst_map is locked on entry and is return locked on success,
 *		unlocked on error.
 */
kern_return_t
vm_map_copy_overwrite_unaligned(
    vm_map_t        dst_map,
    vm_map_entry_t  entry,
    vm_map_copy_t   copy,
    vm_offset_t     start)
{
    vm_map_entry_t      copy_entry = vm_map_copy_first_entry(copy);
    vm_map_version_t    version;
    vm_object_t         dst_object;
    vm_object_offset_t  dst_offset;
    vm_object_offset_t  src_offset;
    vm_object_offset_t  entry_offset;
    vm_offset_t         entry_end;
    vm_size_t           src_size,
                        dst_size,
                        copy_size,
                        amount_left;
    kern_return_t       kr = KERN_SUCCESS;

    vm_map_lock_write_to_read(dst_map);

    src_offset = copy->offset - trunc_page_64(copy->offset);
    amount_left = copy->size;
    /*
     * unaligned so we never clipped this entry, we need the offset into
     * the vm_object not just the data.
     */
    while (amount_left > 0) {

        if (entry == vm_map_to_entry(dst_map)) {
            vm_map_unlock_read(dst_map);
            return KERN_INVALID_ADDRESS;
        }

        /* "start" must be within the current map entry */
        assert ((start>=entry->vme_start) && (start<entry->vme_end));

        dst_offset = start - entry->vme_start;

        dst_size = entry->vme_end - start;

        src_size = copy_entry->vme_end -
            (copy_entry->vme_start + src_offset);

        if (dst_size < src_size) {
            /*
             * we can only copy dst_size bytes before
             * we have to get the next destination entry
             */
            copy_size = dst_size;
        } else {
            /*
             * we can only copy src_size bytes before
             * we have to get the next source copy entry
             */
            copy_size = src_size;
        }

        if (copy_size > amount_left) {
            copy_size = amount_left;
        }
        /*
         * Entry needs copy, create a shadow object for the
         * Copy on write region.
         */
        if (entry->needs_copy &&
            ((entry->protection & VM_PROT_WRITE) != 0))
        {
            if (vm_map_lock_read_to_write(dst_map)) {
                vm_map_lock_read(dst_map);
                goto RetryLookup;
            }
            vm_object_shadow(&entry->object.vm_object,
                    &entry->offset,
                    (vm_size_t)(entry->vme_end
                        - entry->vme_start));
            entry->needs_copy = FALSE;
            vm_map_lock_write_to_read(dst_map);
        }
        dst_object = entry->object.vm_object;
        /*
         * unlike with the virtual (aligned) copy we're going
         * to fault on it therefore we need a target object.
         */
        if (dst_object == VM_OBJECT_NULL) {
            if (vm_map_lock_read_to_write(dst_map)) {
                vm_map_lock_read(dst_map);
                goto RetryLookup;
            }
            dst_object = vm_object_allocate((vm_size_t)
                    entry->vme_end - entry->vme_start);
            entry->object.vm_object = dst_object;
            entry->offset = 0;
            vm_map_lock_write_to_read(dst_map);
        }
        /*
         * Take an object reference and unlock map. The "entry" may
         * disappear or change when the map is unlocked.
         */
        vm_object_reference(dst_object);
        version.main_timestamp = dst_map->timestamp;
        entry_offset = entry->offset;
        entry_end = entry->vme_end;
        vm_map_unlock_read(dst_map);
        /*
         * Copy as much as possible in one pass
         */
        kr = vm_fault_copy(
            copy_entry->object.vm_object,
            copy_entry->offset + src_offset,
            &copy_size,
            dst_object,
            entry_offset + dst_offset,
            dst_map,
            &version,
            THREAD_UNINT );

        start += copy_size;
        src_offset += copy_size;
        amount_left -= copy_size;
        /*
         * Release the object reference
         */
        vm_object_deallocate(dst_object);
        /*
         * If a hard error occurred, return it now
         */
        if (kr != KERN_SUCCESS)
            return kr;

        if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
            || amount_left == 0)
        {
            /*
             * all done with this copy entry, dispose.
             */
            vm_map_copy_entry_unlink(copy, copy_entry);
            vm_object_deallocate(copy_entry->object.vm_object);
            vm_map_copy_entry_dispose(copy, copy_entry);

            if ((copy_entry = vm_map_copy_first_entry(copy))
                == vm_map_copy_to_entry(copy) && amount_left) {
                /*
                 * not finished copying but run out of source
                 */
                return KERN_INVALID_ADDRESS;
            }
            src_offset = 0;
        }

        if (amount_left == 0)
            return KERN_SUCCESS;

        vm_map_lock_read(dst_map);
        if (version.main_timestamp == dst_map->timestamp) {
            if (start == entry_end) {
                /*
                 * destination region is split.  Use the version
                 * information to avoid a lookup in the normal
                 * case.
                 */
                entry = entry->vme_next;
                /*
                 * should be contiguous. Fail if we encounter
                 * a hole in the destination.
                 */
                if (start != entry->vme_start) {
                    vm_map_unlock_read(dst_map);
                    return KERN_INVALID_ADDRESS;
                }
            }
        } else {
            /*
             * Map version check failed.
             * we must lookup the entry because somebody
             * might have changed the map behind our backs.
             */
RetryLookup:
            if (!vm_map_lookup_entry(dst_map, start, &entry))
            {
                vm_map_unlock_read(dst_map);
                return KERN_INVALID_ADDRESS;
            }
        }
    }/* while */

    vm_map_unlock_read(dst_map);

    return KERN_SUCCESS;
}/* vm_map_copy_overwrite_unaligned */
/*
 *	Routine:	vm_map_copy_overwrite_aligned
 *
 *	Description:
 *		Does all the vm_trickery possible for whole pages.
 *
 *	Implementation:
 *		If there are no permanent objects in the destination,
 *		and the source and destination map entry zones match,
 *		and the destination map entry is not shared,
 *		then the map entries can be deleted and replaced
 *		with those from the copy.  The following code is the
 *		basic idea of what to do, but there are lots of annoying
 *		little details about getting protection and inheritance
 *		right.  Should add protection, inheritance, and sharing checks
 *		to the above pass and make sure that no wiring is involved.
 */
kern_return_t
vm_map_copy_overwrite_aligned(
    vm_map_t        dst_map,
    vm_map_entry_t  tmp_entry,
    vm_map_copy_t   copy,
    vm_offset_t     start,
    pmap_t          pmap)
{
    vm_object_t     object;
    vm_map_entry_t  copy_entry;
    vm_size_t       copy_size;
    vm_size_t       size;
    vm_map_entry_t  entry;

    while ((copy_entry = vm_map_copy_first_entry(copy))
        != vm_map_copy_to_entry(copy))
    {
        copy_size = (copy_entry->vme_end - copy_entry->vme_start);

        entry = tmp_entry;
        if (entry == vm_map_to_entry(dst_map)) {
            vm_map_unlock(dst_map);
            return KERN_INVALID_ADDRESS;
        }
        size = (entry->vme_end - entry->vme_start);
        /*
         * Make sure that no holes popped up in the
         * address map, and that the protection is
         * still valid, in case the map was unlocked
         * earlier.
         */
        if ((entry->vme_start != start) || ((entry->is_sub_map)
            && !entry->needs_copy)) {
            vm_map_unlock(dst_map);
            return(KERN_INVALID_ADDRESS);
        }
        assert(entry != vm_map_to_entry(dst_map));

        /*
         * Check protection again
         */
        if ( ! (entry->protection & VM_PROT_WRITE)) {
            vm_map_unlock(dst_map);
            return(KERN_PROTECTION_FAILURE);
        }

        /*
         * Adjust to source size first
         */
        if (copy_size < size) {
            vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
            size = copy_size;
        }

        /*
         * Adjust to destination size
         */
        if (size < copy_size) {
            vm_map_copy_clip_end(copy, copy_entry,
                copy_entry->vme_start + size);
            copy_size = size;
        }

        assert((entry->vme_end - entry->vme_start) == size);
        assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
        assert((copy_entry->vme_end - copy_entry->vme_start) == size);

        /*
         * If the destination contains temporary unshared memory,
         * we can perform the copy by throwing it away and
         * installing the source data.
         */
        object = entry->object.vm_object;
        if ((!entry->is_shared &&
            ((object == VM_OBJECT_NULL) ||
            (object->internal && !object->true_share))) ||
            entry->needs_copy) {
            vm_object_t         old_object = entry->object.vm_object;
            vm_object_offset_t  old_offset = entry->offset;
            vm_object_offset_t  offset;

            /*
             * Ensure that the source and destination aren't
             * identical
             */
            if (old_object == copy_entry->object.vm_object &&
                old_offset == copy_entry->offset) {
                vm_map_copy_entry_unlink(copy, copy_entry);
                vm_map_copy_entry_dispose(copy, copy_entry);

                if (old_object != VM_OBJECT_NULL)
                    vm_object_deallocate(old_object);

                start = tmp_entry->vme_end;
                tmp_entry = tmp_entry->vme_next;
                continue;
            }

            if (old_object != VM_OBJECT_NULL) {
                if(entry->is_sub_map) {
                    if(entry->use_pmap) {
                        pmap_unnest(dst_map->pmap,
                            entry->vme_start,
                            entry->vme_end
                            - entry->vme_start);
                        if(dst_map->mapped) {
                            /* clean up parent */
                            /* map/maps */
                            vm_map_submap_pmap_clean(
                                dst_map, entry->vme_start,
                                entry->vme_end,
                                entry->object.sub_map,
                                entry->offset);
                        }
                    } else {
                        vm_map_submap_pmap_clean(
                            dst_map, entry->vme_start,
                            entry->vme_end,
                            entry->object.sub_map,
                            entry->offset);
                    }
                    vm_map_deallocate(
                        entry->object.sub_map);
                } else {
                    if(dst_map->mapped) {
                        vm_object_pmap_protect(
                            entry->object.vm_object,
                            entry->offset,
                            entry->vme_end
                            - entry->vme_start,
                            PMAP_NULL,
                            entry->vme_start,
                            VM_PROT_NONE);
                    } else {
                        pmap_remove(dst_map->pmap,
                            entry->vme_start,
                            entry->vme_end);
                    }
                    vm_object_deallocate(old_object);
                }
            }

            entry->is_sub_map = FALSE;
            entry->object = copy_entry->object;
            object = entry->object.vm_object;
            entry->needs_copy = copy_entry->needs_copy;
            entry->wired_count = 0;
            entry->user_wired_count = 0;
            offset = entry->offset = copy_entry->offset;

            vm_map_copy_entry_unlink(copy, copy_entry);
            vm_map_copy_entry_dispose(copy, copy_entry);
#if BAD_OPTIMIZATION
            /*
             * if we turn this optimization back on
             * we need to revisit our use of pmap mappings
             * large copies will cause us to run out and panic
             * this optimization only saved on average 2 us per page if ALL
             * the pages in the source were currently mapped
             * and ALL the pages in the dest were touched, if there were fewer
             * than 2/3 of the pages touched, this optimization actually cost more cycles
             * than it saved.
             */

            /*
             * Try to aggressively enter physical mappings
             * (but avoid uninstantiated objects)
             */
            if (object != VM_OBJECT_NULL) {
                vm_offset_t va = entry->vme_start;

                while (va < entry->vme_end) {
                    register vm_page_t  m;
                    vm_prot_t           prot;

                    /*
                     * Look for the page in the top object
                     */
                    prot = entry->protection;
                    vm_object_lock(object);
                    vm_object_paging_begin(object);

                    if ((m = vm_page_lookup(object,offset)) !=
                        VM_PAGE_NULL && !m->busy &&
                        !m->fictitious &&
                        (!m->unusual || (!m->error &&
                        !m->restart && !m->absent &&
                        (prot & m->page_lock) == 0))) {

                        m->busy = TRUE;
                        vm_object_unlock(object);

                        /*
                         * Honor COW obligations
                         */
                        if (entry->needs_copy)
                            prot &= ~VM_PROT_WRITE;
                        /* It is our policy to require */
                        /* explicit sync from anyone   */
                        /* writing code and then       */
                        /* a pc to execute it.         */
                        /* No isync here */

                        PMAP_ENTER(pmap, va, m, prot,
                            VM_WIMG_USE_DEFAULT, FALSE);

                        vm_object_lock(object);
                        vm_page_lock_queues();
                        if (!m->active && !m->inactive)
                            vm_page_activate(m);
                        vm_page_unlock_queues();
                        PAGE_WAKEUP_DONE(m);
                    }
                    vm_object_paging_end(object);
                    vm_object_unlock(object);

                    offset += PAGE_SIZE_64;
                    va += PAGE_SIZE;
                } /* end while (va < entry->vme_end) */
            } /* end if (object) */
#endif
            /*
             * Set up for the next iteration.  The map
             * has not been unlocked, so the next
             * address should be at the end of this
             * entry, and the next map entry should be
             * the one following it.
             */
            start = tmp_entry->vme_end;
            tmp_entry = tmp_entry->vme_next;
        } else {
            vm_map_version_t    version;
            vm_object_t         dst_object = entry->object.vm_object;
            vm_object_offset_t  dst_offset = entry->offset;
            kern_return_t       r;

            /*
             * Take an object reference, and record
             * the map version information so that the
             * map can be safely unlocked.
             */
            vm_object_reference(dst_object);

            /* account for unlock bumping up timestamp */
            version.main_timestamp = dst_map->timestamp + 1;

            vm_map_unlock(dst_map);

            /*
             * Copy as much as possible in one pass
             */
            r = vm_fault_copy(
                copy_entry->object.vm_object,
                copy_entry->offset,
                &copy_size,
                dst_object,
                dst_offset,
                dst_map,
                &version,
                THREAD_UNINT );

            /*
             * Release the object reference
             */
            vm_object_deallocate(dst_object);

            /*
             * If a hard error occurred, return it now
             */
            if (r != KERN_SUCCESS)
                return(r);

            if (copy_size != 0) {
                /*
                 * Dispose of the copied region
                 */
                vm_map_copy_clip_end(copy, copy_entry,
                    copy_entry->vme_start + copy_size);
                vm_map_copy_entry_unlink(copy, copy_entry);
                vm_object_deallocate(copy_entry->object.vm_object);
                vm_map_copy_entry_dispose(copy, copy_entry);
            }

            /*
             * Pick up in the destination map where we left off.
             *
             * Use the version information to avoid a lookup
             * in the normal case.
             */
            start += copy_size;
            vm_map_lock(dst_map);
            if (version.main_timestamp == dst_map->timestamp) {
                /* We can safely use saved tmp_entry value */

                vm_map_clip_end(dst_map, tmp_entry, start);
                tmp_entry = tmp_entry->vme_next;
            } else {
                /* Must do lookup of tmp_entry */

                if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
                    vm_map_unlock(dst_map);
                    return(KERN_INVALID_ADDRESS);
                }
                vm_map_clip_start(dst_map, tmp_entry, start);
            }
        }
    }/* while */

    return(KERN_SUCCESS);
}/* vm_map_copy_overwrite_aligned */
/*
 *	Routine:	vm_map_copyin_kernel_buffer
 *
 *	Description:
 *		Copy in data to a kernel buffer from space in the
 *		source map.  The original space may be optionally
 *		deallocated.
 *
 *		If successful, returns a new copy object.
 */
kern_return_t
vm_map_copyin_kernel_buffer(
    vm_map_t        src_map,
    vm_offset_t     src_addr,
    vm_size_t       len,
    boolean_t       src_destroy,
    vm_map_copy_t   *copy_result)
{
    int             flags;
    vm_map_copy_t   copy;
    vm_size_t       kalloc_size = sizeof(struct vm_map_copy) + len;

    copy = (vm_map_copy_t) kalloc(kalloc_size);
    if (copy == VM_MAP_COPY_NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }
    copy->type = VM_MAP_COPY_KERNEL_BUFFER;
    copy->size = len;
    copy->offset = 0;
    copy->cpy_kdata = (vm_offset_t) (copy + 1);
    copy->cpy_kalloc_size = kalloc_size;

    if (src_map == kernel_map) {
        bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
        flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
                VM_MAP_REMOVE_INTERRUPTIBLE;
    } else {
        kern_return_t kr;
        kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
        if (kr != KERN_SUCCESS) {
            kfree((vm_offset_t)copy, kalloc_size);
            return kr;
        }
        flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
                VM_MAP_REMOVE_INTERRUPTIBLE;
    }
    if (src_destroy) {
        (void) vm_map_remove(src_map, trunc_page(src_addr),
                     round_page(src_addr + len),
                     flags);
    }
    *copy_result = copy;
    return KERN_SUCCESS;
}
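
/*
 * Illustrative (non-build) sketch of the kernel-buffer path above: for a
 * small, message-sized region the data lands inline in the vm_map_copy_t
 * (cpy_kdata), so disposing of it is a single kfree done by
 * vm_map_copy_discard().  The helper name and arguments are assumptions
 * for the example only.
 */
#if 0   /* example only */
static kern_return_t
example_small_copyin(vm_map_t src_map, vm_offset_t src_addr, vm_size_t len)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    kr = vm_map_copyin_kernel_buffer(src_map, src_addr, len,
                     FALSE, &copy); /* keep the source mapping */
    if (kr != KERN_SUCCESS)
        return kr;

    /* ... inspect the data at copy->cpy_kdata ... */

    vm_map_copy_discard(copy);      /* frees the kalloc'ed buffer */
    return KERN_SUCCESS;
}
#endif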
/*
 *	Routine:	vm_map_copyout_kernel_buffer
 *
 *	Description:
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
 *		allocated.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout_kernel_buffer(
    vm_map_t        map,
    vm_offset_t     *addr,  /* IN/OUT */
    vm_map_copy_t   copy,
    boolean_t       overwrite)
{
    kern_return_t   kr = KERN_SUCCESS;
    thread_act_t    thr_act = current_act();

    if (!overwrite) {

        /*
         * Allocate space in the target map for the data
         */
        *addr = 0;
        kr = vm_map_enter(map,
                  addr,
                  round_page(copy->size),
                  (vm_offset_t) 0,
                  TRUE,
                  VM_OBJECT_NULL,
                  (vm_object_offset_t) 0,
                  FALSE,
                  VM_PROT_DEFAULT,
                  VM_PROT_ALL,
                  VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS)
            return(kr);
    }

    /*
     * Copyout the data from the kernel buffer to the target map.
     */
    if (thr_act->map == map) {

        /*
         * If the target map is the current map, just do
         * the copy.
         */
        if (copyout((char *)copy->cpy_kdata, (char *)*addr,
                copy->size)) {
            return(KERN_INVALID_ADDRESS);
        }
    }
    else {
        vm_map_t oldmap;

        /*
         * If the target map is another map, assume the
         * target's address space identity for the duration
         * of the copy.
         */
        vm_map_reference(map);
        oldmap = vm_map_switch(map);

        if (copyout((char *)copy->cpy_kdata, (char *)*addr,
                copy->size)) {
            return(KERN_INVALID_ADDRESS);
        }

        (void) vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }

    kfree((vm_offset_t)copy, copy->cpy_kalloc_size);

    return(kr);
}
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	vm_map_copy_insert(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free);		\
	zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy);		\
MACRO_END
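
/*
 * Illustrative (non-build) sketch of using the insertion macro above.
 * Because the arguments are evaluated more than once, callers should pass
 * simple lvalues rather than expressions with side effects; "where" is the
 * entry after which the chain is spliced, and the copy header itself is
 * freed by the macro.  The helper name is an assumption for the example.
 */
#if 0   /* example only */
static void
example_insert_copy(
    vm_map_t        dst_map,
    vm_map_entry_t  where,
    vm_map_copy_t   copy)
{
    /* splice the copy's entry chain in after "where"; copy is consumed */
    vm_map_copy_insert(dst_map, where, copy);
}
#endif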
/*
 *	Routine:	vm_map_copyout
 *
 *	Description:
 *		Copy out a copy chain ("copy") into newly-allocated
 *		space in the destination map.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 */
kern_return_t
vm_map_copyout(
    register vm_map_t       dst_map,
    vm_offset_t             *dst_addr,  /* OUT */
    register vm_map_copy_t  copy)
{
    vm_size_t           size;
    vm_size_t           adjustment;
    vm_offset_t         start;
    vm_object_offset_t  vm_copy_start;
    vm_map_entry_t      last;
    register
    vm_map_entry_t      entry;

    /*
     * Check for null copy object.
     */
    if (copy == VM_MAP_COPY_NULL) {
        *dst_addr = 0;
        return(KERN_SUCCESS);
    }

    /*
     * Check for special copy object, created
     * by vm_map_copyin_object.
     */
    if (copy->type == VM_MAP_COPY_OBJECT) {
        vm_object_t         object = copy->cpy_object;
        kern_return_t       kr;
        vm_object_offset_t  offset;

        offset = trunc_page_64(copy->offset);
        size = round_page(copy->size +
                (vm_size_t)(copy->offset - offset));
        *dst_addr = 0;
        kr = vm_map_enter(dst_map, dst_addr, size,
                  (vm_offset_t) 0, TRUE,
                  object, offset, FALSE,
                  VM_PROT_DEFAULT, VM_PROT_ALL,
                  VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS)
            return(kr);
        /* Account for non-pagealigned copy object */
        *dst_addr += (vm_offset_t)(copy->offset - offset);
        zfree(vm_map_copy_zone, (vm_offset_t) copy);
        return(KERN_SUCCESS);
    }

    /*
     * Check for special kernel buffer allocated
     * by new_ipc_kmsg_copyin.
     */
    if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
        return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
                             copy, FALSE));
    }

    /*
     * Find space for the data
     */
    vm_copy_start = trunc_page_64(copy->offset);
    size = round_page((vm_size_t)copy->offset + copy->size)
                            - vm_copy_start;

StartAgain: ;

    vm_map_lock(dst_map);
    assert(first_free_is_valid(dst_map));
    start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
        vm_map_min(dst_map) : last->vme_end;

    while (TRUE) {
        vm_map_entry_t  next = last->vme_next;
        vm_offset_t     end = start + size;

        if ((end > dst_map->max_offset) || (end < start)) {
            if (dst_map->wait_for_space) {
                if (size <= (dst_map->max_offset - dst_map->min_offset)) {
                    assert_wait((event_t) dst_map,
                            THREAD_INTERRUPTIBLE);
                    vm_map_unlock(dst_map);
                    thread_block((void (*)(void))0);
                    goto StartAgain;
                }
            }
            vm_map_unlock(dst_map);
            return(KERN_NO_SPACE);
        }

        if ((next == vm_map_to_entry(dst_map)) ||
            (next->vme_start >= end))
            break;

        last = next;
        start = last->vme_end;
    }

    /*
     * Since we're going to just drop the map
     * entries from the copy into the destination
     * map, they must come from the same pool.
     */
    if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
        /*
         * Mismatches occur when dealing with the default
         * pager.
         */
        zone_t          old_zone;
        vm_map_entry_t  next, new;

        /*
         * Find the zone that the copies were allocated from
         */
        old_zone = (copy->cpy_hdr.entries_pageable)
                ? vm_map_entry_zone
                : vm_map_kentry_zone;
        entry = vm_map_copy_first_entry(copy);

        /*
         * Reinitialize the copy so that vm_map_copy_entry_link
         * will work.
         */
        copy->cpy_hdr.nentries = 0;
        copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
        vm_map_copy_first_entry(copy) =
            vm_map_copy_last_entry(copy) =
            vm_map_copy_to_entry(copy);

        /*
         * Copy each entry.
         */
        while (entry != vm_map_copy_to_entry(copy)) {
            new = vm_map_copy_entry_create(copy);
            vm_map_entry_copy_full(new, entry);
            new->use_pmap = FALSE;  /* clr address space specifics */
            vm_map_copy_entry_link(copy,
                vm_map_copy_last_entry(copy),
                new);
            next = entry->vme_next;
            zfree(old_zone, (vm_offset_t) entry);
            entry = next;
        }
    }

    /*
     * Adjust the addresses in the copy chain, and
     * reset the region attributes.
     */
    adjustment = start - vm_copy_start;
    for (entry = vm_map_copy_first_entry(copy);
         entry != vm_map_copy_to_entry(copy);
         entry = entry->vme_next) {
        entry->vme_start += adjustment;
        entry->vme_end += adjustment;

        entry->inheritance = VM_INHERIT_DEFAULT;
;
5094 entry
->protection
= VM_PROT_DEFAULT
;
5095 entry
->max_protection
= VM_PROT_ALL
;
5096 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5099 * If the entry is now wired,
5100 * map the pages into the destination map.
5102 if (entry
->wired_count
!= 0) {
5103 register vm_offset_t va
;
5104 vm_object_offset_t offset
;
5105 register vm_object_t object
;
5107 object
= entry
->object
.vm_object
;
5108 offset
= entry
->offset
;
5109 va
= entry
->vme_start
;
5111 pmap_pageable(dst_map
->pmap
,
5116 while (va
< entry
->vme_end
) {
5117 register vm_page_t m
;
5120 * Look up the page in the object.
5121 * Assert that the page will be found in the
5124 * the object was newly created by
5125 * vm_object_copy_slowly, and has
5126 * copies of all of the pages from
5129 * the object was moved from the old
5130 * map entry; because the old map
5131 * entry was wired, all of the pages
5132 * were in the top-level object.
5133 * (XXX not true if we wire pages for
5136 vm_object_lock(object
);
5137 vm_object_paging_begin(object
);
5139 m
= vm_page_lookup(object
, offset
);
5140 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5142 panic("vm_map_copyout: wiring 0x%x", m
);
5145 vm_object_unlock(object
);
5147 PMAP_ENTER(dst_map
->pmap
, va
, m
, entry
->protection
,
5148 VM_WIMG_USE_DEFAULT
, TRUE
);
5150 vm_object_lock(object
);
5151 PAGE_WAKEUP_DONE(m
);
5152 /* the page is wired, so we don't have to activate */
5153 vm_object_paging_end(object
);
5154 vm_object_unlock(object
);
5156 offset
+= PAGE_SIZE_64
;
5160 else if (size
<= vm_map_aggressive_enter_max
) {
5162 register vm_offset_t va
;
5163 vm_object_offset_t offset
;
5164 register vm_object_t object
;
5167 object
= entry
->object
.vm_object
;
5168 if (object
!= VM_OBJECT_NULL
) {
5170 offset
= entry
->offset
;
5171 va
= entry
->vme_start
;
5172 while (va
< entry
->vme_end
) {
5173 register vm_page_t m
;
5176 * Look up the page in the object.
5177 * Assert that the page will be found
5178 * in the top object if at all...
5180 vm_object_lock(object
);
5181 vm_object_paging_begin(object
);
5183 if (((m
= vm_page_lookup(object
,
5186 !m
->busy
&& !m
->fictitious
&&
5187 !m
->absent
&& !m
->error
) {
5189 vm_object_unlock(object
);
5191 /* honor cow obligations */
5192 prot
= entry
->protection
;
5193 if (entry
->needs_copy
)
5194 prot
&= ~VM_PROT_WRITE
;
5196 PMAP_ENTER(dst_map
->pmap
, va
,
5198 VM_WIMG_USE_DEFAULT
,
5201 vm_object_lock(object
);
5202 vm_page_lock_queues();
5203 if (!m
->active
&& !m
->inactive
)
5204 vm_page_activate(m
);
5205 vm_page_unlock_queues();
5206 PAGE_WAKEUP_DONE(m
);
5208 vm_object_paging_end(object
);
5209 vm_object_unlock(object
);
5211 offset
+= PAGE_SIZE_64
;
5219 * Correct the page alignment for the result
5222 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5225 * Update the hints and the map size
5228 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5230 dst_map
->size
+= size
;
5236 vm_map_copy_insert(dst_map
, last
, copy
);
5238 vm_map_unlock(dst_map
);
5241 * XXX If wiring_required, call vm_map_pageable
5244 return(KERN_SUCCESS
);
5247 boolean_t vm_map_aggressive_enter
; /* not used yet */
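/*
 * Illustrative note (not original code): vm_map_copyout above rebases the
 * page-aligned entries at "start" but hands the caller an address that keeps
 * the copy's sub-page offset: *dst_addr = start + (copy->offset -
 * vm_copy_start).  A minimal stand-alone sketch of that arithmetic, assuming
 * a 4 KB page size purely for illustration:
 */
#if 0	/* sketch only -- never compiled into the kernel */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096UL		/* illustrative page size */
#define sketch_trunc_page(x)	((x) & ~(SKETCH_PAGE_SIZE - 1))
#define sketch_round_page(x)	sketch_trunc_page((x) + SKETCH_PAGE_SIZE - 1)

int
main(void)
{
	unsigned long copy_offset = 0x12345;	/* original, unaligned offset */
	unsigned long copy_size   = 0x1000;
	unsigned long start       = 0x40000000;	/* where free space was found */

	unsigned long vm_copy_start =
	    sketch_trunc_page(copy_offset);
	unsigned long size =
	    sketch_round_page(copy_offset + copy_size) - vm_copy_start;

	/* The caller gets the aligned block's base plus the sub-page offset,
	 * mirroring *dst_addr = start + (copy->offset - vm_copy_start). */
	unsigned long dst_addr = start + (copy_offset - vm_copy_start);

	printf("aligned block 0x%lx..0x%lx, caller sees 0x%lx\n",
	    start, start + size, dst_addr);
	return 0;
}
#endif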
5251 * Routine: vm_map_copyin
5254 * Copy the specified region (src_addr, len) from the
5255 * source address space (src_map), possibly removing
5256 * the region from the source address space (src_destroy).
5259 * A vm_map_copy_t object (copy_result), suitable for
5260 * insertion into another address space (using vm_map_copyout),
5261 * copying over another address space region (using
5262 * vm_map_copy_overwrite). If the copy is unused, it
5263 * should be destroyed (using vm_map_copy_discard).
5265 * In/out conditions:
5266 * The source map should not be locked on entry.
5269 typedef struct submap_map
{
5270 vm_map_t parent_map
;
5271 vm_offset_t base_start
;
5272 vm_offset_t base_end
;
5273 struct submap_map
*next
;
5277 vm_map_copyin_common(
5279 vm_offset_t src_addr
,
5281 boolean_t src_destroy
,
5282 boolean_t src_volatile
,
5283 vm_map_copy_t
*copy_result
, /* OUT */
5284 boolean_t use_maxprot
)
5286 extern int msg_ool_size_small
;
5288 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5289 * in multi-level lookup, this
5290 * entry contains the actual
5294 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5296 vm_offset_t src_start
; /* Start of current entry --
5297 * where copy is taking place now
5299 vm_offset_t src_end
; /* End of entire region to be
5301 vm_offset_t base_start
; /* submap fields to save offsets */
5302 /* in original map */
5303 vm_offset_t base_end
;
5304 vm_map_t base_map
=src_map
;
5305 vm_map_entry_t base_entry
;
5306 boolean_t map_share
=FALSE
;
5307 submap_map_t
*parent_maps
= NULL
;
5310 vm_map_copy_t copy
; /* Resulting copy */
5311 vm_offset_t copy_addr
;
5314 * Check for copies of zero bytes.
5318 *copy_result
= VM_MAP_COPY_NULL
;
5319 return(KERN_SUCCESS
);
5323 * If the copy is sufficiently small, use a kernel buffer instead
5324 * of making a virtual copy. The theory being that the cost of
5325 * setting up VM (and taking C-O-W faults) dominates the copy costs
5326 * for small regions.
5328 if ((len
< msg_ool_size_small
) && !use_maxprot
)
5329 return vm_map_copyin_kernel_buffer(src_map
, src_addr
, len
,
5330 src_destroy
, copy_result
);
5333 * Compute start and end of region
5336 src_start
= trunc_page(src_addr
);
5337 src_end
= round_page(src_addr
+ len
);
5339 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5342 * Check that the end address doesn't overflow
5345 if (src_end
<= src_start
)
5346 if ((src_end
< src_start
) || (src_start
!= 0))
5347 return(KERN_INVALID_ADDRESS
);
5350 * Allocate a header element for the list.
5352 * Use the start and end in the header to
5353 * remember the endpoints prior to rounding.
5356 copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
5357 vm_map_copy_first_entry(copy
) =
5358 vm_map_copy_last_entry(copy
) = vm_map_copy_to_entry(copy
);
5359 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
5360 copy
->cpy_hdr
.nentries
= 0;
5361 copy
->cpy_hdr
.entries_pageable
= TRUE
;
5363 copy
->offset
= src_addr
;
5366 new_entry
= vm_map_copy_entry_create(copy
);
5370 vm_map_unlock(src_map); \
5371 if(src_map != base_map) \
5372 vm_map_deallocate(src_map); \
5373 if (new_entry != VM_MAP_ENTRY_NULL) \
5374 vm_map_copy_entry_dispose(copy,new_entry); \
5375 vm_map_copy_discard(copy); \
5377 submap_map_t *ptr; \
5379 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5380 parent_maps=parent_maps->next; \
5381 if (ptr->parent_map != base_map) \
5382 vm_map_deallocate(ptr->parent_map); \
5383 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5390 * Find the beginning of the region.
5393 vm_map_lock(src_map
);
5395 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5396 RETURN(KERN_INVALID_ADDRESS
);
5397 if(!tmp_entry
->is_sub_map
) {
5398 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5400 /* set for later submap fix-up */
5401 copy_addr
= src_start
;
5404 * Go through entries until we get to the end.
5409 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5410 vm_size_t src_size
; /* Size of source
5411 * map entry (in both
5416 vm_object_t src_object
; /* Object to copy */
5417 vm_object_offset_t src_offset
;
5419 boolean_t src_needs_copy
; /* Should source map
5421 * for copy-on-write?
5424 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5426 boolean_t was_wired
; /* Was source wired? */
5427 vm_map_version_t version
; /* Version before locks
5428 * dropped to make copy
5430 kern_return_t result
; /* Return value from
5431 * copy_strategically.
5433 while(tmp_entry
->is_sub_map
) {
5434 vm_size_t submap_len
;
5437 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5438 ptr
->next
= parent_maps
;
5440 ptr
->parent_map
= src_map
;
5441 ptr
->base_start
= src_start
;
5442 ptr
->base_end
= src_end
;
5443 submap_len
= tmp_entry
->vme_end
- src_start
;
5444 if(submap_len
> (src_end
-src_start
))
5445 submap_len
= src_end
-src_start
;
5446 ptr
->base_start
+= submap_len
;
5448 src_start
-= tmp_entry
->vme_start
;
5449 src_start
+= tmp_entry
->offset
;
5450 src_end
= src_start
+ submap_len
;
5451 src_map
= tmp_entry
->object
.sub_map
;
5452 vm_map_lock(src_map
);
5453 /* keep an outstanding reference for all maps in */
5454 /* the parents tree except the base map */
5455 vm_map_reference(src_map
);
5456 vm_map_unlock(ptr
->parent_map
);
5457 if (!vm_map_lookup_entry(
5458 src_map
, src_start
, &tmp_entry
))
5459 RETURN(KERN_INVALID_ADDRESS
);
5461 if(!tmp_entry
->is_sub_map
)
5462 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5463 src_entry
= tmp_entry
;
5465 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5466 ((tmp_entry
->object
.vm_object
->wimg_bits
!= VM_WIMG_DEFAULT
) ||
5467 (tmp_entry
->object
.vm_object
->phys_contiguous
))) {
5468 /* This is not, and cannot be, supported for now */
5469 /* we need a description of the caching mode */
5470 /* reflected in the object before we can */
5471 /* support copyin, and then the support will */
5472 /* be for direct copy */
5473 RETURN(KERN_PROTECTION_FAILURE
);
5476 * Create a new address map entry to hold the result.
5477 * Fill in the fields from the appropriate source entries.
5478 * We must unlock the source map to do this if we need
5479 * to allocate a map entry.
5481 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5482 version
.main_timestamp
= src_map
->timestamp
;
5483 vm_map_unlock(src_map
);
5485 new_entry
= vm_map_copy_entry_create(copy
);
5487 vm_map_lock(src_map
);
5488 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5489 if (!vm_map_lookup_entry(src_map
, src_start
,
5491 RETURN(KERN_INVALID_ADDRESS
);
5493 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5494 continue; /* restart w/ new tmp_entry */
5499 * Verify that the region can be read.
5501 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5503 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5504 RETURN(KERN_PROTECTION_FAILURE
);
5507 * Clip against the endpoints of the entire region.
5510 vm_map_clip_end(src_map
, src_entry
, src_end
);
5512 src_size
= src_entry
->vme_end
- src_start
;
5513 src_object
= src_entry
->object
.vm_object
;
5514 src_offset
= src_entry
->offset
;
5515 was_wired
= (src_entry
->wired_count
!= 0);
5517 vm_map_entry_copy(new_entry
, src_entry
);
5518 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5521 * Attempt non-blocking copy-on-write optimizations.
5525 (src_object
== VM_OBJECT_NULL
||
5526 (src_object
->internal
&& !src_object
->true_share
5529 * If we are destroying the source, and the object
5530 * is internal, we can move the object reference
5531 * from the source to the copy. The copy is
5532 * copy-on-write only if the source is.
5533 * We make another reference to the object, because
5534 * destroying the source entry will deallocate it.
5536 vm_object_reference(src_object
);
5539 * Copy is always unwired. vm_map_copy_entry
5540 * set its wired count to zero.
5543 goto CopySuccessful
;
5548 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5549 src_object
, new_entry
, new_entry
->object
.vm_object
,
5552 vm_object_copy_quickly(
5553 &new_entry
->object
.vm_object
,
5557 &new_entry_needs_copy
)) {
5559 new_entry
->needs_copy
= new_entry_needs_copy
;
5562 * Handle copy-on-write obligations
5565 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5566 if (tmp_entry
->is_shared
||
5567 tmp_entry
->object
.vm_object
->true_share
||
5569 vm_map_unlock(src_map
);
5570 new_entry
->object
.vm_object
=
5571 vm_object_copy_delayed(
5575 /* dec ref gained in copy_quickly */
5576 vm_object_lock(src_object
);
5577 src_object
->ref_count
--;
5578 assert(src_object
->ref_count
> 0);
5579 vm_object_res_deallocate(src_object
);
5580 vm_object_unlock(src_object
);
5581 vm_map_lock(src_map
);
5583 * it turns out that we have
5584 * finished our copy. No matter
5585 * what the state of the map
5586 * we will lock it again here
5587 * knowing that if there is
5588 * additional data to copy
5589 * it will be checked at
5590 * the top of the loop
5592 * Don't do timestamp check
5596 vm_object_pmap_protect(
5600 (src_entry
->is_shared
?
5603 src_entry
->vme_start
,
5604 src_entry
->protection
&
5607 tmp_entry
->needs_copy
= TRUE
;
5612 * The map has never been unlocked, so it's safe
5613 * to move to the next entry rather than doing
5617 goto CopySuccessful
;
5620 new_entry
->needs_copy
= FALSE
;
5623 * Take an object reference, so that we may
5624 * release the map lock(s).
5627 assert(src_object
!= VM_OBJECT_NULL
);
5628 vm_object_reference(src_object
);
5631 * Record the timestamp for later verification.
5635 version
.main_timestamp
= src_map
->timestamp
;
5636 vm_map_unlock(src_map
); /* Increments timestamp once! */
5643 vm_object_lock(src_object
);
5644 result
= vm_object_copy_slowly(
5649 &new_entry
->object
.vm_object
);
5650 new_entry
->offset
= 0;
5651 new_entry
->needs_copy
= FALSE
;
5653 result
= vm_object_copy_strategically(src_object
,
5656 &new_entry
->object
.vm_object
,
5658 &new_entry_needs_copy
);
5660 new_entry
->needs_copy
= new_entry_needs_copy
;
5664 if (result
!= KERN_SUCCESS
&&
5665 result
!= KERN_MEMORY_RESTART_COPY
) {
5666 vm_map_lock(src_map
);
5671 * Throw away the extra reference
5674 vm_object_deallocate(src_object
);
5677 * Verify that the map has not substantially
5678 * changed while the copy was being made.
5681 vm_map_lock(src_map
);
5683 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5684 goto VerificationSuccessful
;
5687 * Simple version comparison failed.
5689 * Retry the lookup and verify that the
5690 * same object/offset are still present.
5692 * [Note: a memory manager that colludes with
5693 * the calling task can detect that we have
5694 * cheated. While the map was unlocked, the
5695 * mapping could have been changed and restored.]
5698 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5699 RETURN(KERN_INVALID_ADDRESS
);
5702 src_entry
= tmp_entry
;
5703 vm_map_clip_start(src_map
, src_entry
, src_start
);
5705 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5707 src_entry
->max_protection
& VM_PROT_READ
== 0)
5708 goto VerificationFailed
;
5710 if (src_entry
->vme_end
< new_entry
->vme_end
)
5711 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5713 if ((src_entry
->object
.vm_object
!= src_object
) ||
5714 (src_entry
->offset
!= src_offset
) ) {
5717 * Verification failed.
5719 * Start over with this top-level entry.
5722 VerificationFailed
: ;
5724 vm_object_deallocate(new_entry
->object
.vm_object
);
5725 tmp_entry
= src_entry
;
5730 * Verification succeeded.
5733 VerificationSuccessful
: ;
5735 if (result
== KERN_MEMORY_RESTART_COPY
)
5745 * Link in the new copy entry.
5748 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5752 * Determine whether the entire region
5755 src_start
= new_entry
->vme_end
;
5756 new_entry
= VM_MAP_ENTRY_NULL
;
5757 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5758 if (src_map
!= base_map
) {
5762 assert(ptr
!= NULL
);
5763 parent_maps
= parent_maps
->next
;
5764 vm_map_unlock(src_map
);
5765 vm_map_deallocate(src_map
);
5766 vm_map_lock(ptr
->parent_map
);
5767 src_map
= ptr
->parent_map
;
5768 src_start
= ptr
->base_start
;
5769 src_end
= ptr
->base_end
;
5770 if ((src_end
> src_start
) &&
5771 !vm_map_lookup_entry(
5772 src_map
, src_start
, &tmp_entry
))
5773 RETURN(KERN_INVALID_ADDRESS
);
5774 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5775 if(parent_maps
== NULL
)
5777 src_entry
= tmp_entry
->vme_prev
;
5781 if ((src_start
>= src_end
) && (src_end
!= 0))
5785 * Verify that there are no gaps in the region
5788 tmp_entry
= src_entry
->vme_next
;
5789 if ((tmp_entry
->vme_start
!= src_start
) ||
5790 (tmp_entry
== vm_map_to_entry(src_map
)))
5791 RETURN(KERN_INVALID_ADDRESS
);
5795 * If the source should be destroyed, do it now, since the
5796 * copy was successful.
5799 (void) vm_map_delete(src_map
,
5800 trunc_page(src_addr
),
5802 (src_map
== kernel_map
) ?
5803 VM_MAP_REMOVE_KUNWIRE
:
5807 vm_map_unlock(src_map
);
5809 /* Fix-up start and end points in copy. This is necessary */
5810 /* when the various entries in the copy object were picked */
5811 /* up from different sub-maps */
5813 tmp_entry
= vm_map_copy_first_entry(copy
);
5814 while (tmp_entry
!= vm_map_copy_to_entry(copy
)) {
5815 tmp_entry
->vme_end
= copy_addr
+
5816 (tmp_entry
->vme_end
- tmp_entry
->vme_start
);
5817 tmp_entry
->vme_start
= copy_addr
;
5818 copy_addr
+= tmp_entry
->vme_end
- tmp_entry
->vme_start
;
5819 tmp_entry
= (struct vm_map_entry
*)tmp_entry
->vme_next
;
5822 *copy_result
= copy
;
5823 return(KERN_SUCCESS
);
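/*
 * Illustrative note (not original code): as the description above says, the
 * copy object built here is meant to be handed to vm_map_copyout (or thrown
 * away with vm_map_copy_discard).  The sketch below is a hypothetical helper,
 * copy_region_between_maps(), assuming the parameter order shown in the
 * definitions in this file; it is not part of the kernel.
 */
#if 0	/* sketch only -- never compiled into the kernel */
static kern_return_t
copy_region_between_maps(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	vm_map_t	dst_map,
	vm_offset_t	*dst_addr)	/* OUT */
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	/* Capture [src_addr, src_addr + len) without destroying the source
	 * and honoring current (not maximum) protections. */
	kr = vm_map_copyin_common(src_map, src_addr, len,
				  FALSE,	/* src_destroy */
				  FALSE,	/* src_volatile */
				  &copy,
				  FALSE);	/* use_maxprot */
	if (kr != KERN_SUCCESS)
		return kr;

	/* Drop the captured entries into dst_map at a newly chosen address;
	 * on success the copy object is consumed. */
	kr = vm_map_copyout(dst_map, dst_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* still ours on failure */

	return kr;
}
#endif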
/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,		/* offset of region in object */
	vm_object_size_t	size,		/* size of region in object */
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;			/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->cpy_index = 0;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
5863 vm_map_entry_t old_entry
,
5867 vm_map_entry_t new_entry
;
5868 kern_return_t result
;
5871 * New sharing code. New map entry
5872 * references original object. Internal
5873 * objects use asynchronous copy algorithm for
5874 * future copies. First make sure we have
5875 * the right object. If we need a shadow,
5876 * or someone else already has one, then
5877 * make a new shadow and share it.
5880 object
= old_entry
->object
.vm_object
;
5881 if (old_entry
->is_sub_map
) {
5882 assert(old_entry
->wired_count
== 0);
5884 if(old_entry
->use_pmap
) {
5885 result
= pmap_nest(new_map
->pmap
,
5886 (old_entry
->object
.sub_map
)->pmap
,
5887 old_entry
->vme_start
,
5888 old_entry
->vme_end
- old_entry
->vme_start
);
5890 panic("vm_map_fork_share: pmap_nest failed!");
5893 } else if (object
== VM_OBJECT_NULL
) {
5894 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5895 old_entry
->vme_start
));
5896 old_entry
->offset
= 0;
5897 old_entry
->object
.vm_object
= object
;
5898 assert(!old_entry
->needs_copy
);
5899 } else if (object
->copy_strategy
!=
5900 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5903 * We are already using an asymmetric
5904 * copy, and therefore we already have
5908 assert(! old_entry
->needs_copy
);
5910 else if (old_entry
->needs_copy
|| /* case 1 */
5911 object
->shadowed
|| /* case 2 */
5912 (!object
->true_share
&& /* case 3 */
5913 !old_entry
->is_shared
&&
5915 (vm_size_t
)(old_entry
->vme_end
-
5916 old_entry
->vme_start
)))) {
5919 * We need to create a shadow.
5920 * There are three cases here.
5921 * In the first case, we need to
5922 * complete a deferred symmetrical
5923 * copy that we participated in.
5924 * In the second and third cases,
5925 * we need to create the shadow so
5926 * that changes that we make to the
5927 * object do not interfere with
5928 * any symmetrical copies which
5929 * have occurred (case 2) or which
5930 * might occur (case 3).
5932 * The first case is when we had
5933 * deferred shadow object creation
5934 * via the entry->needs_copy mechanism.
5935 * This mechanism only works when
5936 * only one entry points to the source
5937 * object, and we are about to create
5938 * a second entry pointing to the
5939 * same object. The problem is that
5940 * there is no way of mapping from
5941 * an object to the entries pointing
5942 * to it. (Deferred shadow creation
5943 * works with one entry because it occurs
5944 * at fault time, and we walk from the
5945 * entry to the object when handling
5948 * The second case is when the object
5949 * to be shared has already been copied
5950 * with a symmetric copy, but we point
5951 * directly to the object without
5952 * needs_copy set in our entry. (This
5953 * can happen because different ranges
5954 * of an object can be pointed to by
5955 * different entries. In particular,
5956 * a single entry pointing to an object
5957 * can be split by a call to vm_inherit,
5958 * which, combined with task_create, can
5959 * result in the different entries
5960 * having different needs_copy values.)
5961 * The shadowed flag in the object allows
5962 * us to detect this case. The problem
5963 * with this case is that if this object
5964 * has or will have shadows, then we
5965 * must not perform an asymmetric copy
5966 * of this object, since such a copy
5967 * allows the object to be changed, which
5968 * will break the previous symmetrical
5969 * copies (which rely upon the object
5970 * not changing). In a sense, the shadowed
5971 * flag says "don't change this object".
5972 * We fix this by creating a shadow
5973 * object for this object, and sharing
5974 * that. This works because we are free
5975 * to change the shadow object (and thus
5976 * to use an asymmetric copy strategy);
5977 * this is also semantically correct,
5978 * since this object is temporary, and
5979 * therefore a copy of the object is
5980 * as good as the object itself. (This
5981 * is not true for permanent objects,
5982 * since the pager needs to see changes,
5983 * which won't happen if the changes
5984 * are made to a copy.)
5986 * The third case is when the object
5987 * to be shared has parts sticking
5988 * outside of the entry we're working
5989 * with, and thus may in the future
5990 * be subject to a symmetrical copy.
5991 * (This is a preemptive version of
5995 assert(!(object
->shadowed
&& old_entry
->is_shared
));
5996 vm_object_shadow(&old_entry
->object
.vm_object
,
5998 (vm_size_t
) (old_entry
->vme_end
-
5999 old_entry
->vme_start
));
6002 * If we're making a shadow for other than
6003 * copy on write reasons, then we have
6004 * to remove write permission.
6007 if (!old_entry
->needs_copy
&&
6008 (old_entry
->protection
& VM_PROT_WRITE
)) {
6009 if(old_map
->mapped
) {
6010 vm_object_pmap_protect(
6011 old_entry
->object
.vm_object
,
6013 (old_entry
->vme_end
-
6014 old_entry
->vme_start
),
6016 old_entry
->vme_start
,
6017 old_entry
->protection
& ~VM_PROT_WRITE
);
6019 pmap_protect(old_map
->pmap
,
6020 old_entry
->vme_start
,
6022 old_entry
->protection
& ~VM_PROT_WRITE
);
6026 old_entry
->needs_copy
= FALSE
;
6027 object
= old_entry
->object
.vm_object
;
6031 * If object was using a symmetric copy strategy,
6032 * change its copy strategy to the default
6033 * asymmetric copy strategy, which is copy_delay
6034 * in the non-norma case and copy_call in the
6035 * norma case. Bump the reference count for the
6039 if(old_entry
->is_sub_map
) {
6040 vm_map_lock(old_entry
->object
.sub_map
);
6041 vm_map_reference(old_entry
->object
.sub_map
);
6042 vm_map_unlock(old_entry
->object
.sub_map
);
6044 vm_object_lock(object
);
6045 object
->ref_count
++;
6046 vm_object_res_reference(object
);
6047 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6048 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6050 vm_object_unlock(object
);
6054 * Clone the entry, using object ref from above.
6055 * Mark both entries as shared.
6058 new_entry
= vm_map_entry_create(new_map
);
6059 vm_map_entry_copy(new_entry
, old_entry
);
6060 old_entry
->is_shared
= TRUE
;
6061 new_entry
->is_shared
= TRUE
;
6064 * Insert the entry into the new map -- we
6065 * know we're inserting at the end of the new
6069 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6072 * Update the physical map
6075 if (old_entry
->is_sub_map
) {
6076 /* Bill Angell pmap support goes here */
6078 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6079 old_entry
->vme_end
- old_entry
->vme_start
,
6080 old_entry
->vme_start
);
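/*
 * Illustrative note (not original code): the long comment above lays out the
 * three situations in which vm_map_fork_share makes a shadow before sharing.
 * The hypothetical predicate below is only a restatement of that decision,
 * with the object/entry state passed in as plain parameters.
 */
#if 0	/* sketch only -- never compiled into the kernel */
static boolean_t
needs_shadow_before_share(
	boolean_t	entry_needs_copy,
	boolean_t	object_shadowed,
	boolean_t	object_true_share,
	boolean_t	entry_is_shared,
	vm_size_t	object_size,
	vm_size_t	entry_size)
{
	return (entry_needs_copy ||		/* case 1: deferred copy owed */
		object_shadowed ||		/* case 2: already symmetrically copied */
		(!object_true_share &&		/* case 3: temporary, unshared object */
		 !entry_is_shared &&		/*         sticking out past this entry */
		 object_size > entry_size));
}
#endif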
6087 vm_map_entry_t
*old_entry_p
,
6090 vm_map_entry_t old_entry
= *old_entry_p
;
6091 vm_size_t entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6092 vm_offset_t start
= old_entry
->vme_start
;
6094 vm_map_entry_t last
= vm_map_last_entry(new_map
);
6096 vm_map_unlock(old_map
);
6098 * Use maxprot version of copyin because we
6099 * care about whether this memory can ever
6100 * be accessed, not just whether it's accessible
6103 if (vm_map_copyin_maxprot(old_map
, start
, entry_size
, FALSE
, ©
)
6106 * The map might have changed while it
6107 * was unlocked, check it again. Skip
6108 * any blank space or permanently
6109 * unreadable region.
6111 vm_map_lock(old_map
);
6112 if (!vm_map_lookup_entry(old_map
, start
, &last
) ||
6113 last
->max_protection
& VM_PROT_READ
==
6115 last
= last
->vme_next
;
6117 *old_entry_p
= last
;
6120 * XXX For some error returns, want to
6121 * XXX skip to the next element. Note
6122 * that INVALID_ADDRESS and
6123 * PROTECTION_FAILURE are handled above.
6130 * Insert the copy into the new map
6133 vm_map_copy_insert(new_map
, last
, copy
);
6136 * Pick up the traversal at the end of
6137 * the copied region.
6140 vm_map_lock(old_map
);
6141 start
+= entry_size
;
6142 if (! vm_map_lookup_entry(old_map
, start
, &last
)) {
6143 last
= last
->vme_next
;
6145 vm_map_clip_start(old_map
, last
, start
);
6147 *old_entry_p
= last
;
6155 * Create and return a new map based on the old
6156 * map, according to the inheritance values on the
6157 * regions in that map.
6159 * The source map must not be locked.
6165 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
6167 vm_map_entry_t old_entry
;
6168 vm_size_t new_size
= 0, entry_size
;
6169 vm_map_entry_t new_entry
;
6170 boolean_t src_needs_copy
;
6171 boolean_t new_entry_needs_copy
;
6173 vm_map_reference_swap(old_map
);
6174 vm_map_lock(old_map
);
6176 new_map
= vm_map_create(new_pmap
,
6177 old_map
->min_offset
,
6178 old_map
->max_offset
,
6179 old_map
->hdr
.entries_pageable
);
6182 old_entry
= vm_map_first_entry(old_map
);
6183 old_entry
!= vm_map_to_entry(old_map
);
6186 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6188 switch (old_entry
->inheritance
) {
6189 case VM_INHERIT_NONE
:
6192 case VM_INHERIT_SHARE
:
6193 vm_map_fork_share(old_map
, old_entry
, new_map
);
6194 new_size
+= entry_size
;
6197 case VM_INHERIT_COPY
:
6200 * Inline the copy_quickly case;
6201 * upon failure, fall back on call
6202 * to vm_map_fork_copy.
6205 if(old_entry
->is_sub_map
)
6207 if ((old_entry
->wired_count
!= 0) ||
6208 ((old_entry
->object
.vm_object
!= NULL
) &&
6209 (old_entry
->object
.vm_object
->true_share
))) {
6210 goto slow_vm_map_fork_copy
;
6213 new_entry
= vm_map_entry_create(new_map
);
6214 vm_map_entry_copy(new_entry
, old_entry
);
6215 /* clear address space specifics */
6216 new_entry
->use_pmap
= FALSE
;
6218 if (! vm_object_copy_quickly(
6219 &new_entry
->object
.vm_object
,
6221 (old_entry
->vme_end
-
6222 old_entry
->vme_start
),
6224 &new_entry_needs_copy
)) {
6225 vm_map_entry_dispose(new_map
, new_entry
);
6226 goto slow_vm_map_fork_copy
;
6230 * Handle copy-on-write obligations
6233 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6234 vm_object_pmap_protect(
6235 old_entry
->object
.vm_object
,
6237 (old_entry
->vme_end
-
6238 old_entry
->vme_start
),
6239 ((old_entry
->is_shared
6243 old_entry
->vme_start
,
6244 old_entry
->protection
& ~VM_PROT_WRITE
);
6246 old_entry
->needs_copy
= TRUE
;
6248 new_entry
->needs_copy
= new_entry_needs_copy
;
6251 * Insert the entry at the end
6255 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6257 new_size
+= entry_size
;
6260 slow_vm_map_fork_copy
:
6261 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6262 new_size
+= entry_size
;
6266 old_entry
= old_entry
->vme_next
;
6269 new_map
->size
= new_size
;
6270 vm_map_unlock(old_map
);
6271 vm_map_deallocate(old_map
);
6278 * vm_map_lookup_locked:
6280 * Finds the VM object, offset, and
6281 * protection for a given virtual address in the
6282 * specified map, assuming a page fault of the
6285 * Returns the (object, offset, protection) for
6286 * this address, whether it is wired down, and whether
6287 * this map has the only reference to the data in question.
6288 * In order to later verify this lookup, a "version"
6291 * The map MUST be locked by the caller and WILL be
6292 * locked on exit. In order to guarantee the
6293 * existence of the returned object, it is returned
6296 * If a lookup is requested with "write protection"
6297 * specified, the map may be changed to perform virtual
6298 * copying operations, although the data referenced will
6302 vm_map_lookup_locked(
6303 vm_map_t
*var_map
, /* IN/OUT */
6304 register vm_offset_t vaddr
,
6305 register vm_prot_t fault_type
,
6306 vm_map_version_t
*out_version
, /* OUT */
6307 vm_object_t
*object
, /* OUT */
6308 vm_object_offset_t
*offset
, /* OUT */
6309 vm_prot_t
*out_prot
, /* OUT */
6310 boolean_t
*wired
, /* OUT */
6311 int *behavior
, /* OUT */
6312 vm_object_offset_t
*lo_offset
, /* OUT */
6313 vm_object_offset_t
*hi_offset
, /* OUT */
6316 vm_map_entry_t entry
;
6317 register vm_map_t map
= *var_map
;
6318 vm_map_t old_map
= *var_map
;
6319 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6320 vm_offset_t cow_parent_vaddr
;
6321 vm_offset_t old_start
;
6322 vm_offset_t old_end
;
6323 register vm_prot_t prot
;
6329 * If the map has an interesting hint, try it before calling
6330 * full blown lookup routine.
6333 mutex_lock(&map
->s_lock
);
6335 mutex_unlock(&map
->s_lock
);
6337 if ((entry
== vm_map_to_entry(map
)) ||
6338 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6339 vm_map_entry_t tmp_entry
;
6342 * Entry was either not a valid hint, or the vaddr
6343 * was not contained in the entry, so do a full lookup.
6345 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6346 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6347 vm_map_unlock(cow_sub_map_parent
);
6348 if((*pmap_map
!= map
)
6349 && (*pmap_map
!= cow_sub_map_parent
))
6350 vm_map_unlock(*pmap_map
);
6351 return KERN_INVALID_ADDRESS
;
6356 if(map
== old_map
) {
6357 old_start
= entry
->vme_start
;
6358 old_end
= entry
->vme_end
;
6362 * Handle submaps. Drop lock on upper map, submap is
6367 if (entry
->is_sub_map
) {
6368 vm_offset_t local_vaddr
;
6369 vm_offset_t end_delta
;
6370 vm_offset_t start_delta
;
6371 vm_offset_t object_start_delta
;
6372 vm_map_entry_t submap_entry
;
6373 boolean_t mapped_needs_copy
=FALSE
;
6375 local_vaddr
= vaddr
;
6377 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6378 /* if pmap_map equals map we unlock below */
6379 if ((*pmap_map
!= map
) &&
6380 (*pmap_map
!= cow_sub_map_parent
))
6381 vm_map_unlock(*pmap_map
);
6382 *pmap_map
= entry
->object
.sub_map
;
6385 if(entry
->needs_copy
) {
6386 if (!mapped_needs_copy
) {
6387 if (vm_map_lock_read_to_write(map
)) {
6388 vm_map_lock_read(map
);
6389 if(*pmap_map
== entry
->object
.sub_map
)
6393 vm_map_lock_read(entry
->object
.sub_map
);
6394 cow_sub_map_parent
= map
;
6395 /* reset base to map before cow object */
6396 /* this is the map which will accept */
6397 /* the new cow object */
6398 old_start
= entry
->vme_start
;
6399 old_end
= entry
->vme_end
;
6400 cow_parent_vaddr
= vaddr
;
6401 mapped_needs_copy
= TRUE
;
6403 vm_map_lock_read(entry
->object
.sub_map
);
6404 if((cow_sub_map_parent
!= map
) &&
6409 vm_map_lock_read(entry
->object
.sub_map
);
6410 /* leave map locked if it is a target */
6411 /* cow sub_map above otherwise, just */
6412 /* follow the maps down to the object */
6413 /* here we unlock knowing we are not */
6414 /* revisiting the map. */
6415 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6416 vm_map_unlock_read(map
);
6419 *var_map
= map
= entry
->object
.sub_map
;
6421 /* calculate the offset in the submap for vaddr */
6422 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6425 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6426 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6427 vm_map_unlock(cow_sub_map_parent
);
6429 if((*pmap_map
!= map
)
6430 && (*pmap_map
!= cow_sub_map_parent
)) {
6431 vm_map_unlock(*pmap_map
);
6434 return KERN_INVALID_ADDRESS
;
6436 /* find the attenuated shadow of the underlying object */
6437 /* on our target map */
6439 /* In English: the submap object may extend beyond the */
6440 /* region mapped by the entry or, may only fill a portion */
6441 /* of it. For our purposes, we only care if the object */
6442 /* doesn't fill. In this case the area which will */
6443 /* ultimately be clipped in the top map will only need */
6444 /* to be as big as the portion of the underlying entry */
6445 /* which is mapped */
6446 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6447 submap_entry
->vme_start
- entry
->offset
: 0;
6450 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6451 submap_entry
->vme_end
?
6452 0 : (entry
->offset
+
6453 (old_end
- old_start
))
6454 - submap_entry
->vme_end
;
6456 old_start
+= start_delta
;
6457 old_end
-= end_delta
;
6459 if(submap_entry
->is_sub_map
) {
6460 entry
= submap_entry
;
6461 vaddr
= local_vaddr
;
6462 goto submap_recurse
;
6465 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6467 vm_object_t copy_object
;
6468 vm_offset_t local_start
;
6469 vm_offset_t local_end
;
6470 boolean_t copied_slowly
= FALSE
;
6472 if (vm_map_lock_read_to_write(map
)) {
6473 vm_map_lock_read(map
);
6474 old_start
-= start_delta
;
6475 old_end
+= end_delta
;
6480 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6481 submap_entry
->object
.vm_object
=
6484 (submap_entry
->vme_end
6485 - submap_entry
->vme_start
));
6486 submap_entry
->offset
= 0;
6488 local_start
= local_vaddr
-
6489 (cow_parent_vaddr
- old_start
);
6490 local_end
= local_vaddr
+
6491 (old_end
- cow_parent_vaddr
);
6492 vm_map_clip_start(map
, submap_entry
, local_start
);
6493 vm_map_clip_end(map
, submap_entry
, local_end
);
6495 /* This is the COW case, lets connect */
6496 /* an entry in our space to the underlying */
6497 /* object in the submap, bypassing the */
6501 if(submap_entry
->wired_count
!= 0) {
6503 submap_entry
->object
.vm_object
);
6504 vm_object_copy_slowly(
6505 submap_entry
->object
.vm_object
,
6506 submap_entry
->offset
,
6507 submap_entry
->vme_end
-
6508 submap_entry
->vme_start
,
6511 copied_slowly
= TRUE
;
6514 /* set up shadow object */
6515 copy_object
= submap_entry
->object
.vm_object
;
6516 vm_object_reference(copy_object
);
6517 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6518 submap_entry
->needs_copy
= TRUE
;
6519 vm_object_pmap_protect(
6520 submap_entry
->object
.vm_object
,
6521 submap_entry
->offset
,
6522 submap_entry
->vme_end
-
6523 submap_entry
->vme_start
,
6524 (submap_entry
->is_shared
6526 PMAP_NULL
: map
->pmap
,
6527 submap_entry
->vme_start
,
6528 submap_entry
->protection
&
6533 /* This works differently from the */
6534 /* normal submap case. We go back */
6535 /* to the parent of the cow map and*/
6536 /* clip out the target portion of */
6537 /* the sub_map, substituting the */
6538 /* new copy object, */
6541 local_start
= old_start
;
6542 local_end
= old_end
;
6543 map
= cow_sub_map_parent
;
6544 *var_map
= cow_sub_map_parent
;
6545 vaddr
= cow_parent_vaddr
;
6546 cow_sub_map_parent
= NULL
;
6548 if(!vm_map_lookup_entry(map
,
6550 vm_object_deallocate(
6552 vm_map_lock_write_to_read(map
);
6553 return KERN_INVALID_ADDRESS
;
6556 /* clip out the portion of space */
6557 /* mapped by the sub map which */
6558 /* corresponds to the underlying */
6560 vm_map_clip_start(map
, entry
, local_start
);
6561 vm_map_clip_end(map
, entry
, local_end
);
6564 /* substitute copy object for */
6565 /* shared map entry */
6566 vm_map_deallocate(entry
->object
.sub_map
);
6567 entry
->is_sub_map
= FALSE
;
6568 entry
->object
.vm_object
= copy_object
;
6570 entry
->protection
|= VM_PROT_WRITE
;
6571 entry
->max_protection
|= VM_PROT_WRITE
;
6574 entry
->needs_copy
= FALSE
;
6575 entry
->is_shared
= FALSE
;
6577 entry
->offset
= submap_entry
->offset
;
6578 entry
->needs_copy
= TRUE
;
6579 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6580 entry
->inheritance
= VM_INHERIT_COPY
;
6582 entry
->is_shared
= TRUE
;
6584 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6585 entry
->inheritance
= VM_INHERIT_COPY
;
6587 vm_map_lock_write_to_read(map
);
6589 if((cow_sub_map_parent
)
6590 && (cow_sub_map_parent
!= *pmap_map
)
6591 && (cow_sub_map_parent
!= map
)) {
6592 vm_map_unlock(cow_sub_map_parent
);
6594 entry
= submap_entry
;
6595 vaddr
= local_vaddr
;
6600 * Check whether this task is allowed to have
6604 prot
= entry
->protection
;
6605 if ((fault_type
& (prot
)) != fault_type
) {
6606 if (*pmap_map
!= map
) {
6607 vm_map_unlock(*pmap_map
);
6610 return KERN_PROTECTION_FAILURE
;
6614 * If this page is not pageable, we have to get
6615 * it for all possible accesses.
6618 if (*wired
= (entry
->wired_count
!= 0))
6619 prot
= fault_type
= entry
->protection
;
6622 * If the entry was copy-on-write, we either ...
6625 if (entry
->needs_copy
) {
6627 * If we want to write the page, we may as well
6628 * handle that now since we've got the map locked.
6630 * If we don't need to write the page, we just
6631 * demote the permissions allowed.
6634 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6636 * Make a new object, and place it in the
6637 * object chain. Note that no new references
6638 * have appeared -- one just moved from the
6639 * map to the new object.
6642 if (vm_map_lock_read_to_write(map
)) {
6643 vm_map_lock_read(map
);
6646 vm_object_shadow(&entry
->object
.vm_object
,
6648 (vm_size_t
) (entry
->vme_end
-
6651 entry
->object
.vm_object
->shadowed
= TRUE
;
6652 entry
->needs_copy
= FALSE
;
6653 vm_map_lock_write_to_read(map
);
6657 * We're attempting to read a copy-on-write
6658 * page -- don't allow writes.
6661 prot
&= (~VM_PROT_WRITE
);
6666 * Create an object if necessary.
6668 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6670 if (vm_map_lock_read_to_write(map
)) {
6671 vm_map_lock_read(map
);
6675 entry
->object
.vm_object
= vm_object_allocate(
6676 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6678 vm_map_lock_write_to_read(map
);
6682 * Return the object/offset from this entry. If the entry
6683 * was copy-on-write or empty, it has been fixed up. Also
6684 * return the protection.
6687 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6688 *object
= entry
->object
.vm_object
;
6690 *behavior
= entry
->behavior
;
6691 *lo_offset
= entry
->offset
;
6692 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6695 * Lock the object to prevent it from disappearing
6698 vm_object_lock(*object
);
6701 * Save the version number
6704 out_version
->main_timestamp
= map
->timestamp
;
6706 return KERN_SUCCESS
;
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */

boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}
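/*
 * Illustrative note (not original code): vm_map_lookup_locked records a map
 * version, and vm_map_verify/vm_map_verify_done let a caller that had to drop
 * the map lock confirm that nothing changed underneath it.  A hedged sketch
 * of that retry pattern (the fault-handling body is left hypothetical):
 */
#if 0	/* sketch only -- never compiled into the kernel */
	vm_map_version_t	version;

	for (;;) {
		/* ... vm_map_lookup_locked() fills in "version" along with
		 *     the object/offset/protection for the faulting address ... */

		/* ... the map lock is dropped here while blocking work
		 *     (e.g., paging the data in) is done ... */

		if (vm_map_verify(map, &version)) {
			/* Timestamp unchanged: the map is read-locked again,
			 * so the cached lookup results are still good.
			 * Finish up, then release with
			 * vm_map_verify_done(map, &version). */
			break;
		}
		/* The map changed while unlocked -- redo the lookup. */
	}
#endif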
6734 * vm_map_verify_done:
6736 * Releases locks acquired by a vm_map_verify.
6738 * This is now a macro in vm/vm_map.h. It does a
6739 * vm_map_unlock_read on the map.
6746 * User call to obtain information about a region in
6747 * a task's address map. Currently, only one flavor is
6750 * XXX The reserved and behavior fields cannot be filled
6751 * in until the vm merge from the IK is completed, and
6752 * vm_reserve is implemented.
6754 * XXX Dependency: syscall_vm_region() also supports only one flavor.
6760 vm_offset_t
*address
, /* IN/OUT */
6761 vm_size_t
*size
, /* OUT */
6762 vm_region_flavor_t flavor
, /* IN */
6763 vm_region_info_t info
, /* OUT */
6764 mach_msg_type_number_t
*count
, /* IN/OUT */
6765 ipc_port_t
*object_name
) /* OUT */
6767 vm_map_entry_t tmp_entry
;
6769 vm_map_entry_t entry
;
6772 vm_region_basic_info_t basic
;
6773 vm_region_extended_info_t extended
;
6774 vm_region_top_info_t top
;
6776 if (map
== VM_MAP_NULL
)
6777 return(KERN_INVALID_ARGUMENT
);
6781 case VM_REGION_BASIC_INFO
:
6783 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6784 return(KERN_INVALID_ARGUMENT
);
6786 basic
= (vm_region_basic_info_t
) info
;
6787 *count
= VM_REGION_BASIC_INFO_COUNT
;
6789 vm_map_lock_read(map
);
6792 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6793 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6794 vm_map_unlock_read(map
);
6795 return(KERN_INVALID_ADDRESS
);
6801 start
= entry
->vme_start
;
6803 basic
->offset
= entry
->offset
;
6804 basic
->protection
= entry
->protection
;
6805 basic
->inheritance
= entry
->inheritance
;
6806 basic
->max_protection
= entry
->max_protection
;
6807 basic
->behavior
= entry
->behavior
;
6808 basic
->user_wired_count
= entry
->user_wired_count
;
6809 basic
->reserved
= entry
->is_sub_map
;
6811 *size
= (entry
->vme_end
- start
);
6813 if (object_name
) *object_name
= IP_NULL
;
6814 if (entry
->is_sub_map
) {
6815 basic
->shared
= FALSE
;
6817 basic
->shared
= entry
->is_shared
;
6820 vm_map_unlock_read(map
);
6821 return(KERN_SUCCESS
);
6823 case VM_REGION_EXTENDED_INFO
:
6826 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6827 return(KERN_INVALID_ARGUMENT
);
6829 extended
= (vm_region_extended_info_t
) info
;
6830 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6832 vm_map_lock_read(map
);
6835 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6836 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6837 vm_map_unlock_read(map
);
6838 return(KERN_INVALID_ADDRESS
);
6843 start
= entry
->vme_start
;
6845 extended
->protection
= entry
->protection
;
6846 extended
->user_tag
= entry
->alias
;
6847 extended
->pages_resident
= 0;
6848 extended
->pages_swapped_out
= 0;
6849 extended
->pages_shared_now_private
= 0;
6850 extended
->pages_dirtied
= 0;
6851 extended
->external_pager
= 0;
6852 extended
->shadow_depth
= 0;
6854 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6856 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6857 extended
->share_mode
= SM_PRIVATE
;
6860 *object_name
= IP_NULL
;
6862 *size
= (entry
->vme_end
- start
);
6864 vm_map_unlock_read(map
);
6865 return(KERN_SUCCESS
);
6867 case VM_REGION_TOP_INFO
:
6870 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6871 return(KERN_INVALID_ARGUMENT
);
6873 top
= (vm_region_top_info_t
) info
;
6874 *count
= VM_REGION_TOP_INFO_COUNT
;
6876 vm_map_lock_read(map
);
6879 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6880 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6881 vm_map_unlock_read(map
);
6882 return(KERN_INVALID_ADDRESS
);
6888 start
= entry
->vme_start
;
6890 top
->private_pages_resident
= 0;
6891 top
->shared_pages_resident
= 0;
6893 vm_region_top_walk(entry
, top
);
6896 *object_name
= IP_NULL
;
6898 *size
= (entry
->vme_end
- start
);
6900 vm_map_unlock_read(map
);
6901 return(KERN_SUCCESS
);
6904 return(KERN_INVALID_ARGUMENT
);
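/*
 * Illustrative note (not original code): a hedged usage sketch of the
 * VM_REGION_BASIC_INFO flavor handled above.  describe_region() is a
 * hypothetical helper; the map and starting address are assumed, and the
 * count must be primed with the flavor's expected size because the flavor
 * switch above checks it before filling anything in.
 */
#if 0	/* sketch only -- never compiled into the kernel */
static kern_return_t
describe_region(
	vm_map_t	map,
	vm_offset_t	where)
{
	vm_offset_t			address = where;	/* IN/OUT */
	vm_size_t			size;
	vm_region_basic_info_data_t	info;
	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t			object_name;
	kern_return_t			kr;

	kr = vm_region(map, &address, &size, VM_REGION_BASIC_INFO,
		       (vm_region_info_t) &info, &count, &object_name);
	if (kr == KERN_SUCCESS) {
		/* address/size now describe the region found at or after the
		 * requested address; info holds its protection, inheritance,
		 * wiring and sharing state as filled in above. */
	}
	return kr;
}
#endif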
6909 * vm_region_recurse: A form of vm_region which follows the
6910 * submaps in a target map
6917 vm_offset_t
*address
, /* IN/OUT */
6918 vm_size_t
*size
, /* OUT */
6919 natural_t
*nesting_depth
, /* IN/OUT */
6920 vm_region_recurse_info_t info
, /* IN/OUT */
6921 mach_msg_type_number_t
*count
) /* IN/OUT */
6923 vm_map_entry_t tmp_entry
;
6925 vm_map_entry_t entry
;
6929 unsigned int recurse_count
;
6932 vm_map_entry_t base_entry
;
6933 vm_offset_t base_next
;
6934 vm_offset_t base_addr
;
6935 vm_offset_t baddr_start_delta
;
6936 vm_region_submap_info_t submap_info
;
6937 vm_region_extended_info_data_t extended
;
6939 if (map
== VM_MAP_NULL
)
6940 return(KERN_INVALID_ARGUMENT
);
6942 submap_info
= (vm_region_submap_info_t
) info
;
6943 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6945 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6946 return(KERN_INVALID_ARGUMENT
);
6950 recurse_count
= *nesting_depth
;
6952 LOOKUP_NEXT_BASE_ENTRY
:
6953 vm_map_lock_read(map
);
6954 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6955 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6956 vm_map_unlock_read(map
);
6957 return(KERN_INVALID_ADDRESS
);
6962 *size
= entry
->vme_end
- entry
->vme_start
;
6963 start
= entry
->vme_start
;
6965 baddr_start_delta
= *address
- start
;
6966 base_next
= entry
->vme_end
;
6969 while(entry
->is_sub_map
&& recurse_count
) {
6971 vm_map_lock_read(entry
->object
.sub_map
);
6974 if(entry
== base_entry
) {
6975 start
= entry
->offset
;
6976 start
+= *address
- entry
->vme_start
;
6979 submap
= entry
->object
.sub_map
;
6980 vm_map_unlock_read(map
);
6983 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6984 if ((entry
= tmp_entry
->vme_next
)
6985 == vm_map_to_entry(map
)) {
6986 vm_map_unlock_read(map
);
6991 goto LOOKUP_NEXT_BASE_ENTRY
;
6997 if(start
<= entry
->vme_start
) {
6998 vm_offset_t old_start
= start
;
6999 if(baddr_start_delta
) {
7000 base_addr
+= (baddr_start_delta
);
7001 *size
-= baddr_start_delta
;
7002 baddr_start_delta
= 0;
7005 (base_addr
+= (entry
->vme_start
- start
))) {
7006 vm_map_unlock_read(map
);
7011 goto LOOKUP_NEXT_BASE_ENTRY
;
7013 *size
-= entry
->vme_start
- start
;
7014 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7015 *size
= entry
->vme_end
- entry
->vme_start
;
7019 if(baddr_start_delta
) {
7020 if((start
- entry
->vme_start
)
7021 < baddr_start_delta
) {
7022 base_addr
+= start
- entry
->vme_start
;
7023 *size
-= start
- entry
->vme_start
;
7025 base_addr
+= baddr_start_delta
;
7026 *size
+= baddr_start_delta
;
7028 baddr_start_delta
= 0;
7030 base_addr
+= entry
->vme_start
;
7031 if(base_addr
>= base_next
) {
7032 vm_map_unlock_read(map
);
7037 goto LOOKUP_NEXT_BASE_ENTRY
;
7039 if (*size
> (entry
->vme_end
- start
))
7040 *size
= entry
->vme_end
- start
;
7042 start
= entry
->vme_start
- start
;
7045 start
+= entry
->offset
;
7048 *nesting_depth
-= recurse_count
;
7049 if(entry
!= base_entry
) {
7050 start
= entry
->vme_start
+ (start
- entry
->offset
);
7054 submap_info
->user_tag
= entry
->alias
;
7055 submap_info
->offset
= entry
->offset
;
7056 submap_info
->protection
= entry
->protection
;
7057 submap_info
->inheritance
= entry
->inheritance
;
7058 submap_info
->max_protection
= entry
->max_protection
;
7059 submap_info
->behavior
= entry
->behavior
;
7060 submap_info
->user_wired_count
= entry
->user_wired_count
;
7061 submap_info
->is_submap
= entry
->is_sub_map
;
7062 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7063 *address
= base_addr
;
7066 extended
.pages_resident
= 0;
7067 extended
.pages_swapped_out
= 0;
7068 extended
.pages_shared_now_private
= 0;
7069 extended
.pages_dirtied
= 0;
7070 extended
.external_pager
= 0;
7071 extended
.shadow_depth
= 0;
7073 if(!entry
->is_sub_map
) {
7074 vm_region_walk(entry
, &extended
, entry
->offset
,
7075 entry
->vme_end
- start
, map
, start
);
7076 submap_info
->share_mode
= extended
.share_mode
;
7077 if (extended
.external_pager
&& extended
.ref_count
== 2
7078 && extended
.share_mode
== SM_SHARED
)
7079 submap_info
->share_mode
= SM_PRIVATE
;
7080 submap_info
->ref_count
= extended
.ref_count
;
7083 submap_info
->share_mode
= SM_TRUESHARED
;
7085 submap_info
->share_mode
= SM_PRIVATE
;
7086 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7089 submap_info
->pages_resident
= extended
.pages_resident
;
7090 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7091 submap_info
->pages_shared_now_private
=
7092 extended
.pages_shared_now_private
;
7093 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7094 submap_info
->external_pager
= extended
.external_pager
;
7095 submap_info
->shadow_depth
= extended
.shadow_depth
;
7097 vm_map_unlock_read(map
);
7098 return(KERN_SUCCESS
);
7102 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7103 * Goes away after regular vm_region_recurse function migrates to
7105 * vm_region_recurse: A form of vm_region which follows the
7106 * submaps in a target map
7111 vm_region_recurse_64(
7113 vm_offset_t
*address
, /* IN/OUT */
7114 vm_size_t
*size
, /* OUT */
7115 natural_t
*nesting_depth
, /* IN/OUT */
7116 vm_region_recurse_info_t info
, /* IN/OUT */
7117 mach_msg_type_number_t
*count
) /* IN/OUT */
7119 vm_map_entry_t tmp_entry
;
7121 vm_map_entry_t entry
;
7125 unsigned int recurse_count
;
7128 vm_map_entry_t base_entry
;
7129 vm_offset_t base_next
;
7130 vm_offset_t base_addr
;
7131 vm_offset_t baddr_start_delta
;
7132 vm_region_submap_info_64_t submap_info
;
7133 vm_region_extended_info_data_t extended
;
7135 if (map
== VM_MAP_NULL
)
7136 return(KERN_INVALID_ARGUMENT
);
7138 submap_info
= (vm_region_submap_info_64_t
) info
;
7139 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
7141 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
7142 return(KERN_INVALID_ARGUMENT
);
7146 recurse_count
= *nesting_depth
;
7148 LOOKUP_NEXT_BASE_ENTRY
:
7149 vm_map_lock_read(map
);
7150 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7151 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7152 vm_map_unlock_read(map
);
7153 return(KERN_INVALID_ADDRESS
);
7158 *size
= entry
->vme_end
- entry
->vme_start
;
7159 start
= entry
->vme_start
;
7161 baddr_start_delta
= *address
- start
;
7162 base_next
= entry
->vme_end
;
7165 while(entry
->is_sub_map
&& recurse_count
) {
7167 vm_map_lock_read(entry
->object
.sub_map
);
7170 if(entry
== base_entry
) {
7171 start
= entry
->offset
;
7172 start
+= *address
- entry
->vme_start
;
7175 submap
= entry
->object
.sub_map
;
7176 vm_map_unlock_read(map
);
7179 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7180 if ((entry
= tmp_entry
->vme_next
)
7181 == vm_map_to_entry(map
)) {
7182 vm_map_unlock_read(map
);
7187 goto LOOKUP_NEXT_BASE_ENTRY
;
7193 if(start
<= entry
->vme_start
) {
7194 vm_offset_t old_start
= start
;
7195 if(baddr_start_delta
) {
7196 base_addr
+= (baddr_start_delta
);
7197 *size
-= baddr_start_delta
;
7198 baddr_start_delta
= 0;
7201 (base_addr
+= (entry
->vme_start
- start
))) {
7202 vm_map_unlock_read(map
);
7207 goto LOOKUP_NEXT_BASE_ENTRY
;
7209 *size
-= entry
->vme_start
- start
;
7210 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7211 *size
= entry
->vme_end
- entry
->vme_start
;
7215 if(baddr_start_delta
) {
7216 if((start
- entry
->vme_start
)
7217 < baddr_start_delta
) {
7218 base_addr
+= start
- entry
->vme_start
;
7219 *size
-= start
- entry
->vme_start
;
7221 base_addr
+= baddr_start_delta
;
7222 *size
+= baddr_start_delta
;
7224 baddr_start_delta
= 0;
7226 base_addr
+= entry
->vme_start
;
7227 if(base_addr
>= base_next
) {
7228 vm_map_unlock_read(map
);
7233 goto LOOKUP_NEXT_BASE_ENTRY
;
7235 if (*size
> (entry
->vme_end
- start
))
7236 *size
= entry
->vme_end
- start
;
7238 start
= entry
->vme_start
- start
;
7241 start
+= entry
->offset
;
7244 *nesting_depth
-= recurse_count
;
7245 if(entry
!= base_entry
) {
7246 start
= entry
->vme_start
+ (start
- entry
->offset
);
7250 submap_info
->user_tag
= entry
->alias
;
7251 submap_info
->offset
= entry
->offset
;
7252 submap_info
->protection
= entry
->protection
;
7253 submap_info
->inheritance
= entry
->inheritance
;
7254 submap_info
->max_protection
= entry
->max_protection
;
7255 submap_info
->behavior
= entry
->behavior
;
7256 submap_info
->user_wired_count
= entry
->user_wired_count
;
7257 submap_info
->is_submap
= entry
->is_sub_map
;
7258 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7259 *address
= base_addr
;
7262 extended
.pages_resident
= 0;
7263 extended
.pages_swapped_out
= 0;
7264 extended
.pages_shared_now_private
= 0;
7265 extended
.pages_dirtied
= 0;
7266 extended
.external_pager
= 0;
7267 extended
.shadow_depth
= 0;
7269 if(!entry
->is_sub_map
) {
7270 vm_region_walk(entry
, &extended
, entry
->offset
,
7271 entry
->vme_end
- start
, map
, start
);
7272 submap_info
->share_mode
= extended
.share_mode
;
7273 if (extended
.external_pager
&& extended
.ref_count
== 2
7274 && extended
.share_mode
== SM_SHARED
)
7275 submap_info
->share_mode
= SM_PRIVATE
;
7276 submap_info
->ref_count
= extended
.ref_count
;
7279 submap_info
->share_mode
= SM_TRUESHARED
;
7281 submap_info
->share_mode
= SM_PRIVATE
;
7282 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7285 submap_info
->pages_resident
= extended
.pages_resident
;
7286 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7287 submap_info
->pages_shared_now_private
=
7288 extended
.pages_shared_now_private
;
7289 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7290 submap_info
->external_pager
= extended
.external_pager
;
7291 submap_info
->shadow_depth
= extended
.shadow_depth
;
7293 vm_map_unlock_read(map
);
7294 return(KERN_SUCCESS
);
/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region function migrates to
 *	64 bits.
 */

kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	ipc_port_t		*object_name)		/* OUT */
{
	vm_map_entry_t		tmp_entry;
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	vm_region_basic_info_64_t	basic;
	vm_region_extended_info_t	extended;
	vm_region_top_info_t		top;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case VM_REGION_BASIC_INFO:
	{
		if (*count < VM_REGION_BASIC_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_64_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		basic->offset = entry->offset;
		basic->protection = entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
		} else {
			basic->shared = entry->is_shared;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_EXTENDED_INFO:
	{
		if (*count < VM_REGION_EXTENDED_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		extended = (vm_region_extended_info_t) info;
		*count = VM_REGION_EXTENDED_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		extended->protection = entry->protection;
		extended->user_tag = entry->alias;
		extended->pages_resident = 0;
		extended->pages_swapped_out = 0;
		extended->pages_shared_now_private = 0;
		extended->pages_dirtied = 0;
		extended->external_pager = 0;
		extended->shadow_depth = 0;

		vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

		if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
			extended->share_mode = SM_PRIVATE;

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_TOP_INFO:
	{
		if (*count < VM_REGION_TOP_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		top = (vm_region_top_info_t) info;
		*count = VM_REGION_TOP_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		top->private_pages_resident = 0;
		top->shared_pages_resident = 0;

		vm_region_top_walk(entry, top);

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}
}
void
vm_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
{
	register struct vm_object *obj, *tmp_obj;
	register int		  ref_count;

	if (entry->object.vm_object == 0 || entry->is_sub_map) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	if (obj->shadow) {
		if (ref_count == 1)
			top->private_pages_resident = obj->resident_page_count;
		else
			top->shared_pages_resident = obj->resident_page_count;
		top->ref_count  = ref_count;
		top->share_mode = SM_COW;

		while (tmp_obj = obj->shadow) {
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);
			obj = tmp_obj;

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
				ref_count--;

			top->shared_pages_resident += obj->resident_page_count;
			top->ref_count += ref_count - 1;
		}
	} else {
		if (entry->needs_copy) {
			top->share_mode = SM_COW;
			top->shared_pages_resident = obj->resident_page_count;
		}
		if (ref_count == 1 ||
		   (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
			top->share_mode = SM_PRIVATE;
			top->private_pages_resident = obj->resident_page_count;
		} else {
			top->share_mode = SM_SHARED;
			top->shared_pages_resident = obj->resident_page_count;
		}
		top->ref_count = ref_count;
	}
	top->obj_id = (int)obj;

	vm_object_unlock(obj);
}
void
vm_region_walk(
	vm_map_entry_t			entry,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	vm_offset_t			range,
	vm_map_t			map,
	vm_offset_t			va)
{
	register struct vm_object *obj, *tmp_obj;
	register vm_offset_t	   last_offset;
	register int		   i;
	register int		   ref_count;
	void vm_region_look_for_page();

	if ((entry->object.vm_object == 0) ||
	    (entry->is_sub_map) ||
	    (entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}

	obj = entry->object.vm_object;

	vm_object_lock(obj);

	if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
		ref_count--;

	for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
		vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

	if (extended->shadow_depth || entry->needs_copy)
		extended->share_mode = SM_COW;
	else {
		if (ref_count == 1)
			extended->share_mode = SM_PRIVATE;
		else {
			if (obj->true_share)
				extended->share_mode = SM_TRUESHARED;
			else
				extended->share_mode = SM_SHARED;
		}
	}
	extended->ref_count = ref_count - extended->shadow_depth;

	for (i = 0; i < extended->shadow_depth; i++) {
		if ((tmp_obj = obj->shadow) == 0)
			break;
		vm_object_lock(tmp_obj);
		vm_object_unlock(obj);

		if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
			ref_count--;

		extended->ref_count += ref_count;
		obj = tmp_obj;
	}
	vm_object_unlock(obj);

	if (extended->share_mode == SM_SHARED) {
		register vm_map_entry_t	cur;
		register vm_map_entry_t	last;
		int my_refs;

		obj = entry->object.vm_object;
		last = vm_map_to_entry(map);
		my_refs = 0;

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;
		for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
			my_refs += vm_region_count_obj_refs(cur, obj);

		if (my_refs == ref_count)
			extended->share_mode = SM_PRIVATE_ALIASED;
		else if (my_refs > 1)
			extended->share_mode = SM_SHARED_ALIASED;
	}
}
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
	vm_object_t			object,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	int				max_refcnt,
	int				depth,
	vm_map_t			map,
	vm_offset_t			va)
{
	register vm_page_t	p;
	register vm_object_t	shadow;
	register int		ref_count;
	vm_object_t		caller_object;

	shadow = object->shadow;
	caller_object = object;

	while (TRUE) {

		if ( !(object->pager_trusted) && !(object->internal))
			extended->external_pager = 1;

		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			if (shadow && (max_refcnt == 1))
				extended->pages_shared_now_private++;

			if (p->dirty || pmap_is_modified(p->phys_addr))
				extended->pages_dirtied++;
			extended->pages_resident++;

			if(object != caller_object)
				vm_object_unlock(object);

			return;
		}
		if (object->existence_map) {
			if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

				extended->pages_swapped_out++;

				if(object != caller_object)
					vm_object_unlock(object);

				return;
			}
		}
		if (shadow) {
			vm_object_lock(shadow);

			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
				ref_count--;

			if (++depth > extended->shadow_depth)
				extended->shadow_depth = depth;

			if (ref_count > max_refcnt)
				max_refcnt = ref_count;

			if(object != caller_object)
				vm_object_unlock(object);

			object = shadow;
			shadow = object->shadow;
			offset = offset + object->shadow_offset;
			continue;
		}
		if(object != caller_object)
			vm_object_unlock(object);
		break;
	}
}
int
vm_region_count_obj_refs(
	vm_map_entry_t	entry,
	vm_object_t	object)
{
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

	if (entry->is_sub_map)
		ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			if (tmp_obj = chk_obj->shadow)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}
/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(
	vm_map_t	map,
	vm_offset_t	start)
{
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;
	vm_map_entry_t	next_entry;

	vm_map_lock(map);
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

		(prev_entry->vme_end == this_entry->vme_start) &&

		(prev_entry->is_shared == FALSE) &&
		(prev_entry->is_sub_map == FALSE) &&

		(this_entry->is_shared == FALSE) &&
		(this_entry->is_sub_map == FALSE) &&

		(prev_entry->inheritance == this_entry->inheritance) &&
		(prev_entry->protection == this_entry->protection) &&
		(prev_entry->max_protection == this_entry->max_protection) &&
		(prev_entry->behavior == this_entry->behavior) &&
		(prev_entry->wired_count == this_entry->wired_count) &&
		(prev_entry->user_wired_count == this_entry->user_wired_count)&&
		(prev_entry->in_transition == FALSE) &&
		(this_entry->in_transition == FALSE) &&

		(prev_entry->needs_copy == this_entry->needs_copy) &&

		(prev_entry->object.vm_object == this_entry->object.vm_object)&&
		((prev_entry->offset +
		  (prev_entry->vme_end - prev_entry->vme_start))
		     == this_entry->offset)
	) {
		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->vme_end = this_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
		counter(c_vm_map_simplified_lower++);
	}
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

		(next_entry->vme_start == this_entry->vme_end) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->inheritance == this_entry->inheritance) &&
		(next_entry->protection == this_entry->protection) &&
		(next_entry->max_protection == this_entry->max_protection) &&
		(next_entry->behavior == this_entry->behavior) &&
		(next_entry->wired_count == this_entry->wired_count) &&
		(next_entry->user_wired_count == this_entry->user_wired_count)&&
		(this_entry->in_transition == FALSE) &&
		(next_entry->in_transition == FALSE) &&

		(next_entry->needs_copy == this_entry->needs_copy) &&

		(next_entry->object.vm_object == this_entry->object.vm_object)&&
		((this_entry->offset +
		  (this_entry->vme_end - this_entry->vme_start))
		     == next_entry->offset)
	) {
		vm_map_entry_unlink(map, next_entry);
		this_entry->vme_end = next_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(next_entry->object.vm_object);
		vm_map_entry_dispose(map, next_entry);
		counter(c_vm_map_simplified_upper++);
	}
	counter(c_vm_map_simplify_called++);
	vm_map_unlock(map);
}
/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cachability etc. for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here,
 *		everything else in the pmap module. If any non-volatile
 *		information must be kept, the pmap module should handle
 *		it itself. [This assumes that attributes do not
 *		need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
	vm_map_t		map,
	vm_offset_t		address,
	vm_size_t		size,
	vm_machine_attribute_t	attribute,
	vm_machine_attribute_val_t* value)		/* IN/OUT */
{
	kern_return_t	ret;
	vm_size_t	sync_size;
	vm_offset_t	start;
	vm_map_entry_t	entry;

	if (address < vm_map_min(map) ||
	    (address + size) > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	vm_map_lock(map);

	if (attribute != MATTR_CACHE) {
		/* If we don't have to find physical addresses, we */
		/* don't have to do an explicit traversal here.    */
		ret = pmap_attribute(map->pmap,
				address, size, attribute, value);
		vm_map_unlock(map);
		return ret;
	}

	/* Get the starting address */
	start = trunc_page(address);
	/* Figure how much memory we need to flush (in page increments) */
	sync_size = round_page(start + size) - start;

	ret = KERN_SUCCESS;		/* Assume it all worked */

	while(sync_size) {
		if (vm_map_lookup_entry(map, start, &entry)) {
			vm_size_t	sub_size;
			if((entry->vme_end - start) > sync_size) {
				sub_size = sync_size;
				sync_size = 0;
			} else {
				sub_size = entry->vme_end - start;
				sync_size -= sub_size;
			}
			if(entry->is_sub_map) {
				vm_map_machine_attribute(
					entry->object.sub_map,
					(start - entry->vme_start)
						+ entry->offset,
					sub_size,
					attribute, value);
			} else {
				if(entry->object.vm_object) {
					vm_page_t		m;
					vm_object_t		object;
					vm_object_t		base_object;
					vm_object_offset_t	offset;
					vm_object_offset_t	base_offset;
					vm_size_t		range;
					range = sub_size;
					offset = (start - entry->vme_start)
							+ entry->offset;
					base_offset = offset;
					object = entry->object.vm_object;
					base_object = object;
					while(range) {
						m = vm_page_lookup(object, offset);
						if(m && !m->fictitious) {
							pmap_attribute_cache_sync(
								m->phys_addr,
								PAGE_SIZE,
								attribute, value);
						} else if (object->shadow) {
							offset = offset +
								object->shadow_offset;
							object = object->shadow;
							continue;
						}
						range -= PAGE_SIZE;
						/* Bump to the next page */
						base_offset += PAGE_SIZE;
						offset = base_offset;
						object = base_object;
					}
				}
			}
			start += sub_size;
		} else {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}
	}

	vm_map_unlock(map);

	return ret;
}
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_behavior_t	new_behavior)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	XPR(XPR_VM_MAP,
	    "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
	    (integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		break;
	case VM_BEHAVIOR_WILLNEED:
	case VM_BEHAVIOR_DONTNEED:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	/*
	 *	The entire address range must be valid for the map.
	 *	Note that vm_map_range_check() does a
	 *	vm_map_lookup_entry() internally and returns the
	 *	entry containing the start of the address range if
	 *	the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
#include <mach_kdb.h>
#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
		struct vm_map_links	*links);

extern void vm_map_header_print(
		struct vm_map_header	*header);

extern void vm_map_entry_print(
		vm_map_entry_t		entry);

extern void vm_follow_entry(
		vm_map_entry_t		entry);

extern void vm_follow_map(
		vm_map_t		map);
/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
	struct vm_map_links	*links)
{
	iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
		links->prev,
		links->next,
		links->start,
		links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
	struct vm_map_header	*header)
{
	vm_map_links_print(&header->links);
	iprintf("nentries=0x%x, %sentries_pageable\n",
		header->nentries,
		(header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
	vm_map_entry_t	entry)
{
	extern int db_indent;
	int shadows;

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n",shadows);

	db_indent -= 2;
}
/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
	register vm_map_entry_t	entry)
{
	extern int db_indent;
	static char *inheritance_name[4] = { "share", "copy", "none", "?"};
	static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	vm_map_links_print(&entry->links);

	iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
		entry->vme_start,
		entry->vme_end,
		entry->protection,
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->wired_count,
		entry->user_wired_count);

	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap=0x%x, offset=0x%x\n",
			entry->object.sub_map,
			entry->offset);
	} else {
		iprintf("object=0x%x, offset=0x%x, ",
			entry->object.vm_object,
			entry->offset);
		printf("%sis_shared, %sneeds_copy\n",
			(entry->is_shared ? "" : "!"),
			(entry->needs_copy ? "" : "!"));
	}

	db_indent -= 2;
}
/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
	vm_map_t	map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);
	}

	db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
	register vm_map_t	map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;
	char *swstate;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	vm_map_header_print(&map->hdr);

	iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
		map->pmap,
		map->size,
		map->ref_count,
		map->hint,
		map->first_free);

	iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),
		map->timestamp);

#if	TASK_SWAPPER
	switch (map->sw_state) {
	case MAP_SW_IN:
		swstate = "SW_IN";
		break;
	case MAP_SW_OUT:
		swstate = "SW_OUT";
		break;
	default:
		swstate = "????";
		break;
	}
	iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);
	}

	db_indent -= 2;
}
/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
	vm_map_copy_t	copy)
{
	extern int db_indent;
	vm_map_entry_t entry;

	printf("copy object 0x%x\n", copy);

	db_indent += 2;

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");
		break;

	case VM_MAP_COPY_OBJECT:
		printf("[object]");
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");
		break;

	default:
		printf("[bad type]");
		break;
	}
	printf(", offset=0x%x", copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);
		}
		break;

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
		break;
	}

	db_indent -= 2;
}
/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
	vm_map_t	map)
{
	vm_map_entry_t	entry;
	vm_size_t	total;

	total = 0;
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}

	return total;
}

#endif	/* MACH_KDB */
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
	vm_map_t		map,
	vm_map_entry_t		insp_entry,
	vm_offset_t		start,
	vm_offset_t		end,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
{
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);

	return new_entry;
}
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
	vm_map_t		map,
	vm_offset_t		addr,
	vm_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	boolean_t		pageable)
{
	kern_return_t		result;
	vm_size_t		mapped_size;
	vm_size_t		tmp_size;
	vm_map_entry_t		src_entry;     /* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_offset_t		map_address;
	vm_offset_t		src_start;     /* start of entry to map */
	vm_offset_t		src_end;       /* end of region to be mapped */
	vm_object_t		object;
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == round_page(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/*
	 *	Compute start and end of region.
	 */
	src_start = trunc_page(addr);
	src_end = round_page(src_start + size);

	/*
	 *	Initialize map_header.
	 */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	map_address = 0;
	mapped_size = 0;
	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, need to loop on them.
	 */
	vm_map_lock(map);
	while (mapped_size != size) {
		vm_size_t	entry_size;

		/*
		 *	Find the beginning of the region.
		 */
		if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if(src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_size_t)(src_entry->vme_end -
					 src_entry->vme_start);

		if(src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
		} else {
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 *	the right object.
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,
						 &src_entry->offset,
						 entry_size);

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					if(map->mapped) {
						vm_object_pmap_protect(
							src_entry->object.vm_object,
							src_entry->offset,
							entry_size,
							PMAP_NULL,
							src_entry->vme_start,
							src_entry->protection &
							     ~VM_PROT_WRITE);
					} else {
						pmap_protect(vm_map_pmap(map),
							src_entry->vme_start,
							src_entry->vme_end,
							src_entry->protection &
							     ~VM_PROT_WRITE);
					}
				}

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;
			}

			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
			    MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			}
			vm_object_unlock(object);
		}

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE; /* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/*
		 * The new region has to be copied now if required.
		 */
	RestartCopy:
		if (!copy) {
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
		} else if (src_entry->wired_count == 0 &&
			   vm_object_copy_quickly(&new_entry->object.vm_object,
						  new_entry->offset,
						  (new_entry->vme_end -
						   new_entry->vme_start),
						  &src_needs_copy,
						  &new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/*
			 * Handle copy_on_write semantics.
			 */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
						       offset,
						       entry_size,
						       ((src_entry->is_shared
							 || map->mapped) ?
							PMAP_NULL : map->pmap),
						       src_entry->vme_start,
						       src_entry->protection &
							    ~VM_PROT_WRITE);

				src_entry->needs_copy = TRUE;
			}
			/*
			 * Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

		} else {
			new_entry->is_shared = FALSE;

			/*
			 * The map can be safely unlocked since we
			 * already hold a reference on the object.
			 *
			 * Record the timestamp of the map for later
			 * verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);	/* Increments timestamp once! */

			/*
			 * Perform the copy.
			 */
			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
						object,
						offset,
						entry_size,
						THREAD_UNINT,
						&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
			} else {
				result = vm_object_copy_strategically(
						object,
						offset,
						entry_size,
						&new_entry->object.vm_object,
						&new_entry->offset,
						&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;
			}

			/*
			 * Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);
				break;
			}

			/*
			 * Verify that the map has not substantially
			 * changed while the copy was being made.
			 */

			vm_map_lock(map);
			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 * Simple version comparison failed.
				 *
				 * Retry the lookup and verify that the
				 * same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
						     object.vm_object);
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;
				continue;
			}

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);
				goto RestartCopy;
			}
		}

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	} /* end while */

	vm_map_unlock(map);
	if (result != KERN_SUCCESS) {
		/*
		 * Free all allocated elements.
		 */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
		}
	}
	return result;
}
/*
 *	Routine:	vm_remap
 *
 *		Map portion of a task's address space.
 *		Mapped region must not overlap more than
 *		one vm memory object. Protections and
 *		inheritance attributes remain the same
 *		as in the original task and are out parameters.
 *		Source and Target task can be identical
 *		Other attributes are identical as for vm_map()
 */
kern_return_t
vm_remap(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t		result;
	vm_map_entry_t		entry;
	vm_map_entry_t		insp_entry;
	vm_map_entry_t		new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
			break;
		/*FALL THRU*/
	default:
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);

	result = vm_remap_extract(src_map, memory_address,
				  size, copy, &map_header,
				  cur_protection,
				  max_protection,
				  inheritance,
				  target_map->hdr.entries_pageable);

	if (result != KERN_SUCCESS) {
		return result;
	}

	/*
	 * Allocate/check a range of free virtual address
	 * space for the target
	 */
	*address = trunc_page(*address);
	vm_map_lock(target_map);
	result = vm_remap_range_allocate(target_map, address, size,
					 mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			insp_entry = entry;
		} else {
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
			} else {
				vm_map_deallocate(entry->object.sub_map);
			}
			_vm_map_entry_dispose(&map_header, entry);
		}
	}

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	}
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
	return result;
}
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		returns the address and the map entry just before the allocated
 *		range
 *
 *	Map must be locked.
 */

kern_return_t
vm_remap_range_allocate(
	vm_map_t	map,
	vm_offset_t	*address,	/* IN/OUT */
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_entry_t	*map_entry)	/* OUT */
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

 StartAgain: ;

	start = *address;

	if (anywhere)
	{
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			return(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				return(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						vm_map_lock(map);
						goto StartAgain;
					}
				}

				return(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t	temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		if ((start & mask) != 0)
			return(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			return(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			return(KERN_NO_SPACE);
	}
	*map_entry = entry;
	return(KERN_SUCCESS);
}
/*
 *	Routine:	vm_map_switch
 *
 *	Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
	vm_map_t	map)
{
	int		mycpu;
	thread_act_t	thr_act = current_act();
	vm_map_t	oldmap = thr_act->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thr_act, map, mycpu);

	mp_enable_preemption();
	return(oldmap);
}
/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map. The space must already exist in the
 *		destination map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault, i.e. kernel mode user
 *		threads.
 */
kern_return_t
vm_map_write_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map.  The space must already exist in the
 *		kernel map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault, i.e. kernel mode user
 *		threads.
 */
kern_return_t
vm_map_read_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */

kern_return_t
vm_region_clone(
	ipc_port_t	src_region,
	ipc_port_t	dst_region)
{
	vm_named_entry_t	src_object;
	vm_named_entry_t	dst_object;
	vm_map_t		src_map;
	vm_map_t		dst_map;
	vm_offset_t		addr;
	vm_offset_t		max_off;
	vm_map_entry_t		entry;
	vm_map_entry_t		new_entry;
	vm_map_entry_t		insert_point;

	src_object = (vm_named_entry_t)src_region->ip_kobject;
	dst_object = (vm_named_entry_t)dst_region->ip_kobject;
	if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
		return KERN_INVALID_ARGUMENT;
	}
	src_map = (vm_map_t)src_object->backing.map;
	dst_map = (vm_map_t)dst_object->backing.map;
	/* destination map is assumed to be unavailable to any other */
	/* activity.  i.e. it is new */
	vm_map_lock(src_map);
	if((src_map->min_offset != dst_map->min_offset)
	   || (src_map->max_offset != dst_map->max_offset)) {
		vm_map_unlock(src_map);
		return KERN_INVALID_ARGUMENT;
	}
	addr = src_map->min_offset;
	vm_map_lookup_entry(dst_map, addr, &entry);
	if(entry == vm_map_to_entry(dst_map)) {
		entry = entry->vme_next;
	}
	if(entry == vm_map_to_entry(dst_map)) {
		max_off = src_map->max_offset;
	} else {
		max_off = entry->vme_start;
	}
	vm_map_lookup_entry(src_map, addr, &entry);
	if(entry == vm_map_to_entry(src_map)) {
		entry = entry->vme_next;
	}
	vm_map_lookup_entry(dst_map, addr, &insert_point);
	while((entry != vm_map_to_entry(src_map)) &&
	      (entry->vme_end <= max_off)) {
		addr = entry->vme_start;
		new_entry = vm_map_entry_create(dst_map);
		vm_map_entry_copy(new_entry, entry);
		vm_map_entry_link(dst_map, insert_point, new_entry);
		insert_point = new_entry;
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (new_entry->is_sub_map) {
				vm_map_reference(new_entry->object.sub_map);
			} else {
				vm_object_reference(
					new_entry->object.vm_object);
			}
		}
		dst_map->size += new_entry->vme_end - new_entry->vme_start;
		entry = entry->vme_next;
	}
	vm_map_unlock(src_map);
	return KERN_SUCCESS;
}
/*
 *	Export routines to other components for the things we access locally through
 *	macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
	return (current_map_fast());
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	vm_map_lock(map);

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
	{
		vm_map_unlock(map);
		return (FALSE);
	}

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		vm_map_unlock(map);
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 *	No holes allowed!
		 */

		if (start < entry->vme_start) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/* go to next entry */

		start = entry->vme_end;
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(TRUE);
}
/*
 *	This routine is obsolete, but included for backward
 *	compatibility for older drivers.
 */
void
kernel_vm_map_reference(
	vm_map_t	map)
{
	vm_map_reference(map);
}

/*
 *	vm_map_reference:
 *
 *	Most code internal to the osfmk will go through a
 *	macro defining this.  This is always here for the
 *	use of other kernel components.
 */
#undef vm_map_reference
void
vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
#endif
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
	register vm_map_t	map)
{
	unsigned int		ref;

	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	if (ref > 0) {
		vm_map_res_deallocate(map);
		mutex_unlock(&map->s_lock);
		return;
	}
	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

#if	TASK_SWAPPER
	/*
	 * The map residence count isn't decremented here because
	 * the vm_map_delete below will traverse the entire map,
	 * deleting entries, and the residence counts on objects
	 * and sharing maps will go away then.
	 */
#endif

	vm_map_destroy(map);
}