/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
/*
 *	Internal prototypes
 */
extern boolean_t	vm_map_range_check(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_map_entry_t	*entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header	*map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry);

extern void		vm_map_pmap_enter(
				vm_map_t		map,
				vm_offset_t		addr,
				vm_offset_t		end_addr,
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_prot_t		protection);

extern void		_vm_map_clip_end(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		end);

extern void		vm_map_entry_delete(
				vm_map_t	map,
				vm_map_entry_t	entry);

extern kern_return_t	vm_map_delete(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				int		flags);

extern void		vm_map_copy_steal_pages(
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_t	dst_map,
				vm_map_entry_t	entry,
				vm_map_copy_t	copy,
				vm_offset_t	start);

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_t	dst_map,
				vm_map_entry_t	tmp_entry,
				vm_map_copy_t	copy,
				vm_offset_t	start,
				pmap_t		pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_map_t	src_map,
				vm_offset_t	src_addr,
				vm_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_map_t	map,
				vm_offset_t	*addr,		/* IN/OUT */
				vm_map_copy_t	copy,
				boolean_t	overwrite);

extern void		vm_map_fork_share(
				vm_map_t	old_map,
				vm_map_entry_t	old_entry,
				vm_map_t	new_map);

extern boolean_t	vm_map_fork_copy(
				vm_map_t	old_map,
				vm_map_entry_t	*old_entry_p,
				vm_map_t	new_map);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t	map,
				vm_offset_t	*address,	/* IN/OUT */
				vm_size_t	size,
				vm_offset_t	mask,
				boolean_t	anywhere,
				vm_map_entry_t	*map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		start);

void			vm_region_top_walk(
				vm_map_entry_t		entry,
				vm_region_top_info_t	top);

void			vm_region_walk(
				vm_map_entry_t			entry,
				vm_region_extended_info_t	extended,
				vm_object_offset_t		offset,
				vm_offset_t			range,
				vm_map_t			map,
				vm_offset_t			va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory -- the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD) \
MACRO_BEGIN				\
	*(NEW) = *(OLD);		\
	(NEW)->is_shared = FALSE;	\
	(NEW)->needs_wakeup = FALSE;	\
	(NEW)->in_transition = FALSE;	\
	(NEW)->wired_count = 0;		\
	(NEW)->user_wired_count = 0;	\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)		(*(NEW) = *(OLD))
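
/*
 * Illustrative sketch (not part of the original source): the practical
 * difference between the two copy macros above.  vm_map_entry_copy() is
 * for a new, independent mapping of the same memory, so the wired counts
 * in the copy start at zero; vm_map_entry_copy_full() is a bit-for-bit
 * clone used when an entry is split or moved between zones, so the wire
 * counts carry over.  The helper below is hypothetical and never
 * compiled (#if 0); it only demonstrates the intended invariants.
 */
#if 0
static void
example_entry_copy(vm_map_entry_t old_entry,
		   vm_map_entry_t new_mapping,
		   vm_map_entry_t split_half)
{
	/* New mapping of the same object: wiring state is reset. */
	vm_map_entry_copy(new_mapping, old_entry);
	assert(new_mapping->wired_count == 0);
	assert(new_mapping->user_wired_count == 0);

	/* Clipping/splitting: the clone keeps the wire counts intact. */
	vm_map_entry_copy_full(split_half, old_entry);
	assert(split_half->wired_count == old_entry->wired_count);
}
#endif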
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;
/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */
vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;		/* to init kentry_data_size */
#define NO_COALESCE_LIMIT	(1024 * 128)

/*
 *	Threshold for aggressive (eager) page map entering for vm copyout
 *	operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t	vm_map_aggressive_enter_max;	/* set by bootstrap */
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
					PAGE_SIZE, "maps");
	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					1024*1024, PAGE_SIZE*5,
					"non-kernel map entries");
	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
					kentry_data_size, kentry_data_size,
					"kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
					16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}

void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world. This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max,
	boolean_t	pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
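
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might create a new map with vm_map_create().  The pmap argument and
 * the VM_MIN_ADDRESS/VM_MAX_ADDRESS bounds shown here are assumptions
 * for illustration; real callers derive them from an existing map or
 * from the platform pmap layer.  Never compiled (#if 0).
 */
#if 0
static vm_map_t
example_create_map(pmap_t new_pmap)
{
	/* pageable == TRUE: entries come from vm_map_entry_zone */
	return vm_map_create(new_pmap,
			     VM_MIN_ADDRESS,	/* assumed lower bound */
			     VM_MAX_ADDRESS,	/* assumed upper bound */
			     TRUE);
}
#endif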
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	_vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	_vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(map, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}
boolean_t first_free_is_valid(vm_map_t map);	/* forward */
boolean_t first_free_check = FALSE;

boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
	       (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free)			\
MACRO_BEGIN							\
	vm_map_t	UFF_map;				\
	vm_map_entry_t	UFF_first_free;				\
	vm_map_entry_t	UFF_next_entry;				\
	UFF_map = (map);					\
	UFF_first_free = (new_first_free);			\
	UFF_next_entry = UFF_first_free->vme_next;		\
	while (trunc_page(UFF_next_entry->vme_start) ==		\
	       trunc_page(UFF_first_free->vme_end) ||		\
	       (trunc_page(UFF_next_entry->vme_start) ==	\
		trunc_page(UFF_first_free->vme_start) &&	\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
		UFF_first_free = UFF_next_entry;		\
		UFF_next_entry = UFF_first_free->vme_next;	\
		if (UFF_first_free == vm_map_to_entry(UFF_map))	\
			break;					\
	}							\
	UFF_map->first_free = UFF_first_free;			\
	assert(first_free_is_valid(UFF_map));			\
MACRO_END
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)			\
MACRO_BEGIN								\
	vm_map_t VMEL_map;						\
	vm_map_entry_t VMEL_entry;					\
	VMEL_map = (map);						\
	VMEL_entry = (entry);						\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);		\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)			\
	MACRO_BEGIN							\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END

#define vm_map_entry_unlink(map, entry)					\
MACRO_BEGIN								\
	vm_map_t VMEU_map;						\
	vm_map_entry_t VMEU_entry;					\
	vm_map_entry_t VMEU_first_free;					\
	VMEU_map = (map);						\
	VMEU_entry = (entry);						\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;			\
	else								\
		VMEU_first_free = VMEU_map->first_free;			\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);		\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);			\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)				\
	MACRO_BEGIN							\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
	MACRO_END
/*
 *	kernel_vm_map_reference:
 *
 *	kernel internal export version for iokit and bsd components
 *	in lieu of component interface semantics.
 */
void
kernel_vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
#endif
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_reference:
 *
 *	Adds valid reference and residence counts to the given map.
 *	The map must be in memory (i.e. non-zero residence count).
 */
void
vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}
/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
	register vm_map_t	map)
{
	unsigned int		ref;

	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	if (ref > 0) {
		vm_map_res_deallocate(map);
		mutex_unlock(&map->s_lock);
		return;
	}
	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

#if	TASK_SWAPPER
	/*
	 * The map residence count isn't decremented here because
	 * the vm_map_delete below will traverse the entire map,
	 * deleting entries, and the residence counts on objects
	 * and sharing maps will go away then.
	 */
#endif	/* TASK_SWAPPER */

	vm_map_destroy(map);
}
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_unlock(map);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
#if	TASK_SWAPPER
/*
 * vm_map_swapin/vm_map_swapout
 *
 * Swap a map in and out, either referencing or releasing its resources.
 * These functions are internal use only; however, they must be exported
 * because they may be called from macros, which are exported.
 *
 * In the case of swapout, there could be races on the residence count,
 * so if the residence count is up, we return, assuming that a
 * vm_map_deallocate() call in the near future will bring us back.
 *
 * Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 * Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 * Because vm_map_swapin() is potentially an expensive operation, it
 * should be used with caution.
 *
 * Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t	entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t	entry;

	/*
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock);
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
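
/*
 * Illustrative sketch (not part of the original source): how callers in
 * this file typically use vm_map_lookup_entry().  The boolean result
 * distinguishes "address is mapped" from "address is in a hole", and in
 * both cases *entry names the entry at or immediately before the
 * address.  The helper is hypothetical and never compiled (#if 0).
 */
#if 0
static boolean_t
example_lookup(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr lies in [entry->vme_start, entry->vme_end) */
		return TRUE;
	}
	/* addr is unmapped; "entry" precedes the hole containing addr */
	return FALSE;
}
#endif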
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */
	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */
	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */
		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */
		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */
		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */
		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */
	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */
	vm_map_entry_link(map, entry, new_entry);

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
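
/*
 * Illustrative sketch (not part of the original source): a
 * kmem_alloc-style use of vm_map_find_space().  As documented above,
 * the map comes back locked on KERN_SUCCESS, so the caller fills in the
 * entry (object/offset were zeroed) and then unlocks.  The helper name
 * is hypothetical and never compiled (#if 0).
 */
#if 0
static kern_return_t
example_find_space(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	vm_map_entry_t	entry;
	kern_return_t	kr;

	kr = vm_map_find_space(map, addrp, size,
			       (vm_offset_t) 0,	/* no alignment mask */
			       &entry);
	if (kr != KERN_SUCCESS)
		return kr;		/* map is unlocked on failure */

	/* ... install an object in "entry" here if desired ... */
	vm_map_unlock(map);		/* locked on KERN_SUCCESS */
	return KERN_SUCCESS;
}
#endif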
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page not found in the object the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;

	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
				map, addr, object, offset);
		}

		m->busy = TRUE;
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, FALSE);

		if (m->no_isync) {
			pmap_attribute(map->pmap,
				       addr,
				       PAGE_SIZE,
				       MATTR_CACHE,
				       &mv_cache_sync);
		}
		vm_object_lock(object);

		m->no_isync = FALSE;

		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);

		/*
		 *	Calculate the first possible address.
		 */
		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */
		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */
		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */
			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							    THREAD_ABORTSAFE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */
			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */
			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */
			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */
		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */
		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */
		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */
		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */
	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
	    (entry->wired_count == 0)) {	/* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */
	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}
	}

 BailOut: ;
	return(result);

#undef	RETURN
}
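
/*
 * Illustrative sketch (not part of the original source): a typical
 * anonymous, zero-fill allocation through vm_map_enter(), letting the
 * kernel pick the address (VM_FLAGS_ANYWHERE) and passing a null object
 * so the new entry can be coalesced with a neighbor or zero-filled on
 * first fault.  The argument order follows the declaration above; the
 * helper name is hypothetical and the block is never compiled (#if 0).
 */
#if 0
static kern_return_t
example_enter_anywhere(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	*addrp = 0;
	return vm_map_enter(map, addrp, size,
			    (vm_offset_t) 0,		/* no alignment mask */
			    VM_FLAGS_ANYWHERE,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0,
			    FALSE,			/* needs_copy */
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
}
#endif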
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef	i386
#define vm_map_clip_start(map, entry, startaddr)			\
MACRO_BEGIN								\
	vm_map_t VMCS_map;						\
	vm_map_entry_t VMCS_entry;					\
	vm_offset_t VMCS_startaddr;					\
	VMCS_map = (map);						\
	VMCS_entry = (entry);						\
	VMCS_startaddr = (startaddr);					\
	if (VMCS_startaddr > VMCS_entry->vme_start) {			\
		if(entry->use_pmap) {					\
			vm_offset_t	pmap_base_addr;			\
			vm_offset_t	pmap_end_addr;			\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start;	\
			pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
			pmap_unnest(map->pmap, pmap_base_addr,		\
				(pmap_end_addr - pmap_base_addr) + 1);	\
			entry->use_pmap = FALSE;			\
		}							\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	}								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free);		\
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr)			\
MACRO_BEGIN								\
	vm_map_t VMCS_map;						\
	vm_map_entry_t VMCS_entry;					\
	vm_offset_t VMCS_startaddr;					\
	VMCS_map = (map);						\
	VMCS_entry = (entry);						\
	VMCS_startaddr = (startaddr);					\
	if (VMCS_startaddr > VMCS_entry->vme_start) {			\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	}								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free);		\
MACRO_END
#endif	/* i386 */

#define vm_map_copy_clip_start(copy, entry, startaddr)			\
	MACRO_BEGIN							\
	if ((startaddr) > (entry)->vme_start)				\
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
	MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */
	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef	i386
#define vm_map_clip_end(map, entry, endaddr)				\
MACRO_BEGIN								\
	vm_map_t VMCE_map;						\
	vm_map_entry_t VMCE_entry;					\
	vm_offset_t VMCE_endaddr;					\
	VMCE_map = (map);						\
	VMCE_entry = (entry);						\
	VMCE_endaddr = (endaddr);					\
	if (VMCE_endaddr < VMCE_entry->vme_end) {			\
		if(entry->use_pmap) {					\
			vm_offset_t	pmap_base_addr;			\
			vm_offset_t	pmap_end_addr;			\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start;	\
			pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
			pmap_unnest(map->pmap, pmap_base_addr,		\
				(pmap_end_addr - pmap_base_addr) + 1);	\
			entry->use_pmap = FALSE;			\
		}							\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	}								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free);		\
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr)				\
MACRO_BEGIN								\
	vm_map_t VMCE_map;						\
	vm_map_entry_t VMCE_entry;					\
	vm_offset_t VMCE_endaddr;					\
	VMCE_map = (map);						\
	VMCE_entry = (entry);						\
	VMCE_endaddr = (endaddr);					\
	if (VMCE_endaddr < VMCE_entry->vme_end) {			\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	}								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free);		\
MACRO_END
#endif	/* i386 */

#define vm_map_copy_clip_end(copy, entry, endaddr)			\
	MACRO_BEGIN							\
	if ((endaddr) < (entry)->vme_end)				\
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr));	\
	MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */
	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
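
/*
 * Illustrative sketch (not part of the original source): the
 * lookup / clip_start / clip_end sequence that the range operations in
 * this file (protect, inherit, wire, submap, ...) use so that map
 * entries line up exactly with a caller-supplied [start, end) range.
 * The map is assumed to be write-locked by the caller.  The helper is
 * hypothetical and never compiled (#if 0).
 */
#if 0
static void
example_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);	/* split at start */
		vm_map_clip_end(map, entry, end);	/* split at end   */
	}
}
#endif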
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
	{						\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
	}
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses is wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;

	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_offset_t		offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
#ifndef	i386
		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
						start, end - start);
			if (result)
				panic("pmap_nest failed!");
			entry->use_pmap = TRUE;
		}
#else
		pmap_remove(map->pmap, start, end);
#endif	/* i386 */
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t		current;
	register vm_offset_t		prev;
	vm_map_entry_t			entry;
	vm_prot_t			new_max;
	boolean_t			clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */
	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 * If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}
	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */
	current = entry;
	if (clip) {
		vm_map_clip_start(map, entry, start);
	}
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
				 new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap). This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;

				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, pmap_base_addr,
					(pmap_end_addr - pmap_base_addr) + 1);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						current->vme_start,
						current->vme_end,
						current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						current->vme_end,
						current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
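
/*
 * Illustrative sketch (not part of the original source): making a range
 * read-only with vm_map_protect().  Passing set_max == FALSE changes
 * only the current protection; TRUE would also lower max_protection,
 * which can never be raised again.  The helper name is hypothetical and
 * the block is never compiled (#if 0).
 */
#if 0
static kern_return_t
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_protect(map, start, end,
			      VM_PROT_READ,
			      FALSE /* set_max */);
}
#endif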
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		temp_entry = temp_entry->vme_next;
		entry = temp_entry;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY)
				return(KERN_INVALID_ARGUMENT);
		}

		entry = entry->vme_next;
	}

	entry = temp_entry;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
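
/*
 * Illustrative sketch (not part of the original source): marking a
 * range VM_INHERIT_SHARE so that a child map created at fork time sees
 * the same memory rather than a copy.  As checked above,
 * VM_INHERIT_COPY would be rejected for submap entries.  The helper
 * name is hypothetical and the block is never compiled (#if 0).
 */
#if 0
static kern_return_t
example_share_with_child(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
}
#endif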
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
2088 register vm_map_t map
,
2089 register vm_offset_t start
,
2090 register vm_offset_t end
,
2091 register vm_prot_t access_type
,
2092 boolean_t user_wire
,
2095 register vm_map_entry_t entry
;
2096 struct vm_map_entry
*first_entry
, tmp_entry
;
2098 register vm_offset_t s
,e
;
2100 boolean_t need_wakeup
;
2101 boolean_t main_map
= FALSE
;
2102 boolean_t interruptible_state
;
2103 thread_t cur_thread
;
2104 unsigned int last_timestamp
;
2108 if(map_pmap
== NULL
)
2110 last_timestamp
= map
->timestamp
;
2112 VM_MAP_RANGE_CHECK(map
, start
, end
);
2113 assert(page_aligned(start
));
2114 assert(page_aligned(end
));
2116 /* We wired what the caller asked for, zero pages */
2118 return KERN_SUCCESS
;
2121 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2122 entry
= first_entry
;
2123 /* vm_map_clip_start will be done later. */
2125 /* Start address is not in map */
2127 return(KERN_INVALID_ADDRESS
);
2131 need_wakeup
= FALSE
;
2132 cur_thread
= current_thread();
2133 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2135 * If another thread is wiring/unwiring this entry then
2136 * block after informing other thread to wake us up.
2138 if (entry
->in_transition
) {
2140 * We have not clipped the entry. Make sure that
2141 * the start address is in range so that the lookup
2142 * below will succeed.
2144 s
= entry
->vme_start
< start
? start
: entry
->vme_start
;
2146 entry
->needs_wakeup
= TRUE
;
2149 * wake up anybody waiting on entries that we have
2153 vm_map_entry_wakeup(map
);
2154 need_wakeup
= FALSE
;
2157 * User wiring is interruptible
2159 vm_map_entry_wait(map
,
2160 (user_wire
) ? THREAD_ABORTSAFE
:
2162 if (user_wire
&& cur_thread
->wait_result
==
2163 THREAD_INTERRUPTED
) {
2165 * undo the wirings we have done so far
2166 * We do not clear the needs_wakeup flag,
2167 * because we cannot tell if we were the
2170 vm_map_unwire(map
, start
, s
, user_wire
);
2171 return(KERN_FAILURE
);
2176 * Cannot avoid a lookup here. reset timestamp.
2178 last_timestamp
= map
->timestamp
;
2181 * The entry could have been clipped, look it up again.
2182 * Worse that can happen is, it may not exist anymore.
2184 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
2186 panic("vm_map_wire: re-lookup failed");
2189 * User: undo everything upto the previous
2190 * entry. let vm_map_unwire worry about
2191 * checking the validity of the range.
2194 vm_map_unwire(map
, start
, s
, user_wire
);
2195 return(KERN_FAILURE
);
2197 entry
= first_entry
;
2201 if(entry
->is_sub_map
) {
2202 vm_offset_t sub_start
;
2203 vm_offset_t sub_end
;
2204 vm_offset_t local_end
;
2207 vm_map_clip_start(map
, entry
, start
);
2208 vm_map_clip_end(map
, entry
, end
);
2210 sub_start
+= entry
->offset
;
2211 sub_end
= entry
->vme_end
- entry
->vme_start
;
2212 sub_end
+= entry
->offset
;
2214 local_end
= entry
->vme_end
;
2215 if(map_pmap
== NULL
) {
2216 if(entry
->use_pmap
) {
2217 pmap
= entry
->object
.sub_map
->pmap
;
2221 if (entry
->wired_count
) {
2222 if (entry
->wired_count
2224 panic("vm_map_wire: too many wirings");
2227 entry
->user_wired_count
2228 >= MAX_WIRE_COUNT
) {
2230 vm_map_unwire(map
, start
,
2231 entry
->vme_start
, user_wire
);
2232 return(KERN_FAILURE
);
2235 (entry
->user_wired_count
++ == 0))
2236 entry
->wired_count
++;
2237 entry
= entry
->vme_next
;
2242 vm_object_offset_t offset_hi
;
2243 vm_object_offset_t offset_lo
;
2244 vm_object_offset_t offset
;
2247 vm_behavior_t behavior
;
2248 vm_offset_t local_start
;
2249 vm_map_entry_t local_entry
;
2250 vm_map_version_t version
;
2251 vm_map_t lookup_map
;
2253 /* call vm_map_lookup_locked to */
2254 /* cause any needs copy to be */
2256 local_start
= entry
->vme_start
;
2258 vm_map_lock_write_to_read(map
);
2259 if(vm_map_lookup_locked(
2260 &lookup_map
, local_start
,
2263 &offset
, &prot
, &wired
,
2264 &behavior
, &offset_lo
,
2265 &offset_hi
, &pmap_map
)) {
2267 vm_map_unlock(lookup_map
);
2268 vm_map_unwire(map
, start
,
2269 entry
->vme_start
, user_wire
);
2270 return(KERN_FAILURE
);
2272 if(pmap_map
!= lookup_map
)
2273 vm_map_unlock(pmap_map
);
2274 if(lookup_map
!= map
) {
2275 vm_map_unlock(lookup_map
);
2282 version
.main_timestamp
;
2283 vm_object_unlock(object
);
2284 if (vm_map_lookup_entry(map
,
2285 local_start
, &local_entry
)) {
2287 vm_map_unwire(map
, start
,
2288 entry
->vme_start
, user_wire
);
2289 return(KERN_FAILURE
);
2291 /* did we have a change of type? */
2292 if (!local_entry
->is_sub_map
)
2294 entry
= local_entry
;
2296 entry
->user_wired_count
++;
2297 entry
->wired_count
++;
2299 entry
->in_transition
= TRUE
;
2302 rc
= vm_map_wire_nested(
2303 entry
->object
.sub_map
,
2308 last_timestamp
= map
->timestamp
;
2312 rc
= vm_map_wire_nested(entry
->object
.sub_map
,
2317 last_timestamp
= map
->timestamp
;
2319 s
= entry
->vme_start
;
2321 if (last_timestamp
+1 != map
->timestamp
) {
2323 * Find the entry again. It could have been clipped
2324 * after we unlocked the map.
2326 if (!vm_map_lookup_entry(map
, local_end
,
2328 panic("vm_map_wire: re-lookup failed");
2330 entry
= first_entry
;
2333 last_timestamp
= map
->timestamp
;
2334 while ((entry
!= vm_map_to_entry(map
)) &&
2335 (entry
->vme_start
< e
)) {
2336 assert(entry
->in_transition
);
2337 entry
->in_transition
= FALSE
;
2338 if (entry
->needs_wakeup
) {
2339 entry
->needs_wakeup
= FALSE
;
2342 if (rc
!= KERN_SUCCESS
) {/* from vm_*_wire */
2345 entry
->user_wired_count
--;
2346 entry
->wired_count
--;
2349 entry
= entry
->vme_next
;
2351 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2354 vm_map_entry_wakeup(map
);
2356 * undo everything upto the previous entry.
2358 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2365 * If this entry is already wired then increment
2366 * the appropriate wire reference count.
2368 if (entry
->wired_count
&& main_map
) {
2369 /* sanity check: wired_count is a short */
2370 if (entry
->wired_count
>= MAX_WIRE_COUNT
)
2371 panic("vm_map_wire: too many wirings");
2374 entry
->user_wired_count
>= MAX_WIRE_COUNT
) {
2376 vm_map_unwire(map
, start
,
2377 entry
->vme_start
, user_wire
);
2378 return(KERN_FAILURE
);
2381 * entry is already wired down, get our reference
2382 * after clipping to our range.
2384 vm_map_clip_start(map
, entry
, start
);
2385 vm_map_clip_end(map
, entry
, end
);
2386 if (!user_wire
|| (entry
->user_wired_count
++ == 0))
2387 entry
->wired_count
++;
2389 entry
= entry
->vme_next
;
2394 * Unwired entry or wire request transmitted via submap
2399 * Perform actions of vm_map_lookup that need the write
2400 * lock on the map: create a shadow object for a
2401 * copy-on-write region, or an object for a zero-fill
2404 size
= entry
->vme_end
- entry
->vme_start
;
2406 * If wiring a copy-on-write page, we need to copy it now
2407 * even if we're only (currently) requesting read access.
2408 * This is aggressive, but once it's wired we can't move it.
2410 if (entry
->needs_copy
) {
2411 vm_object_shadow(&entry
->object
.vm_object
,
2412 &entry
->offset
, size
);
2413 entry
->needs_copy
= FALSE
;
2414 } else if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
2415 entry
->object
.vm_object
= vm_object_allocate(size
);
2416 entry
->offset
= (vm_object_offset_t
)0;
2419 vm_map_clip_start(map
, entry
, start
);
2420 vm_map_clip_end(map
, entry
, end
);
2422 s
= entry
->vme_start
;
2426 * Check for holes and protection mismatch.
2427 * Holes: Next entry should be contiguous unless this
2428 * is the end of the region.
2429 * Protection: Access requested must be allowed, unless
2430 * wiring is by protection class
2432 if ((((entry
->vme_end
< end
) &&
2433 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2434 (entry
->vme_next
->vme_start
> entry
->vme_end
))) ||
2435 ((entry
->protection
& access_type
) != access_type
))) {
2437 * Found a hole or protection problem.
2438 * Unwire the region we wired so far.
2440 if (start
!= entry
->vme_start
) {
2442 vm_map_unwire(map
, start
, s
, user_wire
);
2446 return((entry
->protection
&access_type
) != access_type
?
2447 KERN_PROTECTION_FAILURE
: KERN_INVALID_ADDRESS
);
2450 assert(entry
->wired_count
== 0 && entry
->user_wired_count
== 0);
2454 entry
->user_wired_count
++;
2455 entry
->wired_count
++;
2458 entry
->in_transition
= TRUE
;
2461 * This entry might get split once we unlock the map.
2462 * In vm_fault_wire(), we need the current range as
2463 * defined by this entry. In order for this to work
2464 * along with a simultaneous clip operation, we make a
2465 * temporary copy of this entry and use that for the
2466 * wiring. Note that the underlying objects do not
2467 * change during a clip.
		 * The in_transition state guarantees that the entry
		 * (or entries for this range, if a split occurred) will be
2474 * there when the map lock is acquired for the second time.
2478 if (!user_wire
&& cur_thread
!= THREAD_NULL
) {
2479 interruptible_state
= cur_thread
->interruptible
;
2480 cur_thread
->interruptible
= FALSE
;
2484 rc
= vm_fault_wire(map
, &tmp_entry
, map_pmap
);
2486 rc
= vm_fault_wire(map
, &tmp_entry
, map
->pmap
);
2488 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2489 cur_thread
->interruptible
= interruptible_state
;
2493 if (last_timestamp
+1 != map
->timestamp
) {
2495 * Find the entry again. It could have been clipped
2496 * after we unlocked the map.
2498 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2500 panic("vm_map_wire: re-lookup failed");
2502 entry
= first_entry
;
2505 last_timestamp
= map
->timestamp
;
2507 while ((entry
!= vm_map_to_entry(map
)) &&
2508 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2509 assert(entry
->in_transition
);
2510 entry
->in_transition
= FALSE
;
2511 if (entry
->needs_wakeup
) {
2512 entry
->needs_wakeup
= FALSE
;
2515 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2518 entry
->user_wired_count
--;
2519 entry
->wired_count
--;
2522 entry
= entry
->vme_next
;
2525 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2528 vm_map_entry_wakeup(map
);
			 * undo everything up to the previous entry.
2532 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2535 } /* end while loop through map entries */
2539 * wake up anybody waiting on entries we wired.
2542 vm_map_entry_wakeup(map
);
2544 return(KERN_SUCCESS
);
kern_return_t
vm_map_wire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire)
{
	kern_return_t	kret;
	/*
	 * The calls to mapping_prealloc and mapping_relpre
	 * (along with the VM_MAP_RANGE_CHECK to ensure a
	 * reasonable range was passed in) are
	 * currently necessary because
	 * we haven't enabled kernel pre-emption
	 * and/or the pmap_enter cannot purge and re-use
	 * existing mappings.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);
	mapping_prealloc(end - start);

	kret = vm_map_wire_nested(map, start, end, access_type,
				  user_wire, (pmap_t)NULL);
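
/*
 *	Illustrative sketch, not part of the original source:  how a
 *	hypothetical kernel client might use the wrappers above to wire
 *	a range for the duration of an I/O and then release the wiring.
 *	The map, address and size arguments are assumptions for the
 *	example only.
 */
static kern_return_t
example_wire_for_io(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	/* kernel (not user) wiring of the page-rounded range */
	kr = vm_map_wire(map, trunc_page(addr), round_page(addr + size),
			 VM_PROT_READ | VM_PROT_WRITE, FALSE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... perform the I/O against the now-resident pages ... */

	/* drop the kernel wiring taken above */
	return vm_map_unwire(map, trunc_page(addr), round_page(addr + size),
			     FALSE);
}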
/*
 *	vm_map_unwire:
 *
 *	Sets the pageability of the specified address range in the target
 *	map as pageable.  Regions specified must have been wired previously.
 *
 *	The map must not be locked, but a reference must remain to the map
 *	throughout the call.
 *
 *	Kernel will panic on failures.  User unwire ignores holes and
 *	unwired and in-transition entries to avoid losing memory by leaving
 *	it unwired.
 */
kern_return_t
vm_map_unwire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire,
	pmap_t			map_pmap)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	unsigned int		last_timestamp;
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
2616 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2617 entry
= first_entry
;
2618 /* vm_map_clip_start will be done later. */
2621 /* Start address is not in map. */
2623 return(KERN_INVALID_ADDRESS
);
2626 need_wakeup
= FALSE
;
2627 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2628 if (entry
->in_transition
) {
2631 * Another thread is wiring down this entry. Note
2632 * that if it is not for the other thread we would
2633 * be unwiring an unwired entry. This is not
2634 * permitted. If we wait, we will be unwiring memory
2638 * Another thread is unwiring this entry. We did not
2639 * have a reference to it, because if we did, this
2640 * entry will not be getting unwired now.
2643 panic("vm_map_unwire: in_transition entry");
2645 entry
= entry
->vme_next
;
2649 if(entry
->is_sub_map
) {
2650 vm_offset_t sub_start
;
2651 vm_offset_t sub_end
;
2652 vm_offset_t local_end
;
2656 vm_map_clip_start(map
, entry
, start
);
2657 vm_map_clip_end(map
, entry
, end
);
2659 sub_start
= entry
->offset
;
2660 sub_end
= entry
->vme_end
- entry
->vme_start
;
2661 sub_end
+= entry
->offset
;
2662 local_end
= entry
->vme_end
;
2663 if(map_pmap
== NULL
) {
2664 if(entry
->use_pmap
) {
2665 pmap
= entry
->object
.sub_map
->pmap
;
2669 if (entry
->wired_count
== 0 ||
2670 (user_wire
&& entry
->user_wired_count
== 0)) {
2672 panic("vm_map_unwire: entry is unwired");
2673 entry
= entry
->vme_next
;
2679 * Holes: Next entry should be contiguous unless
2680 * this is the end of the region.
2682 if (((entry
->vme_end
< end
) &&
2683 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2684 (entry
->vme_next
->vme_start
2685 > entry
->vme_end
)))) {
2687 panic("vm_map_unwire: non-contiguous region");
2689 entry = entry->vme_next;
2694 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2695 entry
->wired_count
--;
2697 if (entry
->wired_count
!= 0) {
2698 entry
= entry
->vme_next
;
2702 entry
->in_transition
= TRUE
;
2703 tmp_entry
= *entry
;/* see comment in vm_map_wire() */
2706 * We can unlock the map now. The in_transition state
			 * guarantees existence of the entry.
2710 vm_map_unwire_nested(entry
->object
.sub_map
,
2711 sub_start
, sub_end
, user_wire
, pmap
);
2714 if (last_timestamp
+1 != map
->timestamp
) {
2716 * Find the entry again. It could have been
2717 * clipped or deleted after we unlocked the map.
2719 if (!vm_map_lookup_entry(map
,
2720 tmp_entry
.vme_start
,
2723 panic("vm_map_unwire: re-lookup failed");
2724 entry
= first_entry
->vme_next
;
2726 entry
= first_entry
;
2728 last_timestamp
= map
->timestamp
;
2731 * clear transition bit for all constituent entries
2732 * that were in the original entry (saved in
2733 * tmp_entry). Also check for waiters.
2735 while ((entry
!= vm_map_to_entry(map
)) &&
2736 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2737 assert(entry
->in_transition
);
2738 entry
->in_transition
= FALSE
;
2739 if (entry
->needs_wakeup
) {
2740 entry
->needs_wakeup
= FALSE
;
2743 entry
= entry
->vme_next
;
2748 vm_map_unwire_nested(entry
->object
.sub_map
,
2749 sub_start
, sub_end
, user_wire
, pmap
);
2752 if (last_timestamp
+1 != map
->timestamp
) {
2754 * Find the entry again. It could have been
2755 * clipped or deleted after we unlocked the map.
2757 if (!vm_map_lookup_entry(map
,
2758 tmp_entry
.vme_start
,
2761 panic("vm_map_unwire: re-lookup failed");
2762 entry
= first_entry
->vme_next
;
2764 entry
= first_entry
;
2766 last_timestamp
= map
->timestamp
;
2771 if (main_map
&& (entry
->wired_count
== 0 ||
2772 (user_wire
&& entry
->user_wired_count
== 0))) {
2774 panic("vm_map_unwire: entry is unwired");
2776 entry
= entry
->vme_next
;
2780 assert(entry
->wired_count
> 0 &&
2781 (!user_wire
|| entry
->user_wired_count
> 0));
2783 vm_map_clip_start(map
, entry
, start
);
2784 vm_map_clip_end(map
, entry
, end
);
2788 * Holes: Next entry should be contiguous unless
2789 * this is the end of the region.
2791 if (((entry
->vme_end
< end
) &&
2792 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2793 (entry
->vme_next
->vme_start
> entry
->vme_end
)))) {
2796 panic("vm_map_unwire: non-contiguous region");
2797 entry
= entry
->vme_next
;
2802 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2803 entry
->wired_count
--;
2805 if (entry
->wired_count
!= 0) {
2806 entry
= entry
->vme_next
;
2811 entry
->in_transition
= TRUE
;
2812 tmp_entry
= *entry
; /* see comment in vm_map_wire() */
2815 * We can unlock the map now. The in_transition state
		 * guarantees existence of the entry.
2820 vm_fault_unwire(map
, &tmp_entry
, FALSE
, map_pmap
);
2822 vm_fault_unwire(map
, &tmp_entry
, FALSE
, map
->pmap
);
2826 if (last_timestamp
+1 != map
->timestamp
) {
2828 * Find the entry again. It could have been clipped
2829 * or deleted after we unlocked the map.
2831 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2834 panic("vm_map_unwire: re-lookup failed");
2835 entry
= first_entry
->vme_next
;
2837 entry
= first_entry
;
2839 last_timestamp
= map
->timestamp
;
2842 * clear transition bit for all constituent entries that
2843 * were in the original entry (saved in tmp_entry). Also
2844 * check for waiters.
2846 while ((entry
!= vm_map_to_entry(map
)) &&
2847 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2848 assert(entry
->in_transition
);
2849 entry
->in_transition
= FALSE
;
2850 if (entry
->needs_wakeup
) {
2851 entry
->needs_wakeup
= FALSE
;
2854 entry
= entry
->vme_next
;
2859 * wake up anybody waiting on entries that we have unwired.
2862 vm_map_entry_wakeup(map
);
2863 return(KERN_SUCCESS
);
kern_return_t
vm_map_unwire(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	boolean_t		user_wire)
{
	return vm_map_unwire_nested(map, start, end,
		user_wire, (pmap_t) NULL);
}
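
/*
 *	Illustrative sketch, not part of the original source:  the
 *	user/kernel wire accounting enforced above, reduced to a toy
 *	structure.  A user unwire only drops wired_count when the last
 *	user reference goes away, mirroring the
 *	"if (!user_wire || (--entry->user_wired_count == 0))" test.
 */
struct example_wire_counts {
	unsigned short	wired_count;
	unsigned short	user_wired_count;
};

static void
example_unwire(
	struct example_wire_counts	*e,
	boolean_t			user_wire)
{
	if (!user_wire || (--e->user_wired_count == 0))
		e->wired_count--;
}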
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_offset_t	s, e;
	register vm_object_t	object;
	register vm_map_t	submap;
	extern vm_object_t	kernel_object;
	s = entry->vme_start;
	e = entry->vme_end;

	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);
2900 if (entry
->is_sub_map
) {
2902 submap
= entry
->object
.sub_map
;
2905 object
= entry
->object
.vm_object
;
2908 vm_map_entry_unlink(map
, entry
);
2911 vm_map_entry_dispose(map
, entry
);
2915 * Deallocate the object only after removing all
2916 * pmap entries pointing to its pages.
2919 vm_map_deallocate(submap
);
2921 vm_object_deallocate(object
);
2926 vm_map_submap_pmap_clean(
2933 vm_offset_t submap_start
;
2934 vm_offset_t submap_end
;
2936 vm_size_t remove_size
;
2937 vm_map_entry_t entry
;
2939 submap_end
= offset
+ (end
- start
);
2940 submap_start
= offset
;
2941 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2943 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2944 if(offset
> entry
->vme_start
)
2945 remove_size
-= offset
- entry
->vme_start
;
2948 if(submap_end
< entry
->vme_end
) {
2950 entry
->vme_end
- submap_end
;
2952 if(entry
->is_sub_map
) {
2953 vm_map_submap_pmap_clean(
2956 start
+ remove_size
,
2957 entry
->object
.sub_map
,
2960 pmap_remove(map
->pmap
, start
, start
+ remove_size
);
2964 entry
= entry
->vme_next
;
2966 while((entry
!= vm_map_to_entry(sub_map
))
2967 && (entry
->vme_start
< submap_end
)) {
2968 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2969 if(submap_end
< entry
->vme_end
) {
2970 remove_size
-= entry
->vme_end
- submap_end
;
2972 if(entry
->is_sub_map
) {
2973 vm_map_submap_pmap_clean(
2975 (start
+ entry
->vme_start
) - offset
,
2976 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2977 entry
->object
.sub_map
,
2980 pmap_remove(map
->pmap
,
2981 (start
+ entry
->vme_start
) - offset
,
2982 ((start
+ entry
->vme_start
) - offset
) + remove_size
);
2984 entry
= entry
->vme_next
;
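
/*
 *	Illustrative sketch, not part of the original source:  the address
 *	arithmetic used by vm_map_submap_pmap_clean() above, pulled out as
 *	a hypothetical helper.  A parent-map range [start, end) maps the
 *	submap starting at "offset"; a submap entry's addresses are
 *	translated back to parent virtual addresses through that offset
 *	before the parent pmap is cleaned.
 */
static void
example_submap_to_parent_range(
	vm_offset_t	start,		/* parent VA where the mapping begins */
	vm_offset_t	offset,		/* submap offset of that same point */
	vm_offset_t	sub_entry_start,/* entry->vme_start within the submap */
	vm_size_t	remove_size,
	vm_offset_t	*parent_start,	/* OUT */
	vm_offset_t	*parent_end)	/* OUT */
{
	*parent_start = (start + sub_entry_start) - offset;
	*parent_end = *parent_start + remove_size;
}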
2990 * vm_map_delete: [ internal use only ]
2992 * Deallocates the given address range from the target map.
2993 * Removes all user wirings. Unwires one kernel wiring if
2994 * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go
2995 * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps
2996 * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
2998 * This routine is called with map locked and leaves map locked.
3002 register vm_map_t map
,
3004 register vm_offset_t end
,
3007 vm_map_entry_t entry
, next
;
3008 struct vm_map_entry
*first_entry
, tmp_entry
;
3009 register vm_offset_t s
, e
;
3010 register vm_object_t object
;
3011 boolean_t need_wakeup
;
3012 unsigned int last_timestamp
= ~0; /* unlikely value */
3014 extern vm_map_t kernel_map
;
3016 interruptible
= (flags
& VM_MAP_REMOVE_INTERRUPTIBLE
) ?
3017 THREAD_ABORTSAFE
: THREAD_UNINT
;
3020 * All our DMA I/O operations in IOKit are currently done by
3021 * wiring through the map entries of the task requesting the I/O.
3022 * Because of this, we must always wait for kernel wirings
3023 * to go away on the entries before deleting them.
3025 * Any caller who wants to actually remove a kernel wiring
3026 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
3027 * properly remove one wiring instead of blasting through
3030 flags
|= VM_MAP_REMOVE_WAIT_FOR_KWIRE
;
3033 * Find the start of the region, and clip it
3035 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3036 entry
= first_entry
;
3037 vm_map_clip_start(map
, entry
, start
);
3040 * Fix the lookup hint now, rather than each
3041 * time through the loop.
3043 SAVE_HINT(map
, entry
->vme_prev
);
3045 entry
= first_entry
->vme_next
;
3048 need_wakeup
= FALSE
;
3050 * Step through all entries in this region
3052 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3054 vm_map_clip_end(map
, entry
, end
);
3055 if (entry
->in_transition
) {
3057 * Another thread is wiring/unwiring this entry.
3058 * Let the other thread know we are waiting.
3060 s
= entry
->vme_start
;
3061 entry
->needs_wakeup
= TRUE
;
3064 * wake up anybody waiting on entries that we have
3065 * already unwired/deleted.
3068 vm_map_entry_wakeup(map
);
3069 need_wakeup
= FALSE
;
3072 vm_map_entry_wait(map
, interruptible
);
3074 if (interruptible
&&
3075 current_thread()->wait_result
== THREAD_INTERRUPTED
)
3077 * We do not clear the needs_wakeup flag,
3078 * since we cannot tell if we were the only one.
3080 return KERN_ABORTED
;
3084 * Cannot avoid a lookup here. reset timestamp.
3086 last_timestamp
= map
->timestamp
;
3089 * The entry could have been clipped or it
3090 * may not exist anymore. Look it up again.
3092 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3093 assert((map
!= kernel_map
) &&
3094 (!entry
->is_sub_map
));
3096 * User: use the next entry
3098 entry
= first_entry
->vme_next
;
3100 entry
= first_entry
;
3101 SAVE_HINT(map
, entry
->vme_prev
);
3104 } /* end in_transition */
3106 if (entry
->wired_count
) {
3108 * Remove a kernel wiring if requested or if
3109 * there are user wirings.
3111 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3112 (entry
->user_wired_count
> 0))
3113 entry
->wired_count
--;
3115 /* remove all user wire references */
3116 entry
->user_wired_count
= 0;
3118 if (entry
->wired_count
!= 0) {
3119 assert((map
!= kernel_map
) &&
3120 (!entry
->is_sub_map
));
				 * Cannot continue.  Typical case is when
				 * a user thread has physical I/O pending on
				 * this page.  Either wait for the
				 * kernel wiring to go away or return an
3128 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3130 s
= entry
->vme_start
;
3131 entry
->needs_wakeup
= TRUE
;
3132 vm_map_entry_wait(map
, interruptible
);
3134 if (interruptible
&&
3135 current_thread()->wait_result
==
3138 * We do not clear the
3139 * needs_wakeup flag, since we
3140 * cannot tell if we were the
3143 return KERN_ABORTED
;
3147 * Cannot avoid a lookup here. reset
3150 last_timestamp
= map
->timestamp
;
3153 * The entry could have been clipped or
3154 * it may not exist anymore. Look it
3157 if (!vm_map_lookup_entry(map
, s
,
3159 assert((map
!= kernel_map
) &&
3160 (!entry
->is_sub_map
));
3162 * User: use the next entry
3164 entry
= first_entry
->vme_next
;
3166 entry
= first_entry
;
3167 SAVE_HINT(map
, entry
->vme_prev
);
3172 return KERN_FAILURE
;
3176 entry
->in_transition
= TRUE
;
3178 * copy current entry. see comment in vm_map_wire()
3181 s
= entry
->vme_start
;
3185 * We can unlock the map now. The in_transition
			 * state guarantees existence of the entry.
3189 vm_fault_unwire(map
, &tmp_entry
,
3190 tmp_entry
.object
.vm_object
== kernel_object
,
3194 if (last_timestamp
+1 != map
->timestamp
) {
3196 * Find the entry again. It could have
3197 * been clipped after we unlocked the map.
3199 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3200 assert((map
!= kernel_map
) &&
3201 (!entry
->is_sub_map
));
3202 first_entry
= first_entry
->vme_next
;
3204 SAVE_HINT(map
, entry
->vme_prev
);
3207 SAVE_HINT(map
, entry
->vme_prev
);
3208 first_entry
= entry
;
3211 last_timestamp
= map
->timestamp
;
3213 entry
= first_entry
;
3214 while ((entry
!= vm_map_to_entry(map
)) &&
3215 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3216 assert(entry
->in_transition
);
3217 entry
->in_transition
= FALSE
;
3218 if (entry
->needs_wakeup
) {
3219 entry
->needs_wakeup
= FALSE
;
3222 entry
= entry
->vme_next
;
3225 * We have unwired the entry(s). Go back and
3228 entry
= first_entry
;
3232 /* entry is unwired */
3233 assert(entry
->wired_count
== 0);
3234 assert(entry
->user_wired_count
== 0);
3236 if ((!entry
->is_sub_map
&&
3237 entry
->object
.vm_object
!= kernel_object
) ||
3238 entry
->is_sub_map
) {
3239 if(entry
->is_sub_map
) {
3240 if(entry
->use_pmap
) {
3242 pmap_unnest(map
->pmap
, entry
->vme_start
,
3243 entry
->vme_end
- entry
->vme_start
);
3246 vm_map_submap_pmap_clean(
3247 map
, entry
->vme_start
, entry
->vme_end
,
3248 entry
->object
.sub_map
,
3252 pmap_remove(map
->pmap
,
3253 entry
->vme_start
, entry
->vme_end
);
3257 next
= entry
->vme_next
;
3258 s
= next
->vme_start
;
3259 last_timestamp
= map
->timestamp
;
3260 vm_map_entry_delete(map
, entry
);
3261 /* vm_map_entry_delete unlocks the map */
3265 if(entry
== vm_map_to_entry(map
)) {
3268 if (last_timestamp
+1 != map
->timestamp
) {
		 * We are responsible for deleting everything
		 * from the given space.  If someone has interfered,
		 * we pick up where we left off; back fills should
		 * be all right for anyone except map_delete, and
		 * we have to assume that the task has been fully
		 * disabled before we get here.
3277 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3278 entry
= entry
->vme_next
;
3280 SAVE_HINT(map
, entry
->vme_prev
);
		 * Others can not only allocate behind us, we can
		 * also see coalescing while we don't have the map lock.
3286 if(entry
== vm_map_to_entry(map
)) {
3289 vm_map_clip_start(map
, entry
, s
);
3291 last_timestamp
= map
->timestamp
;
3294 if (map
->wait_for_space
)
3295 thread_wakeup((event_t
) map
);
3297 * wake up anybody waiting on entries that we have already deleted.
3300 vm_map_entry_wakeup(map
);
3302 return KERN_SUCCESS
;
3308 * Remove the given address range from the target map.
3309 * This is the exported form of vm_map_delete.
kern_return_t
vm_map_remove(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register boolean_t	flags)
{
	register kern_return_t	result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags);
	vm_map_unlock(map);

	return(result);
}
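
/*
 *	Illustrative sketch, not part of the original source:  removing a
 *	range that the caller itself kernel-wired.  Passing
 *	VM_MAP_REMOVE_KUNWIRE asks vm_map_delete() to drop one kernel
 *	wiring rather than wait for it to go away.  The map and addresses
 *	are assumptions for the example only.
 */
static kern_return_t
example_remove_wired_range(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	return vm_map_remove(map, trunc_page(start), round_page(end),
			     VM_MAP_REMOVE_KUNWIRE);
}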
/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/
3344 if (copy
== VM_MAP_COPY_NULL
)
3347 switch (copy
->type
) {
3348 case VM_MAP_COPY_ENTRY_LIST
:
3349 while (vm_map_copy_first_entry(copy
) !=
3350 vm_map_copy_to_entry(copy
)) {
3351 vm_map_entry_t entry
= vm_map_copy_first_entry(copy
);
3353 vm_map_copy_entry_unlink(copy
, entry
);
3354 vm_object_deallocate(entry
->object
.vm_object
);
3355 vm_map_copy_entry_dispose(copy
, entry
);
3358 case VM_MAP_COPY_OBJECT
:
3359 vm_object_deallocate(copy
->cpy_object
);
3361 case VM_MAP_COPY_KERNEL_BUFFER
:
3364 * The vm_map_copy_t and possibly the data buffer were
3365 * allocated by a single call to kalloc(), i.e. the
3366 * vm_map_copy_t was not allocated out of the zone.
3368 kfree((vm_offset_t
) copy
, copy
->cpy_kalloc_size
);
3371 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
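
/*
 *	Illustrative sketch, not part of the original source:  the usual
 *	ownership rule for copy objects.  A consumer such as
 *	vm_map_copyout() takes the copy only on success; on failure the
 *	caller still owns it and must discard it explicitly.
 */
static void
example_consume_or_discard(
	vm_map_t	dst_map,
	vm_map_copy_t	copy)
{
	vm_offset_t	dst_addr;

	if (vm_map_copyout(dst_map, &dst_addr, copy) != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* failure: copy not consumed */
	/* on success the copy was consumed and must not be touched again */
}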
3375 * Routine: vm_map_copy_copy
3378 * Move the information in a map copy object to
3379 * a new map copy object, leaving the old one
3382 * This is used by kernel routines that need
3383 * to look at out-of-line data (in copyin form)
3384 * before deciding whether to return SUCCESS.
3385 * If the routine returns FAILURE, the original
3386 * copy object will be deallocated; therefore,
3387 * these routines must make a copy of the copy
3388 * object and leave the original empty so that
3389 * deallocation will not fail.
3395 vm_map_copy_t new_copy
;
3397 if (copy
== VM_MAP_COPY_NULL
)
3398 return VM_MAP_COPY_NULL
;
3401 * Allocate a new copy object, and copy the information
3402 * from the old one into it.
3405 new_copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
3408 if (copy
->type
== VM_MAP_COPY_ENTRY_LIST
) {
3410 * The links in the entry chain must be
3411 * changed to point to the new copy object.
3413 vm_map_copy_first_entry(copy
)->vme_prev
3414 = vm_map_copy_to_entry(new_copy
);
3415 vm_map_copy_last_entry(copy
)->vme_next
3416 = vm_map_copy_to_entry(new_copy
);
3420 * Change the old copy object into one that contains
3421 * nothing to be deallocated.
3423 copy
->type
= VM_MAP_COPY_OBJECT
;
3424 copy
->cpy_object
= VM_OBJECT_NULL
;
3427 * Return the new object.
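
/*
 *	Illustrative sketch, not part of the original source:  the pattern
 *	described above.  A hypothetical kernel routine that must inspect
 *	out-of-line data before deciding whether to succeed clones the
 *	copy object; the original is left empty, so the caller's later
 *	discard of it on failure cannot free the data the routine kept.
 */
static kern_return_t
example_inspect_ool_data(
	vm_map_copy_t	copy,		/* original, still owned by caller */
	vm_map_copy_t	*kept)		/* OUT: clone retained on success */
{
	vm_map_copy_t	clone;

	clone = vm_map_copy_copy(copy);	/* original becomes an empty shell */
	if (clone == VM_MAP_COPY_NULL)
		return KERN_FAILURE;	/* caller may discard the original */

	*kept = clone;			/* we now own the actual data */
	return KERN_SUCCESS;
}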
3433 vm_map_overwrite_submap_recurse(
3435 vm_offset_t dst_addr
,
3438 vm_offset_t dst_end
;
3439 vm_map_entry_t tmp_entry
;
3440 vm_map_entry_t entry
;
3441 kern_return_t result
;
3442 boolean_t encountered_sub_map
= FALSE
;
3447 * Verify that the destination is all writeable
3448 * initially. We have to trunc the destination
3449 * address and round the copy size or we'll end up
3450 * splitting entries in strange ways.
3453 dst_end
= round_page(dst_addr
+ dst_size
);
3456 vm_map_lock(dst_map
);
3457 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3458 vm_map_unlock(dst_map
);
3459 return(KERN_INVALID_ADDRESS
);
3462 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3464 for (entry
= tmp_entry
;;) {
3465 vm_map_entry_t next
;
3467 next
= entry
->vme_next
;
3468 while(entry
->is_sub_map
) {
3469 vm_offset_t sub_start
;
3470 vm_offset_t sub_end
;
3471 vm_offset_t local_end
;
3473 if (entry
->in_transition
) {
3475 * Say that we are waiting, and wait for entry.
3477 entry
->needs_wakeup
= TRUE
;
3478 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3483 encountered_sub_map
= TRUE
;
3484 sub_start
= entry
->offset
;
3486 if(entry
->vme_end
< dst_end
)
3487 sub_end
= entry
->vme_end
;
3490 sub_end
-= entry
->vme_start
;
3491 sub_end
+= entry
->offset
;
3492 local_end
= entry
->vme_end
;
3493 vm_map_unlock(dst_map
);
3495 result
= vm_map_overwrite_submap_recurse(
3496 entry
->object
.sub_map
,
3498 sub_end
- sub_start
);
3500 if(result
!= KERN_SUCCESS
)
3502 if (dst_end
<= entry
->vme_end
)
3503 return KERN_SUCCESS
;
3504 vm_map_lock(dst_map
);
3505 if(!vm_map_lookup_entry(dst_map
, local_end
,
3507 vm_map_unlock(dst_map
);
3508 return(KERN_INVALID_ADDRESS
);
3511 next
= entry
->vme_next
;
3514 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3515 vm_map_unlock(dst_map
);
3516 return(KERN_PROTECTION_FAILURE
);
3520 * If the entry is in transition, we must wait
3521 * for it to exit that state. Anything could happen
3522 * when we unlock the map, so start over.
3524 if (entry
->in_transition
) {
3527 * Say that we are waiting, and wait for entry.
3529 entry
->needs_wakeup
= TRUE
;
3530 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3536 * our range is contained completely within this map entry
3538 if (dst_end
<= entry
->vme_end
) {
3539 vm_map_unlock(dst_map
);
3540 return KERN_SUCCESS
;
3543 * check that range specified is contiguous region
3545 if ((next
== vm_map_to_entry(dst_map
)) ||
3546 (next
->vme_start
!= entry
->vme_end
)) {
3547 vm_map_unlock(dst_map
);
3548 return(KERN_INVALID_ADDRESS
);
3552 * Check for permanent objects in the destination.
3554 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3555 ((!entry
->object
.vm_object
->internal
) ||
3556 (entry
->object
.vm_object
->true_share
))) {
3557 if(encountered_sub_map
) {
3558 vm_map_unlock(dst_map
);
3559 return(KERN_FAILURE
);
3566 vm_map_unlock(dst_map
);
3567 return(KERN_SUCCESS
);
3571 * Routine: vm_map_copy_overwrite
3574 * Copy the memory described by the map copy
3575 * object (copy; returned by vm_map_copyin) onto
3576 * the specified destination region (dst_map, dst_addr).
3577 * The destination must be writeable.
3579 * Unlike vm_map_copyout, this routine actually
3580 * writes over previously-mapped memory. If the
3581 * previous mapping was to a permanent (user-supplied)
3582 * memory object, it is preserved.
3584 * The attributes (protection and inheritance) of the
3585 * destination region are preserved.
3587 * If successful, consumes the copy object.
3588 * Otherwise, the caller is responsible for it.
3590 * Implementation notes:
3591 * To overwrite aligned temporary virtual memory, it is
3592 * sufficient to remove the previous mapping and insert
3593 * the new copy. This replacement is done either on
3594 * the whole region (if no permanent virtual memory
3595 * objects are embedded in the destination region) or
3596 * in individual map entries.
3598 * To overwrite permanent virtual memory , it is necessary
3599 * to copy each page, as the external memory management
3600 * interface currently does not provide any optimizations.
3602 * Unaligned memory also has to be copied. It is possible
3603 * to use 'vm_trickery' to copy the aligned data. This is
3604 * not done but not hard to implement.
3606 * Once a page of permanent memory has been overwritten,
3607 * it is impossible to interrupt this function; otherwise,
3608 * the call would be neither atomic nor location-independent.
3609 * The kernel-state portion of a user thread must be
3612 * It may be expensive to forward all requests that might
3613 * overwrite permanent memory (vm_write, vm_copy) to
3614 * uninterruptible kernel threads. This routine may be
3615 * called by interruptible threads; however, success is
3616 * not guaranteed -- if the request cannot be performed
3617 * atomically and interruptibly, an error indication is
3622 vm_map_copy_overwrite_nested(
3624 vm_offset_t dst_addr
,
3626 boolean_t interruptible
,
3629 vm_offset_t dst_end
;
3630 vm_map_entry_t tmp_entry
;
3631 vm_map_entry_t entry
;
3633 boolean_t aligned
= TRUE
;
3634 boolean_t contains_permanent_objects
= FALSE
;
3635 boolean_t encountered_sub_map
= FALSE
;
3636 vm_offset_t base_addr
;
3637 vm_size_t copy_size
;
3638 vm_size_t total_size
;
3642 * Check for null copy object.
3645 if (copy
== VM_MAP_COPY_NULL
)
3646 return(KERN_SUCCESS
);
3649 * Check for special kernel buffer allocated
3650 * by new_ipc_kmsg_copyin.
3653 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3654 return(vm_map_copyout_kernel_buffer(
3660 * Only works for entry lists at the moment. Will
3661 * support page lists later.
3664 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3666 if (copy
->size
== 0) {
3667 vm_map_copy_discard(copy
);
3668 return(KERN_SUCCESS
);
3672 * Verify that the destination is all writeable
3673 * initially. We have to trunc the destination
3674 * address and round the copy size or we'll end up
3675 * splitting entries in strange ways.
3678 if (!page_aligned(copy
->size
) ||
3679 !page_aligned (copy
->offset
) ||
3680 !page_aligned (dst_addr
))
3683 dst_end
= round_page(dst_addr
+ copy
->size
);
3685 dst_end
= dst_addr
+ copy
->size
;
3689 vm_map_lock(dst_map
);
3690 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3691 vm_map_unlock(dst_map
);
3692 return(KERN_INVALID_ADDRESS
);
3694 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3695 for (entry
= tmp_entry
;;) {
3696 vm_map_entry_t next
= entry
->vme_next
;
3698 while(entry
->is_sub_map
) {
3699 vm_offset_t sub_start
;
3700 vm_offset_t sub_end
;
3701 vm_offset_t local_end
;
3703 if (entry
->in_transition
) {
3706 * Say that we are waiting, and wait for entry.
3708 entry
->needs_wakeup
= TRUE
;
3709 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3714 local_end
= entry
->vme_end
;
3715 if (!(entry
->needs_copy
)) {
3716 /* if needs_copy we are a COW submap */
3717 /* in such a case we just replace so */
3718 /* there is no need for the follow- */
3720 encountered_sub_map
= TRUE
;
3721 sub_start
= entry
->offset
;
3723 if(entry
->vme_end
< dst_end
)
3724 sub_end
= entry
->vme_end
;
3727 sub_end
-= entry
->vme_start
;
3728 sub_end
+= entry
->offset
;
3729 vm_map_unlock(dst_map
);
3731 kr
= vm_map_overwrite_submap_recurse(
3732 entry
->object
.sub_map
,
3734 sub_end
- sub_start
);
3735 if(kr
!= KERN_SUCCESS
)
3737 vm_map_lock(dst_map
);
3740 if (dst_end
<= entry
->vme_end
)
3741 goto start_overwrite
;
3742 if(!vm_map_lookup_entry(dst_map
, local_end
,
3744 vm_map_unlock(dst_map
);
3745 return(KERN_INVALID_ADDRESS
);
3747 next
= entry
->vme_next
;
3750 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3751 vm_map_unlock(dst_map
);
3752 return(KERN_PROTECTION_FAILURE
);
3756 * If the entry is in transition, we must wait
3757 * for it to exit that state. Anything could happen
3758 * when we unlock the map, so start over.
3760 if (entry
->in_transition
) {
3763 * Say that we are waiting, and wait for entry.
3765 entry
->needs_wakeup
= TRUE
;
3766 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3772 * our range is contained completely within this map entry
3774 if (dst_end
<= entry
->vme_end
)
3777 * check that range specified is contiguous region
3779 if ((next
== vm_map_to_entry(dst_map
)) ||
3780 (next
->vme_start
!= entry
->vme_end
)) {
3781 vm_map_unlock(dst_map
);
3782 return(KERN_INVALID_ADDRESS
);
3787 * Check for permanent objects in the destination.
3789 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3790 ((!entry
->object
.vm_object
->internal
) ||
3791 (entry
->object
.vm_object
->true_share
))) {
3792 contains_permanent_objects
= TRUE
;
3800 * If there are permanent objects in the destination, then
3801 * the copy cannot be interrupted.
3804 if (interruptible
&& contains_permanent_objects
) {
3805 vm_map_unlock(dst_map
);
3806 return(KERN_FAILURE
); /* XXX */
3811 * Make a second pass, overwriting the data
3812 * At the beginning of each loop iteration,
3813 * the next entry to be overwritten is "tmp_entry"
3814 * (initially, the value returned from the lookup above),
3815 * and the starting address expected in that entry
3819 total_size
= copy
->size
;
3820 if(encountered_sub_map
) {
3822 /* re-calculate tmp_entry since we've had the map */
3824 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3825 vm_map_unlock(dst_map
);
3826 return(KERN_INVALID_ADDRESS
);
3829 copy_size
= copy
->size
;
3832 base_addr
= dst_addr
;
3834 /* deconstruct the copy object and do in parts */
3835 /* only in sub_map, interruptable case */
3836 vm_map_entry_t copy_entry
;
3837 vm_map_entry_t previous_prev
;
3838 vm_map_entry_t next_copy
;
3840 int remaining_entries
;
3843 for (entry
= tmp_entry
; copy_size
== 0;) {
3844 vm_map_entry_t next
;
3846 next
= entry
->vme_next
;
			/* tmp_entry and base address are moved along */
			/* each time we encounter a sub-map.  Otherwise */
			/* entry can outpace tmp_entry, and the copy_size */
			/* may reflect the distance between them. */
			/* If the current entry is found to be in transition, */
			/* we will start over at the beginning or the last */
			/* encounter of a submap as dictated by base_addr; */
			/* we will zero copy_size accordingly. */
3856 if (entry
->in_transition
) {
3858 * Say that we are waiting, and wait for entry.
3860 entry
->needs_wakeup
= TRUE
;
3861 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3863 vm_map_lock(dst_map
);
3864 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3866 vm_map_unlock(dst_map
);
3867 return(KERN_INVALID_ADDRESS
);
3873 if(entry
->is_sub_map
) {
3874 vm_offset_t sub_start
;
3875 vm_offset_t sub_end
;
3876 vm_offset_t local_end
;
3878 if (entry
->needs_copy
) {
3879 /* if this is a COW submap */
3880 /* just back the range with a */
3881 /* anonymous entry */
3882 if(entry
->vme_end
< dst_end
)
3883 sub_end
= entry
->vme_end
;
3886 if(entry
->vme_start
< base_addr
)
3887 sub_start
= base_addr
;
3889 sub_start
= entry
->vme_start
;
3891 dst_map
, entry
, sub_end
);
3893 dst_map
, entry
, sub_start
);
3894 entry
->is_sub_map
= FALSE
;
3896 entry
->object
.sub_map
);
3897 entry
->object
.sub_map
= NULL
;
3898 entry
->is_shared
= FALSE
;
3899 entry
->needs_copy
= FALSE
;
3901 entry
->protection
= VM_PROT_ALL
;
3902 entry
->max_protection
= VM_PROT_ALL
;
3903 entry
->wired_count
= 0;
3904 entry
->user_wired_count
= 0;
3905 if(entry
->inheritance
3906 == VM_INHERIT_SHARE
)
3907 entry
->inheritance
= VM_INHERIT_COPY
;
3910 /* first take care of any non-sub_map */
3911 /* entries to send */
3912 if(base_addr
< entry
->vme_start
) {
3915 entry
->vme_start
- base_addr
;
3918 sub_start
= entry
->offset
;
3920 if(entry
->vme_end
< dst_end
)
3921 sub_end
= entry
->vme_end
;
3924 sub_end
-= entry
->vme_start
;
3925 sub_end
+= entry
->offset
;
3926 local_end
= entry
->vme_end
;
3927 vm_map_unlock(dst_map
);
3928 copy_size
= sub_end
- sub_start
;
3930 /* adjust the copy object */
3931 if (total_size
> copy_size
) {
3932 vm_size_t local_size
= 0;
3933 vm_size_t entry_size
;
3936 new_offset
= copy
->offset
;
3937 copy_entry
= vm_map_copy_first_entry(copy
);
3939 vm_map_copy_to_entry(copy
)){
3940 entry_size
= copy_entry
->vme_end
-
3941 copy_entry
->vme_start
;
3942 if((local_size
< copy_size
) &&
3943 ((local_size
+ entry_size
)
3945 vm_map_copy_clip_end(copy
,
3947 copy_entry
->vme_start
+
3948 (copy_size
- local_size
));
3949 entry_size
= copy_entry
->vme_end
-
3950 copy_entry
->vme_start
;
3951 local_size
+= entry_size
;
3952 new_offset
+= entry_size
;
3954 if(local_size
>= copy_size
) {
3955 next_copy
= copy_entry
->vme_next
;
3956 copy_entry
->vme_next
=
3957 vm_map_copy_to_entry(copy
);
3959 copy
->cpy_hdr
.links
.prev
;
3960 copy
->cpy_hdr
.links
.prev
= copy_entry
;
3961 copy
->size
= copy_size
;
3963 copy
->cpy_hdr
.nentries
;
3964 remaining_entries
-= nentries
;
3965 copy
->cpy_hdr
.nentries
= nentries
;
3968 local_size
+= entry_size
;
3969 new_offset
+= entry_size
;
3972 copy_entry
= copy_entry
->vme_next
;
3976 if((entry
->use_pmap
) && (pmap
== NULL
)) {
3977 kr
= vm_map_copy_overwrite_nested(
3978 entry
->object
.sub_map
,
3982 entry
->object
.sub_map
->pmap
);
3983 } else if (pmap
!= NULL
) {
3984 kr
= vm_map_copy_overwrite_nested(
3985 entry
->object
.sub_map
,
3988 interruptible
, pmap
);
3990 kr
= vm_map_copy_overwrite_nested(
3991 entry
->object
.sub_map
,
3997 if(kr
!= KERN_SUCCESS
) {
3998 if(next_copy
!= NULL
) {
3999 copy
->cpy_hdr
.nentries
+=
4001 copy
->cpy_hdr
.links
.prev
->vme_next
=
4003 copy
->cpy_hdr
.links
.prev
4005 copy
->size
= total_size
;
4009 if (dst_end
<= local_end
) {
4010 return(KERN_SUCCESS
);
4012 /* otherwise copy no longer exists, it was */
4013 /* destroyed after successful copy_overwrite */
4014 copy
= (vm_map_copy_t
)
4015 zalloc(vm_map_copy_zone
);
4016 vm_map_copy_first_entry(copy
) =
4017 vm_map_copy_last_entry(copy
) =
4018 vm_map_copy_to_entry(copy
);
4019 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4020 copy
->offset
= new_offset
;
4022 total_size
-= copy_size
;
4024 /* put back remainder of copy in container */
4025 if(next_copy
!= NULL
) {
4026 copy
->cpy_hdr
.nentries
= remaining_entries
;
4027 copy
->cpy_hdr
.links
.next
= next_copy
;
4028 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4029 copy
->size
= total_size
;
4030 next_copy
->vme_prev
=
4031 vm_map_copy_to_entry(copy
);
4034 base_addr
= local_end
;
4035 vm_map_lock(dst_map
);
4036 if(!vm_map_lookup_entry(dst_map
,
4037 local_end
, &tmp_entry
)) {
4038 vm_map_unlock(dst_map
);
4039 return(KERN_INVALID_ADDRESS
);
4044 if (dst_end
<= entry
->vme_end
) {
4045 copy_size
= dst_end
- base_addr
;
4049 if ((next
== vm_map_to_entry(dst_map
)) ||
4050 (next
->vme_start
!= entry
->vme_end
)) {
4051 vm_map_unlock(dst_map
);
4052 return(KERN_INVALID_ADDRESS
);
4061 /* adjust the copy object */
4062 if (total_size
> copy_size
) {
4063 vm_size_t local_size
= 0;
4064 vm_size_t entry_size
;
4066 new_offset
= copy
->offset
;
4067 copy_entry
= vm_map_copy_first_entry(copy
);
4068 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4069 entry_size
= copy_entry
->vme_end
-
4070 copy_entry
->vme_start
;
4071 if((local_size
< copy_size
) &&
4072 ((local_size
+ entry_size
)
4074 vm_map_copy_clip_end(copy
, copy_entry
,
4075 copy_entry
->vme_start
+
4076 (copy_size
- local_size
));
4077 entry_size
= copy_entry
->vme_end
-
4078 copy_entry
->vme_start
;
4079 local_size
+= entry_size
;
4080 new_offset
+= entry_size
;
4082 if(local_size
>= copy_size
) {
4083 next_copy
= copy_entry
->vme_next
;
4084 copy_entry
->vme_next
=
4085 vm_map_copy_to_entry(copy
);
4087 copy
->cpy_hdr
.links
.prev
;
4088 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4089 copy
->size
= copy_size
;
4091 copy
->cpy_hdr
.nentries
;
4092 remaining_entries
-= nentries
;
4093 copy
->cpy_hdr
.nentries
= nentries
;
4096 local_size
+= entry_size
;
4097 new_offset
+= entry_size
;
4100 copy_entry
= copy_entry
->vme_next
;
4110 local_pmap
= dst_map
->pmap
;
4112 if ((kr
= vm_map_copy_overwrite_aligned(
4113 dst_map
, tmp_entry
, copy
,
4114 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4115 if(next_copy
!= NULL
) {
4116 copy
->cpy_hdr
.nentries
+=
4118 copy
->cpy_hdr
.links
.prev
->vme_next
=
4120 copy
->cpy_hdr
.links
.prev
=
4122 copy
->size
+= copy_size
;
4126 vm_map_unlock(dst_map
);
		 * If the copy and dst address are misaligned but have the same
		 * offset within the page, we can copy_not_aligned the
		 * misaligned parts and copy aligned the rest.  If they are
		 * aligned but len is unaligned we simply need to copy
		 * the end bit unaligned.  We'll need to split the misaligned
		 * bits of the region in this case!
		 */
		/* ALWAYS UNLOCKS THE dst_map MAP */
4139 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4140 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4141 if(next_copy
!= NULL
) {
4142 copy
->cpy_hdr
.nentries
+=
4144 copy
->cpy_hdr
.links
.prev
->vme_next
=
4146 copy
->cpy_hdr
.links
.prev
=
4148 copy
->size
+= copy_size
;
4153 total_size
-= copy_size
;
4156 base_addr
+= copy_size
;
4158 copy
->offset
= new_offset
;
4159 if(next_copy
!= NULL
) {
4160 copy
->cpy_hdr
.nentries
= remaining_entries
;
4161 copy
->cpy_hdr
.links
.next
= next_copy
;
4162 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4163 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4164 copy
->size
= total_size
;
4166 vm_map_lock(dst_map
);
4168 if (!vm_map_lookup_entry(dst_map
,
4169 base_addr
, &tmp_entry
)) {
4170 vm_map_unlock(dst_map
);
4171 return(KERN_INVALID_ADDRESS
);
4173 if (tmp_entry
->in_transition
) {
4174 entry
->needs_wakeup
= TRUE
;
4175 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4180 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(base_addr
));
4186 * Throw away the vm_map_copy object
4188 vm_map_copy_discard(copy
);
4190 return(KERN_SUCCESS
);
4191 }/* vm_map_copy_overwrite */
4194 vm_map_copy_overwrite(
4196 vm_offset_t dst_addr
,
4198 boolean_t interruptible
)
4200 return vm_map_copy_overwrite_nested(
4201 dst_map
, dst_addr
, copy
, interruptible
, (pmap_t
) NULL
);
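
/*
 *	Illustrative sketch, not part of the original source:  overwriting
 *	an existing, writable destination range with data captured from a
 *	source map.  It assumes the standard vm_map_copyin() interface.
 *	On success vm_map_copy_overwrite() consumes the copy; on failure
 *	the caller must discard it.
 */
static kern_return_t
example_overwrite_region(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_size_t	len)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, TRUE);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* not consumed on failure */
	return kr;
}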
4206 * Routine: vm_map_copy_overwrite_unaligned
 *	Description:
 *		Physically copy unaligned data.
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible; however, vm_fault_copy copies
 *		within one memory object, so we have to find the smaller of
 *		"amount left", "source object data size" and "target object data
 *		size".  With unaligned data we don't need to split regions,
 *		therefore the source (copy) object should be one map entry; the
 *		target range may be split over multiple map entries, however.
 *		In any event we are pessimistic about these assumptions.
 *
 *		dst_map is locked on entry and is returned locked on success,
 *		unlocked on error.
4229 vm_map_copy_overwrite_unaligned(
4231 vm_map_entry_t entry
,
4235 vm_map_entry_t copy_entry
= vm_map_copy_first_entry(copy
);
4236 vm_map_version_t version
;
4237 vm_object_t dst_object
;
4238 vm_object_offset_t dst_offset
;
4239 vm_object_offset_t src_offset
;
4240 vm_object_offset_t entry_offset
;
4241 vm_offset_t entry_end
;
4246 kern_return_t kr
= KERN_SUCCESS
;
4248 vm_map_lock_write_to_read(dst_map
);
4250 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4251 amount_left
= copy
->size
;
4253 * unaligned so we never clipped this entry, we need the offset into
4254 * the vm_object not just the data.
4256 while (amount_left
> 0) {
4258 if (entry
== vm_map_to_entry(dst_map
)) {
4259 vm_map_unlock_read(dst_map
);
4260 return KERN_INVALID_ADDRESS
;
4263 /* "start" must be within the current map entry */
4264 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4266 dst_offset
= start
- entry
->vme_start
;
4268 dst_size
= entry
->vme_end
- start
;
4270 src_size
= copy_entry
->vme_end
-
4271 (copy_entry
->vme_start
+ src_offset
);
4273 if (dst_size
< src_size
) {
4275 * we can only copy dst_size bytes before
4276 * we have to get the next destination entry
4278 copy_size
= dst_size
;
4281 * we can only copy src_size bytes before
4282 * we have to get the next source copy entry
4284 copy_size
= src_size
;
4287 if (copy_size
> amount_left
) {
4288 copy_size
= amount_left
;
4291 * Entry needs copy, create a shadow shadow object for
4292 * Copy on write region.
4294 if (entry
->needs_copy
&&
4295 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4297 if (vm_map_lock_read_to_write(dst_map
)) {
4298 vm_map_lock_read(dst_map
);
4301 vm_object_shadow(&entry
->object
.vm_object
,
4303 (vm_size_t
)(entry
->vme_end
4304 - entry
->vme_start
));
4305 entry
->needs_copy
= FALSE
;
4306 vm_map_lock_write_to_read(dst_map
);
4308 dst_object
= entry
->object
.vm_object
;
4310 * unlike with the virtual (aligned) copy we're going
4311 * to fault on it therefore we need a target object.
4313 if (dst_object
== VM_OBJECT_NULL
) {
4314 if (vm_map_lock_read_to_write(dst_map
)) {
4315 vm_map_lock_read(dst_map
);
4318 dst_object
= vm_object_allocate((vm_size_t
)
4319 entry
->vme_end
- entry
->vme_start
);
4320 entry
->object
.vm_object
= dst_object
;
4322 vm_map_lock_write_to_read(dst_map
);
4325 * Take an object reference and unlock map. The "entry" may
4326 * disappear or change when the map is unlocked.
4328 vm_object_reference(dst_object
);
4329 version
.main_timestamp
= dst_map
->timestamp
;
4330 entry_offset
= entry
->offset
;
4331 entry_end
= entry
->vme_end
;
4332 vm_map_unlock_read(dst_map
);
4334 * Copy as much as possible in one pass
4337 copy_entry
->object
.vm_object
,
4338 copy_entry
->offset
+ src_offset
,
4341 entry_offset
+ dst_offset
,
4347 src_offset
+= copy_size
;
4348 amount_left
-= copy_size
;
4350 * Release the object reference
4352 vm_object_deallocate(dst_object
);
4354 * If a hard error occurred, return it now
4356 if (kr
!= KERN_SUCCESS
)
4359 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4360 || amount_left
== 0)
4363 * all done with this copy entry, dispose.
4365 vm_map_copy_entry_unlink(copy
, copy_entry
);
4366 vm_object_deallocate(copy_entry
->object
.vm_object
);
4367 vm_map_copy_entry_dispose(copy
, copy_entry
);
4369 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4370 == vm_map_copy_to_entry(copy
) && amount_left
) {
4372 * not finished copying but run out of source
4374 return KERN_INVALID_ADDRESS
;
4379 if (amount_left
== 0)
4380 return KERN_SUCCESS
;
4382 vm_map_lock_read(dst_map
);
4383 if (version
.main_timestamp
== dst_map
->timestamp
) {
4384 if (start
== entry_end
) {
4386 * destination region is split. Use the version
4387 * information to avoid a lookup in the normal
4390 entry
= entry
->vme_next
;
4392 * should be contiguous. Fail if we encounter
4393 * a hole in the destination.
4395 if (start
!= entry
->vme_start
) {
4396 vm_map_unlock_read(dst_map
);
4397 return KERN_INVALID_ADDRESS
;
4402 * Map version check failed.
4403 * we must lookup the entry because somebody
4404 * might have changed the map behind our backs.
4407 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4409 vm_map_unlock_read(dst_map
);
4410 return KERN_INVALID_ADDRESS
;
4416 vm_map_unlock_read(dst_map
);
4418 return KERN_SUCCESS
;
4419 }/* vm_map_copy_overwrite_unaligned */
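
/*
 *	Illustrative sketch, not part of the original source:  the
 *	per-iteration size computation used by
 *	vm_map_copy_overwrite_unaligned() above.  Each pass copies the
 *	smallest of the room left in the destination entry, the data left
 *	in the current source copy entry, and the overall amount left.
 */
static vm_size_t
example_unaligned_chunk_size(
	vm_size_t	dst_size,	/* entry->vme_end - start */
	vm_size_t	src_size,	/* bytes left in the copy entry */
	vm_size_t	amount_left)
{
	vm_size_t	copy_size;

	copy_size = (dst_size < src_size) ? dst_size : src_size;
	if (copy_size > amount_left)
		copy_size = amount_left;
	return copy_size;
}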
4422 * Routine: vm_map_copy_overwrite_aligned
4425 * Does all the vm_trickery possible for whole pages.
4429 * If there are no permanent objects in the destination,
4430 * and the source and destination map entry zones match,
4431 * and the destination map entry is not shared,
4432 * then the map entries can be deleted and replaced
4433 * with those from the copy. The following code is the
4434 * basic idea of what to do, but there are lots of annoying
4435 * little details about getting protection and inheritance
4436 * right. Should add protection, inheritance, and sharing checks
4437 * to the above pass and make sure that no wiring is involved.
4441 vm_map_copy_overwrite_aligned(
4443 vm_map_entry_t tmp_entry
,
4449 vm_map_entry_t copy_entry
;
4450 vm_size_t copy_size
;
4452 vm_map_entry_t entry
;
4454 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4455 != vm_map_copy_to_entry(copy
))
4457 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4460 if (entry
== vm_map_to_entry(dst_map
)) {
4461 vm_map_unlock(dst_map
);
4462 return KERN_INVALID_ADDRESS
;
4464 size
= (entry
->vme_end
- entry
->vme_start
);
4466 * Make sure that no holes popped up in the
4467 * address map, and that the protection is
4468 * still valid, in case the map was unlocked
4472 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4473 && !entry
->needs_copy
)) {
4474 vm_map_unlock(dst_map
);
4475 return(KERN_INVALID_ADDRESS
);
4477 assert(entry
!= vm_map_to_entry(dst_map
));
4480 * Check protection again
4483 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4484 vm_map_unlock(dst_map
);
4485 return(KERN_PROTECTION_FAILURE
);
4489 * Adjust to source size first
4492 if (copy_size
< size
) {
4493 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4498 * Adjust to destination size
4501 if (size
< copy_size
) {
4502 vm_map_copy_clip_end(copy
, copy_entry
,
4503 copy_entry
->vme_start
+ size
);
4507 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4508 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4509 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4512 * If the destination contains temporary unshared memory,
4513 * we can perform the copy by throwing it away and
4514 * installing the source data.
4517 object
= entry
->object
.vm_object
;
4518 if ((!entry
->is_shared
&&
4519 ((object
== VM_OBJECT_NULL
) ||
4520 (object
->internal
&& !object
->true_share
))) ||
4521 entry
->needs_copy
) {
4522 vm_object_t old_object
= entry
->object
.vm_object
;
4523 vm_object_offset_t old_offset
= entry
->offset
;
4524 vm_object_offset_t offset
;
4527 * Ensure that the source and destination aren't
4530 if (old_object
== copy_entry
->object
.vm_object
&&
4531 old_offset
== copy_entry
->offset
) {
4532 vm_map_copy_entry_unlink(copy
, copy_entry
);
4533 vm_map_copy_entry_dispose(copy
, copy_entry
);
4535 if (old_object
!= VM_OBJECT_NULL
)
4536 vm_object_deallocate(old_object
);
4538 start
= tmp_entry
->vme_end
;
4539 tmp_entry
= tmp_entry
->vme_next
;
4543 if (old_object
!= VM_OBJECT_NULL
) {
4544 if(entry
->is_sub_map
) {
4545 if(entry
->use_pmap
) {
4547 pmap_unnest(dst_map
->pmap
,
4549 entry
->vme_end
- entry
->vme_start
);
4552 vm_map_submap_pmap_clean(
4553 dst_map
, entry
->vme_start
,
4555 entry
->object
.sub_map
,
4559 entry
->object
.sub_map
);
4561 vm_object_pmap_protect(
4566 tmp_entry
->vme_start
,
4569 vm_object_deallocate(old_object
);
4573 entry
->is_sub_map
= FALSE
;
4574 entry
->object
= copy_entry
->object
;
4575 object
= entry
->object
.vm_object
;
4576 entry
->needs_copy
= copy_entry
->needs_copy
;
4577 entry
->wired_count
= 0;
4578 entry
->user_wired_count
= 0;
4579 offset
= entry
->offset
= copy_entry
->offset
;
4581 vm_map_copy_entry_unlink(copy
, copy_entry
);
4582 vm_map_copy_entry_dispose(copy
, copy_entry
);
4583 #if BAD_OPTIMIZATION
	 * If we turn this optimization back on,
	 * we need to revisit our use of pmap mappings;
	 * large copies will cause us to run out and panic.
	 * This optimization only saved on average 2 us per page if ALL
	 * the pages in the source were currently mapped
	 * and ALL the pages in the dest were touched; if fewer
	 * than 2/3 of the pages were touched, this optimization actually cost more cycles.
4595 * Try to aggressively enter physical mappings
4596 * (but avoid uninstantiated objects)
4598 if (object
!= VM_OBJECT_NULL
) {
4599 vm_offset_t va
= entry
->vme_start
;
4601 while (va
< entry
->vme_end
) {
4602 register vm_page_t m
;
4606 * Look for the page in the top object
4608 prot
= entry
->protection
;
4609 vm_object_lock(object
);
4610 vm_object_paging_begin(object
);
4612 if ((m
= vm_page_lookup(object
,offset
)) !=
4613 VM_PAGE_NULL
&& !m
->busy
&&
4615 (!m
->unusual
|| (!m
->error
&&
4616 !m
->restart
&& !m
->absent
&&
4617 (prot
& m
->page_lock
) == 0))) {
4620 vm_object_unlock(object
);
4623 * Honor COW obligations
4625 if (entry
->needs_copy
)
4626 prot
&= ~VM_PROT_WRITE
;
4627 /* It is our policy to require */
4628 /* explicit sync from anyone */
4629 /* writing code and then */
4630 /* a pc to execute it. */
4633 PMAP_ENTER(pmap
, va
, m
,
4636 vm_object_lock(object
);
4637 vm_page_lock_queues();
4638 if (!m
->active
&& !m
->inactive
)
4639 vm_page_activate(m
);
4640 vm_page_unlock_queues();
4641 PAGE_WAKEUP_DONE(m
);
4643 vm_object_paging_end(object
);
4644 vm_object_unlock(object
);
4646 offset
+= PAGE_SIZE_64
;
4648 } /* end while (va < entry->vme_end) */
4649 } /* end if (object) */
4652 * Set up for the next iteration. The map
4653 * has not been unlocked, so the next
4654 * address should be at the end of this
4655 * entry, and the next map entry should be
4656 * the one following it.
4659 start
= tmp_entry
->vme_end
;
4660 tmp_entry
= tmp_entry
->vme_next
;
4662 vm_map_version_t version
;
4663 vm_object_t dst_object
= entry
->object
.vm_object
;
4664 vm_object_offset_t dst_offset
= entry
->offset
;
4668 * Take an object reference, and record
4669 * the map version information so that the
4670 * map can be safely unlocked.
4673 vm_object_reference(dst_object
);
4675 version
.main_timestamp
= dst_map
->timestamp
;
4677 vm_map_unlock(dst_map
);
4680 * Copy as much as possible in one pass
4685 copy_entry
->object
.vm_object
,
4695 * Release the object reference
4698 vm_object_deallocate(dst_object
);
4701 * If a hard error occurred, return it now
4704 if (r
!= KERN_SUCCESS
)
4707 if (copy_size
!= 0) {
4709 * Dispose of the copied region
4712 vm_map_copy_clip_end(copy
, copy_entry
,
4713 copy_entry
->vme_start
+ copy_size
);
4714 vm_map_copy_entry_unlink(copy
, copy_entry
);
4715 vm_object_deallocate(copy_entry
->object
.vm_object
);
4716 vm_map_copy_entry_dispose(copy
, copy_entry
);
4720 * Pick up in the destination map where we left off.
4722 * Use the version information to avoid a lookup
4723 * in the normal case.
4727 vm_map_lock(dst_map
);
4728 if ((version
.main_timestamp
+ 1) == dst_map
->timestamp
) {
4729 /* We can safely use saved tmp_entry value */
4731 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4732 tmp_entry
= tmp_entry
->vme_next
;
4734 /* Must do lookup of tmp_entry */
4736 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4737 vm_map_unlock(dst_map
);
4738 return(KERN_INVALID_ADDRESS
);
4740 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4745 return(KERN_SUCCESS
);
4746 }/* vm_map_copy_overwrite_aligned */
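
/*
 *	Illustrative sketch, not part of the original source:  the test the
 *	aligned path above uses to decide whether a destination entry holds
 *	temporary unshared memory and can therefore simply be replaced by
 *	the source entry instead of being physically copied.
 */
static boolean_t
example_can_replace_entry(
	vm_map_entry_t	entry)
{
	vm_object_t	object = entry->object.vm_object;

	return ((!entry->is_shared &&
		 ((object == VM_OBJECT_NULL) ||
		  (object->internal && !object->true_share))) ||
		entry->needs_copy);
}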
4749 * Routine: vm_map_copyin_kernel_buffer
4752 * Copy in data to a kernel buffer from space in the
4753 * source map. The original space may be otpionally
4756 * If successful, returns a new copy object.
4759 vm_map_copyin_kernel_buffer(
4761 vm_offset_t src_addr
,
4763 boolean_t src_destroy
,
4764 vm_map_copy_t
*copy_result
)
4768 vm_size_t kalloc_size
= sizeof(struct vm_map_copy
) + len
;
4770 copy
= (vm_map_copy_t
) kalloc(kalloc_size
);
4771 if (copy
== VM_MAP_COPY_NULL
) {
4772 return KERN_RESOURCE_SHORTAGE
;
4774 copy
->type
= VM_MAP_COPY_KERNEL_BUFFER
;
4777 copy
->cpy_kdata
= (vm_offset_t
) (copy
+ 1);
4778 copy
->cpy_kalloc_size
= kalloc_size
;
4780 if (src_map
== kernel_map
) {
4781 bcopy((char *)src_addr
, (char *)copy
->cpy_kdata
, len
);
4782 flags
= VM_MAP_REMOVE_KUNWIRE
| VM_MAP_REMOVE_WAIT_FOR_KWIRE
|
4783 VM_MAP_REMOVE_INTERRUPTIBLE
;
4786 kr
= copyinmap(src_map
, src_addr
, copy
->cpy_kdata
, len
);
4787 if (kr
!= KERN_SUCCESS
) {
4788 kfree((vm_offset_t
)copy
, kalloc_size
);
4791 flags
= VM_MAP_REMOVE_WAIT_FOR_KWIRE
|
4792 VM_MAP_REMOVE_INTERRUPTIBLE
;
4795 (void) vm_map_remove(src_map
, trunc_page(src_addr
),
4796 round_page(src_addr
+ len
),
4799 *copy_result
= copy
;
4800 return KERN_SUCCESS
;
4804 * Routine: vm_map_copyout_kernel_buffer
 *		Copy out data from a kernel buffer into space in the
 *		destination map.  The space may be optionally dynamically
4811 * If successful, consumes the copy object.
4812 * Otherwise, the caller is responsible for it.
4815 vm_map_copyout_kernel_buffer(
4817 vm_offset_t
*addr
, /* IN/OUT */
4819 boolean_t overwrite
)
4821 kern_return_t kr
= KERN_SUCCESS
;
4822 thread_act_t thr_act
= current_act();
4827 * Allocate space in the target map for the data
4830 kr
= vm_map_enter(map
,
4832 round_page(copy
->size
),
4836 (vm_object_offset_t
) 0,
4840 VM_INHERIT_DEFAULT
);
4841 if (kr
!= KERN_SUCCESS
)
4846 * Copyout the data from the kernel buffer to the target map.
4848 if (thr_act
->map
== map
) {
4851 * If the target map is the current map, just do
4854 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4856 return(KERN_INVALID_ADDRESS
);
4863 * If the target map is another map, assume the
4864 * target's address space identity for the duration
4867 vm_map_reference(map
);
4868 oldmap
= vm_map_switch(map
);
4870 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4872 return(KERN_INVALID_ADDRESS
);
4875 (void) vm_map_switch(oldmap
);
4876 vm_map_deallocate(map
);
4879 kfree((vm_offset_t
)copy
, copy
->cpy_kalloc_size
);
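
/*
 *	Illustrative sketch, not part of the original source:  the
 *	address-space switch pattern used above when the destination map
 *	is not the current one.  The map is referenced across the switch
 *	so it cannot be destroyed while its identity is borrowed.
 */
static kern_return_t
example_copyout_other_map(
	vm_map_t	map,
	char		*kernel_src,
	char		*user_dst,
	vm_size_t	len)
{
	vm_map_t	oldmap;
	kern_return_t	kr = KERN_SUCCESS;

	vm_map_reference(map);			/* keep the map alive */
	oldmap = vm_map_switch(map);		/* adopt its address space */
	if (copyout(kernel_src, user_dst, len))
		kr = KERN_INVALID_ADDRESS;
	(void) vm_map_switch(oldmap);		/* restore our own map */
	vm_map_deallocate(map);
	return kr;
}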
4885 * Macro: vm_map_copy_insert
4888 * Link a copy chain ("copy") into a map at the
4889 * specified location (after "where").
4891 * The copy chain is destroyed.
4893 * The arguments are evaluated multiple times.
4895 #define vm_map_copy_insert(map, where, copy) \
4897 vm_map_t VMCI_map; \
4898 vm_map_entry_t VMCI_where; \
4899 vm_map_copy_t VMCI_copy; \
4901 VMCI_where = (where); \
4902 VMCI_copy = (copy); \
4903 ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
4904 ->vme_next = (VMCI_where->vme_next); \
4905 ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
4906 ->vme_prev = VMCI_where; \
4907 VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
4908 UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \
4909 zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy); \
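
/*
 *	Illustrative sketch, not part of the original source:  the
 *	doubly-linked-list splice performed by vm_map_copy_insert() above,
 *	reduced to a toy link structure.  The chain first..last is linked
 *	in after "where" in constant time.
 */
struct example_link {
	struct example_link	*next;
	struct example_link	*prev;
};

static void
example_splice_after(
	struct example_link	*where,
	struct example_link	*first,
	struct example_link	*last)
{
	last->next = where->next;
	where->next->prev = last;
	where->next = first;
	first->prev = where;
}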
4913 * Routine: vm_map_copyout
4916 * Copy out a copy chain ("copy") into newly-allocated
4917 * space in the destination map.
4919 * If successful, consumes the copy object.
4920 * Otherwise, the caller is responsible for it.
kern_return_t
vm_map_copyout(
    register vm_map_t       dst_map,
    vm_offset_t             *dst_addr,  /* OUT */
    register vm_map_copy_t  copy)
{
    vm_size_t           size;
    vm_size_t           adjustment;
    vm_offset_t         start;
    vm_object_offset_t  vm_copy_start;
    vm_map_entry_t      last;
    register
    vm_map_entry_t      entry;

    /*
     *	Check for null copy object.
     */

    if (copy == VM_MAP_COPY_NULL) {
        *dst_addr = 0;
        return(KERN_SUCCESS);
    }

    /*
     *	Check for special copy object, created
     *	by vm_map_copyin_object.
     */

    if (copy->type == VM_MAP_COPY_OBJECT) {
        vm_object_t         object = copy->cpy_object;
        kern_return_t       kr;
        vm_object_offset_t  offset;

        offset = trunc_page_64(copy->offset);
        size = round_page(copy->size +
                          (vm_size_t)(copy->offset - offset));
        kr = vm_map_enter(dst_map, dst_addr, size,
                          (vm_offset_t) 0, TRUE,
                          object, offset, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL,
                          VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS)
            return(kr);
        /* Account for non-pagealigned copy object */
        *dst_addr += (vm_offset_t)(copy->offset - offset);
        zfree(vm_map_copy_zone, (vm_offset_t) copy);
        return(KERN_SUCCESS);
    }

    /*
     *	Check for special kernel buffer allocated
     *	by new_ipc_kmsg_copyin.
     */

    if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
        return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
                                            copy, FALSE));
    }

    /*
     *	Find space for the data
     */

    vm_copy_start = trunc_page_64(copy->offset);
    size = round_page((vm_size_t)copy->offset + copy->size)
                      - vm_copy_start;

StartAgain: ;

    vm_map_lock(dst_map);
    assert(first_free_is_valid(dst_map));
    start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
        vm_map_min(dst_map) : last->vme_end;

    while (TRUE) {
        vm_map_entry_t  next = last->vme_next;
        vm_offset_t     end = start + size;

        if ((end > dst_map->max_offset) || (end < start)) {
            if (dst_map->wait_for_space) {
                if (size <= (dst_map->max_offset - dst_map->min_offset)) {
                    assert_wait((event_t) dst_map,
                                THREAD_INTERRUPTIBLE);
                    vm_map_unlock(dst_map);
                    thread_block((void (*)(void))0);
                    goto StartAgain;
                }
            }
            vm_map_unlock(dst_map);
            return(KERN_NO_SPACE);
        }

        if ((next == vm_map_to_entry(dst_map)) ||
            (next->vme_start >= end))
            break;

        last = next;
        start = last->vme_end;
    }

    /*
     *	Since we're going to just drop the map
     *	entries from the copy into the destination
     *	map, they must come from the same pool.
     */

    if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
        /*
         * Mismatches occur when dealing with the default
         * pager.
         */
        zone_t          old_zone;
        vm_map_entry_t  next, new;

        /*
         * Find the zone that the copies were allocated from
         */
        old_zone = (copy->cpy_hdr.entries_pageable)
                    ? vm_map_entry_zone
                    : vm_map_kentry_zone;
        entry = vm_map_copy_first_entry(copy);

        /*
         * Reinitialize the copy so that vm_map_copy_entry_link
         * will work.
         */
        copy->cpy_hdr.nentries = 0;
        copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
        vm_map_copy_first_entry(copy) =
            vm_map_copy_last_entry(copy) =
                vm_map_copy_to_entry(copy);

        /*
         * Copy each entry.
         */
        while (entry != vm_map_copy_to_entry(copy)) {
            new = vm_map_copy_entry_create(copy);
            vm_map_entry_copy_full(new, entry);
            new->use_pmap = FALSE;  /* clr address space specifics */
            vm_map_copy_entry_link(copy,
                                   vm_map_copy_last_entry(copy),
                                   new);
            next = entry->vme_next;
            zfree(old_zone, (vm_offset_t) entry);
            entry = next;
        }
    }

    /*
     *	Adjust the addresses in the copy chain, and
     *	reset the region attributes.
     */

    adjustment = start - vm_copy_start;
    for (entry = vm_map_copy_first_entry(copy);
         entry != vm_map_copy_to_entry(copy);
         entry = entry->vme_next) {
        entry->vme_start += adjustment;
        entry->vme_end += adjustment;

        entry->inheritance = VM_INHERIT_DEFAULT;
        entry->protection = VM_PROT_DEFAULT;
        entry->max_protection = VM_PROT_ALL;
        entry->behavior = VM_BEHAVIOR_DEFAULT;

        /*
         * If the entry is now wired,
         * map the pages into the destination map.
         */
        if (entry->wired_count != 0) {
            register vm_offset_t    va;
            vm_object_offset_t      offset;
            register vm_object_t    object;

            object = entry->object.vm_object;
            offset = entry->offset;
            va = entry->vme_start;

            pmap_pageable(dst_map->pmap,
                          entry->vme_start,
                          entry->vme_end,
                          TRUE);

            while (va < entry->vme_end) {
                register vm_page_t  m;

                /*
                 * Look up the page in the object.
                 * Assert that the page will be found in the
                 * top object:
                 * either
                 *	the object was newly created by
                 *	vm_object_copy_slowly, and has
                 *	copies of all of the pages from
                 *	the source object,
                 * or
                 *	the object was moved from the old
                 *	map entry; because the old map
                 *	entry was wired, all of the pages
                 *	were in the top-level object.
                 *	(XXX not true if we wire pages for
                 *	 reading)
                 */
                vm_object_lock(object);
                vm_object_paging_begin(object);

                m = vm_page_lookup(object, offset);
                if (m == VM_PAGE_NULL || m->wire_count == 0 ||
                    m->absent)
                    panic("vm_map_copyout: wiring 0x%x", m);

                m->busy = TRUE;
                vm_object_unlock(object);

                PMAP_ENTER(dst_map->pmap, va, m,
                           entry->protection, TRUE);

                vm_object_lock(object);
                PAGE_WAKEUP_DONE(m);
                /* the page is wired, so we don't have to activate */
                vm_object_paging_end(object);
                vm_object_unlock(object);

                offset += PAGE_SIZE_64;
                va += PAGE_SIZE;
            }
        }
        else if (size <= vm_map_aggressive_enter_max) {

            register vm_offset_t    va;
            vm_object_offset_t      offset;
            register vm_object_t    object;
            vm_prot_t               prot;

            object = entry->object.vm_object;
            if (object != VM_OBJECT_NULL) {

                offset = entry->offset;
                va = entry->vme_start;
                while (va < entry->vme_end) {
                    register vm_page_t  m;

                    /*
                     * Look up the page in the object.
                     * Assert that the page will be found
                     * in the top object if at all...
                     */
                    vm_object_lock(object);
                    vm_object_paging_begin(object);

                    if (((m = vm_page_lookup(object,
                                             offset))
                            != VM_PAGE_NULL) &&
                        !m->busy && !m->fictitious &&
                        !m->absent && !m->error) {
                        m->busy = TRUE;
                        vm_object_unlock(object);

                        /* honor cow obligations */
                        prot = entry->protection;
                        if (entry->needs_copy)
                            prot &= ~VM_PROT_WRITE;

                        PMAP_ENTER(dst_map->pmap, va,
                                   m, prot, FALSE);

                        vm_object_lock(object);
                        vm_page_lock_queues();
                        if (!m->active && !m->inactive)
                            vm_page_activate(m);
                        vm_page_unlock_queues();
                        PAGE_WAKEUP_DONE(m);
                    }
                    vm_object_paging_end(object);
                    vm_object_unlock(object);

                    offset += PAGE_SIZE_64;
                    va += PAGE_SIZE;
                }
            }
        }
    }

    /*
     *	Correct the page alignment for the result
     */

    *dst_addr = start + (copy->offset - vm_copy_start);

    /*
     *	Update the hints and the map size
     */

    SAVE_HINT(dst_map, vm_map_copy_last_entry(copy));

    dst_map->size += size;

    /*
     *	Link in the copy
     */

    vm_map_copy_insert(dst_map, last, copy);

    vm_map_unlock(dst_map);

    /*
     * XXX	If wiring_required, call vm_map_pageable
     */

    return(KERN_SUCCESS);
}
boolean_t	vm_map_aggressive_enter;	/* not used yet */
/*
 *	Routine:	vm_map_copyin
 *
 *	Description:
 *		Copy the specified region (src_addr, len) from the
 *		source address space (src_map), possibly removing
 *		the region from the source address space (src_destroy).
 *
 *	Returns:
 *		A vm_map_copy_t object (copy_result), suitable for
 *		insertion into another address space (using vm_map_copyout),
 *		copying over another address space region (using
 *		vm_map_copy_overwrite).  If the copy is unused, it
 *		should be destroyed (using vm_map_copy_discard).
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
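/*
 * Illustrative sketch (not part of the build): besides vm_map_copyout(),
 * a copy object produced here can overwrite an already-mapped region via
 * vm_map_copy_overwrite(); if the copy is never consumed it must be
 * destroyed with vm_map_copy_discard().  The maps and addresses below are
 * placeholders.
 */
#if 0
static kern_return_t
example_overwrite_region(
    vm_map_t    src_map,
    vm_offset_t src_addr,
    vm_size_t   len,
    vm_map_t    dst_map,
    vm_offset_t dst_addr)
{
    vm_map_copy_t   copy;
    kern_return_t   kr;

    kr = vm_map_copyin_common(src_map, src_addr, len,
                              FALSE, FALSE, &copy, FALSE);
    if (kr != KERN_SUCCESS)
        return kr;

    /* Overwrite the existing destination region in place. */
    kr = vm_map_copy_overwrite(dst_map, dst_addr, copy,
                               FALSE /* interruptible */);
    if (kr != KERN_SUCCESS)
        vm_map_copy_discard(copy);      /* copy was not consumed */
    return kr;
}
#endif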
typedef struct submap_map {
    vm_map_t    parent_map;
    vm_offset_t base_start;
    vm_offset_t base_end;
    struct submap_map *next;
} submap_map_t;

kern_return_t
vm_map_copyin_common(
    vm_map_t        src_map,
    vm_offset_t     src_addr,
    vm_size_t       len,
    boolean_t       src_destroy,
    boolean_t       src_volatile,
    vm_map_copy_t   *copy_result,   /* OUT */
    boolean_t       use_maxprot)
{
    extern int      msg_ool_size_small;

    vm_map_entry_t  tmp_entry;      /* Result of last map lookup --
                                     * in multi-level lookup, this
                                     * entry contains the actual
                                     * vm_object/offset.
                                     */
    register
    vm_map_entry_t  new_entry = VM_MAP_ENTRY_NULL;  /* Map entry for copy */

    vm_offset_t     src_start;      /* Start of current entry --
                                     * where copy is taking place now
                                     */
    vm_offset_t     src_end;        /* End of entire region to be
                                     * copied */
    vm_offset_t     base_start;     /* submap fields to save offsets */
                                    /* in original map */
    vm_offset_t     base_end;
    vm_map_t        base_map = src_map;
    vm_map_entry_t  base_entry;
    boolean_t       map_share = FALSE;
    submap_map_t    *parent_maps = NULL;

    register
    vm_map_copy_t   copy;           /* Resulting copy */
    vm_offset_t     copy_addr;

    /*
     *	Check for copies of zero bytes.
     */

    if (len == 0) {
        *copy_result = VM_MAP_COPY_NULL;
        return(KERN_SUCCESS);
    }

    /*
     * If the copy is sufficiently small, use a kernel buffer instead
     * of making a virtual copy.  The theory being that the cost of
     * setting up VM (and taking C-O-W faults) dominates the copy costs
     * for small regions.
     */
    if ((len < msg_ool_size_small) && !use_maxprot)
        return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
                                           src_destroy, copy_result);
    /*
     *	Compute start and end of region
     */

    src_start = trunc_page(src_addr);
    src_end = round_page(src_addr + len);

    XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);

    /*
     *	Check that the end address doesn't overflow
     */

    if (src_end <= src_start)
        if ((src_end < src_start) || (src_start != 0))
            return(KERN_INVALID_ADDRESS);

    /*
     *	Allocate a header element for the list.
     *
     *	Use the start and end in the header to
     *	remember the endpoints prior to rounding.
     */

    copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
    vm_map_copy_first_entry(copy) =
        vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
    copy->type = VM_MAP_COPY_ENTRY_LIST;
    copy->cpy_hdr.nentries = 0;
    copy->cpy_hdr.entries_pageable = TRUE;

    copy->offset = src_addr;
    copy->size = len;

    new_entry = vm_map_copy_entry_create(copy);

#define	RETURN(x)						\
    MACRO_BEGIN						\
    vm_map_unlock(src_map);					\
    if (new_entry != VM_MAP_ENTRY_NULL)			\
        vm_map_copy_entry_dispose(copy,new_entry);		\
    vm_map_copy_discard(copy);				\
    {								\
        submap_map_t *ptr;					\
								\
        for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
            parent_maps=parent_maps->next;			\
            kfree((vm_offset_t)ptr, sizeof(submap_map_t));	\
        }							\
    }								\
    MACRO_RETURN(x);						\
    MACRO_END

    /*
     *	Find the beginning of the region.
     */

    vm_map_lock(src_map);

    if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
        RETURN(KERN_INVALID_ADDRESS);
    if(!tmp_entry->is_sub_map) {
        vm_map_clip_start(src_map, tmp_entry, src_start);
    }
    /* set for later submap fix-up */
    copy_addr = src_start;
    /*
     *	Go through entries until we get to the end.
     */

    while (TRUE) {
        register
        vm_map_entry_t      src_entry = tmp_entry;  /* Top-level entry */
        vm_size_t           src_size;               /* Size of source
                                                     * map entry (in both
                                                     * maps)
                                                     */

        register
        vm_object_t         src_object;             /* Object to copy */
        vm_object_offset_t  src_offset;

        boolean_t           src_needs_copy;         /* Should source map
                                                     * be made read-only
                                                     * for copy-on-write?
                                                     */

        boolean_t           new_entry_needs_copy;   /* Will new entry be COW? */

        boolean_t           was_wired;              /* Was source wired? */
        vm_map_version_t    version;                /* Version before locks
                                                     * dropped to make copy
                                                     */
        kern_return_t       result;                 /* Return value from
                                                     * copy_strategically.
                                                     */
        while(tmp_entry->is_sub_map) {
            vm_size_t submap_len;
            submap_map_t *ptr;

            ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
            ptr->next = parent_maps;
            parent_maps = ptr;
            ptr->parent_map = src_map;
            ptr->base_start = src_start;
            ptr->base_end = src_end;
            submap_len = tmp_entry->vme_end - src_start;
            if(submap_len > (src_end-src_start))
                submap_len = src_end-src_start;
            ptr->base_start += submap_len;

            src_start -= tmp_entry->vme_start;
            src_start += tmp_entry->offset;
            src_end = src_start + submap_len;
            src_map = tmp_entry->object.sub_map;
            vm_map_lock(src_map);
            vm_map_unlock(ptr->parent_map);
            if (!vm_map_lookup_entry(
                    src_map, src_start, &tmp_entry))
                RETURN(KERN_INVALID_ADDRESS);
            map_share = TRUE;
            if(!tmp_entry->is_sub_map)
                vm_map_clip_start(src_map, tmp_entry, src_start);
            src_entry = tmp_entry;
        }
        if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
            (tmp_entry->object.vm_object->phys_contiguous)) {
            /* This is not, and cannot be, supported for now */
            /* we need a description of the caching mode */
            /* reflected in the object before we can */
            /* support copyin, and then the support will */
            /* be for direct copy */
            RETURN(KERN_PROTECTION_FAILURE);
        }
        /*
         *	Create a new address map entry to hold the result.
         *	Fill in the fields from the appropriate source entries.
         *	We must unlock the source map to do this if we need
         *	to allocate a map entry.
         */
        if (new_entry == VM_MAP_ENTRY_NULL) {
            version.main_timestamp = src_map->timestamp;
            vm_map_unlock(src_map);

            new_entry = vm_map_copy_entry_create(copy);

            vm_map_lock(src_map);
            if ((version.main_timestamp + 1) != src_map->timestamp) {
                if (!vm_map_lookup_entry(src_map, src_start,
                                         &tmp_entry)) {
                    RETURN(KERN_INVALID_ADDRESS);
                }
                vm_map_clip_start(src_map, tmp_entry, src_start);
                continue; /* restart w/ new tmp_entry */
            }
        }

        /*
         *	Verify that the region can be read.
         */
        if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
             !use_maxprot) ||
            ((src_entry->max_protection & VM_PROT_READ) == 0))
            RETURN(KERN_PROTECTION_FAILURE);

        /*
         *	Clip against the endpoints of the entire region.
         */

        vm_map_clip_end(src_map, src_entry, src_end);

        src_size = src_entry->vme_end - src_start;
        src_object = src_entry->object.vm_object;
        src_offset = src_entry->offset;
        was_wired = (src_entry->wired_count != 0);

        vm_map_entry_copy(new_entry, src_entry);
        new_entry->use_pmap = FALSE; /* clr address space specifics */

        /*
         *	Attempt non-blocking copy-on-write optimizations.
         */

        if (src_destroy &&
            (src_object == VM_OBJECT_NULL ||
             (src_object->internal && !src_object->true_share
              && !map_share))) {
            /*
             * If we are destroying the source, and the object
             * is internal, we can move the object reference
             * from the source to the copy.  The copy is
             * copy-on-write only if the source is.
             * We make another reference to the object, because
             * destroying the source entry will deallocate it.
             */
            vm_object_reference(src_object);

            /*
             * Copy is always unwired.  vm_map_copy_entry
             * set its wired count to zero.
             */

            goto CopySuccessful;
        }

    RestartCopy:
        XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
            src_object, new_entry, new_entry->object.vm_object,
            was_wired, 0);
        if (!was_wired &&
            vm_object_copy_quickly(
                &new_entry->object.vm_object,
                src_offset,
                src_size,
                &src_needs_copy,
                &new_entry_needs_copy)) {

            new_entry->needs_copy = new_entry_needs_copy;

            /*
             *	Handle copy-on-write obligations
             */

            if (src_needs_copy && !tmp_entry->needs_copy) {
                if (tmp_entry->is_shared ||
                    tmp_entry->object.vm_object->true_share ||
                    map_share) {
                    vm_map_unlock(src_map);
                    new_entry->object.vm_object =
                        vm_object_copy_delayed(
                            src_object,
                            src_offset,
                            src_size);
                    /* dec ref gained in copy_quickly */
                    vm_object_lock(src_object);
                    src_object->ref_count--;
                    assert(src_object->ref_count > 0);
                    vm_object_res_deallocate(src_object);
                    vm_object_unlock(src_object);
                    vm_map_lock(src_map);
                    /*
                     * it turns out that we have
                     * finished our copy. No matter
                     * what the state of the map
                     * we will lock it again here
                     * knowing that if there is
                     * additional data to copy
                     * it will be checked at
                     * the top of the loop
                     *
                     * Don't do timestamp check
                     */
                } else {
                    vm_object_pmap_protect(
                        src_object,
                        src_offset,
                        src_size,
                        (src_entry->is_shared ?
                            PMAP_NULL
                            : src_map->pmap),
                        src_entry->vme_start,
                        src_entry->protection &
                            ~VM_PROT_WRITE);

                    tmp_entry->needs_copy = TRUE;
                }
            }

            /*
             *	The map has never been unlocked, so it's safe
             *	to move to the next entry rather than doing
             *	another lookup.
             */

            goto CopySuccessful;
        }

        new_entry->needs_copy = FALSE;

        /*
         *	Take an object reference, so that we may
         *	release the map lock(s).
         */

        assert(src_object != VM_OBJECT_NULL);
        vm_object_reference(src_object);

        /*
         *	Record the timestamp for later verification.
         *	Unlock the map.
         */

        version.main_timestamp = src_map->timestamp;
        vm_map_unlock(src_map);
        /*
         *	Perform the copy
         */

        if (was_wired) {
            vm_object_lock(src_object);
            result = vm_object_copy_slowly(
                        src_object,
                        src_offset,
                        src_size,
                        THREAD_UNINT,
                        &new_entry->object.vm_object);
            new_entry->offset = 0;
            new_entry->needs_copy = FALSE;
        } else {
            result = vm_object_copy_strategically(src_object,
                        src_offset,
                        src_size,
                        &new_entry->object.vm_object,
                        &new_entry->offset,
                        &new_entry_needs_copy);

            new_entry->needs_copy = new_entry_needs_copy;
        }

        if (result != KERN_SUCCESS &&
            result != KERN_MEMORY_RESTART_COPY) {
            vm_map_lock(src_map);
            RETURN(result);
        }

        /*
         *	Throw away the extra reference
         */

        vm_object_deallocate(src_object);

        /*
         *	Verify that the map has not substantially
         *	changed while the copy was being made.
         */

        vm_map_lock(src_map);	/* Increments timestamp once! */

        if ((version.main_timestamp + 1) == src_map->timestamp)
            goto VerificationSuccessful;

        /*
         *	Simple version comparison failed.
         *
         *	Retry the lookup and verify that the
         *	same object/offset are still present.
         *
         *	[Note: a memory manager that colludes with
         *	the calling task can detect that we have
         *	cheated. While the map was unlocked, the
         *	mapping could have been changed and restored.]
         */

        if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
            RETURN(KERN_INVALID_ADDRESS);
        }

        src_entry = tmp_entry;
        vm_map_clip_start(src_map, src_entry, src_start);

        if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
             !use_maxprot) ||
            ((src_entry->max_protection & VM_PROT_READ) == 0))
            goto VerificationFailed;

        if (src_entry->vme_end < new_entry->vme_end)
            src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;

        if ((src_entry->object.vm_object != src_object) ||
            (src_entry->offset != src_offset) ) {

            /*
             *	Verification failed.
             *
             *	Start over with this top-level entry.
             */

        VerificationFailed: ;

            vm_object_deallocate(new_entry->object.vm_object);
            tmp_entry = src_entry;
            continue;
        }

        /*
         *	Verification succeeded.
         */

    VerificationSuccessful: ;

        if (result == KERN_MEMORY_RESTART_COPY)
            goto RestartCopy;

        /*
         *	Copy succeeded.
         */

    CopySuccessful: ;

        /*
         *	Link in the new copy entry.
         */

        vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
                               new_entry);

        /*
         *	Determine whether the entire region
         *	has been copied.
         */
        src_start = new_entry->vme_end;
        new_entry = VM_MAP_ENTRY_NULL;
        while ((src_start >= src_end) && (src_end != 0)) {
            if (src_map != base_map) {
                submap_map_t *ptr;

                ptr = parent_maps;
                assert(ptr != NULL);
                parent_maps = parent_maps->next;
                vm_map_lock(ptr->parent_map);
                vm_map_unlock(src_map);
                src_map = ptr->parent_map;
                src_start = ptr->base_start;
                src_end = ptr->base_end;
                if ((src_end > src_start) &&
                    !vm_map_lookup_entry(
                        src_map, src_start, &tmp_entry))
                    RETURN(KERN_INVALID_ADDRESS);
                kfree((vm_offset_t)ptr, sizeof(submap_map_t));
                if(parent_maps == NULL)
                    map_share = FALSE;
                src_entry = tmp_entry->vme_prev;
            } else
                break;
        }
        if ((src_start >= src_end) && (src_end != 0))
            break;

        /*
         *	Verify that there are no gaps in the region
         */

        tmp_entry = src_entry->vme_next;
        if ((tmp_entry->vme_start != src_start) ||
            (tmp_entry == vm_map_to_entry(src_map)))
            RETURN(KERN_INVALID_ADDRESS);
    }

    /*
     * If the source should be destroyed, do it now, since the
     * copy was successful.
     */
    if (src_destroy) {
        (void) vm_map_delete(src_map,
                             trunc_page(src_addr),
                             src_end,
                             (src_map == kernel_map) ?
                                 VM_MAP_REMOVE_KUNWIRE :
                                 VM_MAP_NO_FLAGS);
    }

    vm_map_unlock(src_map);

    /* Fix-up start and end points in copy.  This is necessary */
    /* when the various entries in the copy object were picked */
    /* up from different sub-maps */

    tmp_entry = vm_map_copy_first_entry(copy);
    while (tmp_entry != vm_map_copy_to_entry(copy)) {
        tmp_entry->vme_end = copy_addr +
            (tmp_entry->vme_end - tmp_entry->vme_start);
        tmp_entry->vme_start = copy_addr;
        copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
        tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
    }

    *copy_result = copy;
    return(KERN_SUCCESS);

#undef	RETURN
}
/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */
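/*
 * Illustrative sketch (not part of the build): a caller holding a VM
 * object reference can wrap it in a copy object here and then hand it
 * to vm_map_copyout().  "dst_map" is a placeholder.
 */
#if 0
static kern_return_t
example_map_object(vm_map_t dst_map, vm_offset_t *mapped_addr)
{
    vm_object_t     object;
    vm_map_copy_t   copy;
    vm_size_t       size = PAGE_SIZE;
    kern_return_t   kr;

    object = vm_object_allocate(size);      /* reference donated below */
    kr = vm_map_copyin_object(object, 0, size, &copy);
    if (kr != KERN_SUCCESS)
        return kr;
    /* On success the copy (and the donated reference) is consumed;
     * a real caller would vm_map_copy_discard() if copyout failed. */
    return vm_map_copyout(dst_map, mapped_addr, copy);
}
#endif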
kern_return_t
vm_map_copyin_object(
    vm_object_t         object,
    vm_object_offset_t  offset,         /* offset of region in object */
    vm_object_size_t    size,           /* size of region in object */
    vm_map_copy_t       *copy_result)   /* OUT */
{
    vm_map_copy_t   copy;               /* Resulting copy */

    /*
     *	We drop the object into a special copy object
     *	that contains the object directly.
     */

    copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
    copy->type = VM_MAP_COPY_OBJECT;
    copy->cpy_object = object;
    copy->cpy_index = 0;
    copy->offset = offset;
    copy->size = size;

    *copy_result = copy;
    return(KERN_SUCCESS);
}
void
vm_map_fork_share(
    vm_map_t        old_map,
    vm_map_entry_t  old_entry,
    vm_map_t        new_map)
{
    vm_object_t     object;
    vm_map_entry_t  new_entry;
    kern_return_t   result;

    /*
     *	New sharing code.  New map entry
     *	references original object.  Internal
     *	objects use asynchronous copy algorithm for
     *	future copies.  First make sure we have
     *	the right object.  If we need a shadow,
     *	or someone else already has one, then
     *	make a new shadow and share it.
     */

    object = old_entry->object.vm_object;
    if (old_entry->is_sub_map) {
        assert(old_entry->wired_count == 0);
        if(old_entry->use_pmap) {
            result = pmap_nest(new_map->pmap,
                               (old_entry->object.sub_map)->pmap,
                               old_entry->vme_start,
                               old_entry->vme_end - old_entry->vme_start);
            if(result)
                panic("vm_map_fork_share: pmap_nest failed!");
        }
    } else if (object == VM_OBJECT_NULL) {
        object = vm_object_allocate((vm_size_t)(old_entry->vme_end -
                                                old_entry->vme_start));
        old_entry->offset = 0;
        old_entry->object.vm_object = object;
        assert(!old_entry->needs_copy);
    } else if (object->copy_strategy !=
               MEMORY_OBJECT_COPY_SYMMETRIC) {

        /*
         *	We are already using an asymmetric
         *	copy, and therefore we already have
         *	the right object.
         */

        assert(! old_entry->needs_copy);
    }
    else if (old_entry->needs_copy ||	/* case 1 */
             object->shadowed ||	/* case 2 */
             (!object->true_share &&	/* case 3 */
              !old_entry->is_shared &&
              (object->size >
               (vm_size_t)(old_entry->vme_end -
                           old_entry->vme_start)))) {
        /*
         *	We need to create a shadow.
         *	There are three cases here.
         *	In the first case, we need to
         *	complete a deferred symmetrical
         *	copy that we participated in.
         *	In the second and third cases,
         *	we need to create the shadow so
         *	that changes that we make to the
         *	object do not interfere with
         *	any symmetrical copies which
         *	have occurred (case 2) or which
         *	might occur (case 3).
         *
         *	The first case is when we had
         *	deferred shadow object creation
         *	via the entry->needs_copy mechanism.
         *	This mechanism only works when
         *	only one entry points to the source
         *	object, and we are about to create
         *	a second entry pointing to the
         *	same object. The problem is that
         *	there is no way of mapping from
         *	an object to the entries pointing
         *	to it. (Deferred shadow creation
         *	works with one entry because it occurs
         *	at fault time, and we walk from the
         *	entry to the object when handling
         *	the fault.)
         *
         *	The second case is when the object
         *	to be shared has already been copied
         *	with a symmetric copy, but we point
         *	directly to the object without
         *	needs_copy set in our entry. (This
         *	can happen because different ranges
         *	of an object can be pointed to by
         *	different entries. In particular,
         *	a single entry pointing to an object
         *	can be split by a call to vm_inherit,
         *	which, combined with task_create, can
         *	result in the different entries
         *	having different needs_copy values.)
         *	The shadowed flag in the object allows
         *	us to detect this case. The problem
         *	with this case is that if this object
         *	has or will have shadows, then we
         *	must not perform an asymmetric copy
         *	of this object, since such a copy
         *	allows the object to be changed, which
         *	will break the previous symmetrical
         *	copies (which rely upon the object
         *	not changing). In a sense, the shadowed
         *	flag says "don't change this object".
         *	We fix this by creating a shadow
         *	object for this object, and sharing
         *	that. This works because we are free
         *	to change the shadow object (and thus
         *	to use an asymmetric copy strategy);
         *	this is also semantically correct,
         *	since this object is temporary, and
         *	therefore a copy of the object is
         *	as good as the object itself. (This
         *	is not true for permanent objects,
         *	since the pager needs to see changes,
         *	which won't happen if the changes
         *	are made to a copy.)
         *
         *	The third case is when the object
         *	to be shared has parts sticking
         *	outside of the entry we're working
         *	with, and thus may in the future
         *	be subject to a symmetrical copy.
         *	(This is a preemptive version of
         *	case 2.)
         */
        assert(!(object->shadowed && old_entry->is_shared));
        vm_object_shadow(&old_entry->object.vm_object,
                         &old_entry->offset,
                         (vm_size_t) (old_entry->vme_end -
                                      old_entry->vme_start));

        /*
         *	If we're making a shadow for other than
         *	copy on write reasons, then we have
         *	to remove write permission.
         */

        /* CDY FIX this! page_protect! */
        if (!old_entry->needs_copy &&
            (old_entry->protection & VM_PROT_WRITE)) {
            if(old_entry->is_sub_map && old_entry->use_pmap) {
                pmap_protect(old_entry->object.sub_map->pmap,
                             old_entry->vme_start,
                             old_entry->vme_end,
                             old_entry->protection & ~VM_PROT_WRITE);
            } else {
                pmap_protect(vm_map_pmap(old_map),
                             old_entry->vme_start,
                             old_entry->vme_end,
                             old_entry->protection & ~VM_PROT_WRITE);
            }
        }

        old_entry->needs_copy = FALSE;
        object = old_entry->object.vm_object;
    }

    /*
     *	If object was using a symmetric copy strategy,
     *	change its copy strategy to the default
     *	asymmetric copy strategy, which is copy_delay
     *	in the non-norma case and copy_call in the
     *	norma case. Bump the reference count for the
     *	new entry.
     */

    if(old_entry->is_sub_map) {
        vm_map_lock(old_entry->object.sub_map);
        vm_map_reference(old_entry->object.sub_map);
        vm_map_unlock(old_entry->object.sub_map);
    } else {
        vm_object_lock(object);
        object->ref_count++;
        vm_object_res_reference(object);
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        }
        vm_object_unlock(object);
    }

    /*
     *	Clone the entry, using object ref from above.
     *	Mark both entries as shared.
     */

    new_entry = vm_map_entry_create(new_map);
    vm_map_entry_copy(new_entry, old_entry);
    old_entry->is_shared = TRUE;
    new_entry->is_shared = TRUE;

    /*
     *	Insert the entry into the new map -- we
     *	know we're inserting at the end of the new
     *	map.
     */

    vm_map_entry_link(new_map, vm_map_last_entry(new_map), new_entry);

    /*
     *	Update the physical map
     */

    if (old_entry->is_sub_map) {
        /* Bill Angell pmap support goes here */
    } else {
        pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
                  old_entry->vme_end - old_entry->vme_start,
                  old_entry->vme_start);
    }
}
boolean_t
vm_map_fork_copy(
    vm_map_t        old_map,
    vm_map_entry_t  *old_entry_p,
    vm_map_t        new_map)
{
    vm_map_entry_t old_entry = *old_entry_p;
    vm_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
    vm_offset_t start = old_entry->vme_start;
    vm_map_copy_t copy;
    vm_map_entry_t last = vm_map_last_entry(new_map);

    vm_map_unlock(old_map);
    /*
     *	Use maxprot version of copyin because we
     *	care about whether this memory can ever
     *	be accessed, not just whether it's accessible
     *	right now.
     */
    if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
        != KERN_SUCCESS) {
        /*
         *	The map might have changed while it
         *	was unlocked, check it again.  Skip
         *	any blank space or permanently
         *	unreadable region.
         */
        vm_map_lock(old_map);
        if (!vm_map_lookup_entry(old_map, start, &last) ||
            (last->max_protection & VM_PROT_READ) ==
            VM_PROT_NONE) {
            last = last->vme_next;
        }
        *old_entry_p = last;

        /*
         * XXX	For some error returns, want to
         * XXX	skip to the next element.  Note
         *	that INVALID_ADDRESS and
         *	PROTECTION_FAILURE are handled above.
         */

        return FALSE;
    }

    /*
     *	Insert the copy into the new map
     */

    vm_map_copy_insert(new_map, last, copy);

    /*
     *	Pick up the traversal at the end of
     *	the copied region.
     */

    vm_map_lock(old_map);
    start += entry_size;
    if (! vm_map_lookup_entry(old_map, start, &last)) {
        last = last->vme_next;
    } else {
        vm_map_clip_start(old_map, last, start);
    }
    *old_entry_p = last;

    return TRUE;
}
/*
 *	vm_map_fork:
 *
 *	Create and return a new map based on the old
 *	map, according to the inheritance values on the
 *	regions in that map.
 *
 *	The source map must not be locked.
 */
vm_map_t
vm_map_fork(
    vm_map_t    old_map)
{
    pmap_t          new_pmap = pmap_create((vm_size_t) 0);
    vm_map_t        new_map;
    vm_map_entry_t  old_entry;
    vm_size_t       new_size = 0, entry_size;
    vm_map_entry_t  new_entry;
    boolean_t       src_needs_copy;
    boolean_t       new_entry_needs_copy;

    vm_map_reference_swap(old_map);
    vm_map_lock(old_map);

    new_map = vm_map_create(new_pmap,
                            old_map->min_offset,
                            old_map->max_offset,
                            old_map->hdr.entries_pageable);

    for (
        old_entry = vm_map_first_entry(old_map);
        old_entry != vm_map_to_entry(old_map);
        ) {

        entry_size = old_entry->vme_end - old_entry->vme_start;

        switch (old_entry->inheritance) {
        case VM_INHERIT_NONE:
            break;

        case VM_INHERIT_SHARE:
            vm_map_fork_share(old_map, old_entry, new_map);
            new_size += entry_size;
            break;

        case VM_INHERIT_COPY:

            /*
             *	Inline the copy_quickly case;
             *	upon failure, fall back on call
             *	to vm_map_fork_copy.
             */

            if(old_entry->is_sub_map)
                break;
            if (old_entry->wired_count != 0) {
                goto slow_vm_map_fork_copy;
            }

            new_entry = vm_map_entry_create(new_map);
            vm_map_entry_copy(new_entry, old_entry);
            /* clear address space specifics */
            new_entry->use_pmap = FALSE;

            if (! vm_object_copy_quickly(
                    &new_entry->object.vm_object,
                    old_entry->offset,
                    (old_entry->vme_end -
                        old_entry->vme_start),
                    &src_needs_copy,
                    &new_entry_needs_copy)) {
                vm_map_entry_dispose(new_map, new_entry);
                goto slow_vm_map_fork_copy;
            }

            /*
             *	Handle copy-on-write obligations
             */

            if (src_needs_copy && !old_entry->needs_copy) {
                vm_object_pmap_protect(
                    old_entry->object.vm_object,
                    old_entry->offset,
                    (old_entry->vme_end -
                        old_entry->vme_start),
                    ((old_entry->is_shared
                        || old_entry->is_sub_map)
                        ? PMAP_NULL :
                        old_map->pmap),
                    old_entry->vme_start,
                    old_entry->protection & ~VM_PROT_WRITE);

                old_entry->needs_copy = TRUE;
            }
            new_entry->needs_copy = new_entry_needs_copy;

            /*
             *	Insert the entry at the end
             *	of the map.
             */

            vm_map_entry_link(new_map, vm_map_last_entry(new_map),
                              new_entry);
            new_size += entry_size;
            break;

        slow_vm_map_fork_copy:
            if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
                new_size += entry_size;
            }
            continue;
        }
        old_entry = old_entry->vme_next;
    }

    new_map->size = new_size;
    vm_map_unlock(old_map);
    vm_map_deallocate(old_map);

    return(new_map);
}
/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
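/*
 * Illustrative sketch (not part of the build): the page-fault path is the
 * intended caller of vm_map_lookup_locked().  The map is read-locked going
 * in; on success the object comes back locked and the version stamp can
 * later be re-checked with vm_map_verify().  All names here are
 * placeholders, and error-path lock handling is elided.
 */
#if 0
static kern_return_t
example_fault_lookup(vm_map_t map, vm_offset_t vaddr)
{
    vm_map_version_t    version;
    vm_object_t         object;
    vm_object_offset_t  offset, lo_offset, hi_offset;
    vm_prot_t           prot;
    boolean_t           wired;
    int                 behavior;
    vm_map_t            real_map;   /* returned through the pmap_map argument */
    kern_return_t       kr;

    vm_map_lock_read(map);
    kr = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ,
                              &version, &object, &offset, &prot,
                              &wired, &behavior,
                              &lo_offset, &hi_offset, &real_map);
    if (kr != KERN_SUCCESS)
        return kr;              /* error-path unlocking elided */

    /* ... resolve the fault against (object, offset); the map lock(s)
     * are still held per the contract above ... */
    vm_object_unlock(object);
    return KERN_SUCCESS;
}
#endif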
kern_return_t
vm_map_lookup_locked(
    vm_map_t            *var_map,       /* IN/OUT */
    register vm_offset_t vaddr,
    register vm_prot_t   fault_type,
    vm_map_version_t    *out_version,   /* OUT */
    vm_object_t         *object,        /* OUT */
    vm_object_offset_t  *offset,        /* OUT */
    vm_prot_t           *out_prot,      /* OUT */
    boolean_t           *wired,         /* OUT */
    int                 *behavior,      /* OUT */
    vm_object_offset_t  *lo_offset,     /* OUT */
    vm_object_offset_t  *hi_offset,     /* OUT */
    vm_map_t            *pmap_map)
{
    vm_map_entry_t      entry;
    register vm_map_t   map = *var_map;
    vm_map_t            old_map = *var_map;
    vm_map_t            cow_sub_map_parent = VM_MAP_NULL;
    vm_offset_t         cow_parent_vaddr;
    vm_offset_t         old_start;
    vm_offset_t         old_end;
    register vm_prot_t  prot;

    *pmap_map = map;
    RetryLookup: ;

    /*
     *	If the map has an interesting hint, try it before calling
     *	full blown lookup routine.
     */

    mutex_lock(&map->s_lock);
    entry = map->hint;
    mutex_unlock(&map->s_lock);

    if ((entry == vm_map_to_entry(map)) ||
        (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
        vm_map_entry_t tmp_entry;

        /*
         *	Entry was either not a valid hint, or the vaddr
         *	was not contained in the entry, so do a full lookup.
         */
        if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
            if((cow_sub_map_parent) && (cow_sub_map_parent != map))
                vm_map_unlock(cow_sub_map_parent);
            if((*pmap_map != map)
               && (*pmap_map != cow_sub_map_parent))
                vm_map_unlock(*pmap_map);
            return KERN_INVALID_ADDRESS;
        }

        entry = tmp_entry;
    }
    if(map == old_map) {
        old_start = entry->vme_start;
        old_end = entry->vme_end;
    }
    /*
     *	Handle submaps.  Drop lock on upper map, submap is
     *	returned locked.
     */

submap_recurse:
    if (entry->is_sub_map) {
        vm_offset_t     local_vaddr;
        vm_offset_t     end_delta;
        vm_offset_t     start_delta;
        vm_offset_t     object_start_delta;
        vm_map_entry_t  submap_entry;
        boolean_t       mapped_needs_copy=FALSE;

        local_vaddr = vaddr;

        if ((!entry->needs_copy) && (entry->use_pmap)) {
            /* if pmap_map equals map we unlock below */
            if ((*pmap_map != map) &&
                (*pmap_map != cow_sub_map_parent))
                vm_map_unlock(*pmap_map);
            *pmap_map = entry->object.sub_map;
        }

        if(entry->needs_copy) {
            if (!mapped_needs_copy) {
                if (vm_map_lock_read_to_write(map)) {
                    vm_map_lock_read(map);
                    if(*pmap_map == entry->object.sub_map)
                        *pmap_map = map;
                    goto RetryLookup;
                }
                vm_map_lock_read(entry->object.sub_map);
                cow_sub_map_parent = map;
                /* reset base to map before cow object */
                /* this is the map which will accept */
                /* the new cow object */
                old_start = entry->vme_start;
                old_end = entry->vme_end;
                cow_parent_vaddr = vaddr;
                mapped_needs_copy = TRUE;
            } else {
                vm_map_lock_read(entry->object.sub_map);
                if((cow_sub_map_parent != map) &&
                   (*pmap_map != map))
                    vm_map_unlock(map);
            }
        } else {
            vm_map_lock_read(entry->object.sub_map);
            /* leave map locked if it is a target */
            /* cow sub_map above otherwise, just  */
            /* follow the maps down to the object */
            /* here we unlock knowing we are not  */
            /* revisiting the map. */
            if((*pmap_map != map) && (map != cow_sub_map_parent))
                vm_map_unlock_read(map);
        }

        *var_map = map = entry->object.sub_map;

        /* calculate the offset in the submap for vaddr */
        local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;

RetrySubMap:
        if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
            if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
                vm_map_unlock(cow_sub_map_parent);
            }
            if((*pmap_map != map)
               && (*pmap_map != cow_sub_map_parent)) {
                vm_map_unlock(*pmap_map);
            }
            *pmap_map = map;
            return KERN_INVALID_ADDRESS;
        }

        /* find the attenuated shadow of the underlying object */
        /* on our target map */

        /* In English: the submap object may extend beyond the */
        /* region mapped by the entry, or may only fill a portion */
        /* of it.  For our purposes, we only care if the object */
        /* doesn't fill.  In this case the area which will */
        /* ultimately be clipped in the top map will only need */
        /* to be as big as the portion of the underlying entry */
        /* which is mapped */
        start_delta = submap_entry->vme_start > entry->offset ?
            submap_entry->vme_start - entry->offset : 0;

        end_delta =
            (entry->offset + start_delta + (old_end - old_start)) <=
                submap_entry->vme_end ?
                0 : (entry->offset +
                     (old_end - old_start))
                    - submap_entry->vme_end;

        old_start += start_delta;
        old_end -= end_delta;

        if(submap_entry->is_sub_map) {
            entry = submap_entry;
            vaddr = local_vaddr;
            goto submap_recurse;
        }
        if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {

            vm_object_t     copy_object;
            vm_offset_t     local_start;
            vm_offset_t     local_end;
            boolean_t       copied_slowly = FALSE;

            if (vm_map_lock_read_to_write(map)) {
                vm_map_lock_read(map);
                old_start -= start_delta;
                old_end += end_delta;
                goto RetrySubMap;
            }

            if (submap_entry->object.vm_object == VM_OBJECT_NULL) {
                submap_entry->object.vm_object =
                    vm_object_allocate(
                        (vm_size_t)
                        (submap_entry->vme_end
                         - submap_entry->vme_start));
                submap_entry->offset = 0;
            }
            local_start = local_vaddr -
                          (cow_parent_vaddr - old_start);
            local_end = local_vaddr +
                        (old_end - cow_parent_vaddr);
            vm_map_clip_start(map, submap_entry, local_start);
            vm_map_clip_end(map, submap_entry, local_end);

            /* This is the COW case, lets connect */
            /* an entry in our space to the underlying */
            /* object in the submap, bypassing the */
            /* submap. */

            if(submap_entry->wired_count != 0) {
                vm_object_lock(
                    submap_entry->object.vm_object);
                vm_object_copy_slowly(
                    submap_entry->object.vm_object,
                    submap_entry->offset,
                    submap_entry->vme_end -
                        submap_entry->vme_start,
                    FALSE,
                    &copy_object);
                copied_slowly = TRUE;
            } else {
                /* set up shadow object */
                copy_object = submap_entry->object.vm_object;
                vm_object_reference(copy_object);
                submap_entry->object.vm_object->shadowed = TRUE;
                submap_entry->needs_copy = TRUE;
                vm_object_pmap_protect(
                    submap_entry->object.vm_object,
                    submap_entry->offset,
                    submap_entry->vme_end -
                        submap_entry->vme_start,
                    submap_entry->is_shared ?
                        PMAP_NULL : map->pmap,
                    submap_entry->vme_start,
                    submap_entry->protection &
                        ~VM_PROT_WRITE);
            }

            /* This works differently than the */
            /* normal submap case. We go back */
            /* to the parent of the cow map and */
            /* clip out the target portion of */
            /* the sub_map, substituting the */
            /* new copy object. */

            vm_map_unlock(map);
            local_start = old_start;
            local_end = old_end;
            map = cow_sub_map_parent;
            *var_map = cow_sub_map_parent;
            vaddr = cow_parent_vaddr;
            cow_sub_map_parent = NULL;

            if(!vm_map_lookup_entry(map,
                                    vaddr, &entry)) {
                vm_object_deallocate(
                    copy_object);
                vm_map_lock_write_to_read(map);
                return KERN_INVALID_ADDRESS;
            }

            /* clip out the portion of space */
            /* mapped by the sub map which */
            /* corresponds to the underlying */
            /* object */
            vm_map_clip_start(map, entry, local_start);
            vm_map_clip_end(map, entry, local_end);

            /* substitute copy object for */
            /* shared map entry */
            vm_map_deallocate(entry->object.sub_map);
            entry->is_sub_map = FALSE;
            entry->object.vm_object = copy_object;

            entry->protection |= VM_PROT_WRITE;
            entry->max_protection |= VM_PROT_WRITE;
            if(copied_slowly) {
                entry->offset = 0;
                entry->needs_copy = FALSE;
                entry->is_shared = FALSE;
            } else {
                entry->offset = submap_entry->offset;
                entry->needs_copy = TRUE;
                if(entry->inheritance == VM_INHERIT_SHARE)
                    entry->inheritance = VM_INHERIT_COPY;
                if (map != old_map)
                    entry->is_shared = TRUE;
            }
            if(entry->inheritance == VM_INHERIT_SHARE)
                entry->inheritance = VM_INHERIT_COPY;

            vm_map_lock_write_to_read(map);
        } else {
            if((cow_sub_map_parent)
               && (cow_sub_map_parent != *pmap_map)
               && (cow_sub_map_parent != map)) {
                vm_map_unlock(cow_sub_map_parent);
            }
            entry = submap_entry;
            vaddr = local_vaddr;
        }
    }
    /*
     *	Check whether this task is allowed to have
     *	this page.
     */

    prot = entry->protection;
    if ((fault_type & (prot)) != fault_type) {
        if (*pmap_map != map) {
            vm_map_unlock(*pmap_map);
        }
        *pmap_map = map;
        return KERN_PROTECTION_FAILURE;
    }

    /*
     *	If this page is not pageable, we have to get
     *	it for all possible accesses.
     */

    if (*wired = (entry->wired_count != 0))
        prot = fault_type = entry->protection;

    /*
     *	If the entry was copy-on-write, we either ...
     */

    if (entry->needs_copy) {
        /*
         *	If we want to write the page, we may as well
         *	handle that now since we've got the map locked.
         *
         *	If we don't need to write the page, we just
         *	demote the permissions allowed.
         */

        if (fault_type & VM_PROT_WRITE || *wired) {
            /*
             *	Make a new object, and place it in the
             *	object chain.  Note that no new references
             *	have appeared -- one just moved from the
             *	map to the new object.
             */

            if (vm_map_lock_read_to_write(map)) {
                vm_map_lock_read(map);
                goto RetryLookup;
            }
            vm_object_shadow(&entry->object.vm_object,
                             &entry->offset,
                             (vm_size_t) (entry->vme_end -
                                          entry->vme_start));

            entry->object.vm_object->shadowed = TRUE;
            entry->needs_copy = FALSE;
            vm_map_lock_write_to_read(map);
        }
        else {
            /*
             *	We're attempting to read a copy-on-write
             *	page -- don't allow writes.
             */

            prot &= (~VM_PROT_WRITE);
        }
    }

    /*
     *	Create an object if necessary.
     */
    if (entry->object.vm_object == VM_OBJECT_NULL) {

        if (vm_map_lock_read_to_write(map)) {
            vm_map_lock_read(map);
            goto RetryLookup;
        }

        entry->object.vm_object = vm_object_allocate(
            (vm_size_t)(entry->vme_end - entry->vme_start));
        entry->offset = 0;
        vm_map_lock_write_to_read(map);
    }

    /*
     *	Return the object/offset from this entry.  If the entry
     *	was copy-on-write or empty, it has been fixed up.  Also
     *	return the protection.
     */

    *offset = (vaddr - entry->vme_start) + entry->offset;
    *object = entry->object.vm_object;
    *out_prot = prot;
    *behavior = entry->behavior;
    *lo_offset = entry->offset;
    *hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;

    /*
     *	Lock the object to prevent it from disappearing
     */

    vm_object_lock(*object);

    /*
     *	Save the version number
     */

    out_version->main_timestamp = map->timestamp;

    return KERN_SUCCESS;
}
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
    register vm_map_t           map,
    register vm_map_version_t   *version)   /* REF */
{
    boolean_t   result;

    vm_map_lock_read(map);
    result = (map->timestamp == version->main_timestamp);

    if (!result)
        vm_map_unlock_read(map);

    return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */
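/*
 * Illustrative sketch (not part of the build): a caller that dropped the
 * map lock to do blocking work re-checks its saved version; on success
 * the map is left read-locked and is released with vm_map_verify_done()
 * (a macro in vm/vm_map.h, as noted above).  "version" is assumed to
 * have been captured earlier, e.g. by vm_map_lookup_locked().
 */
#if 0
    if (!vm_map_verify(map, &version)) {
        /* The map changed behind our back -- redo the lookup. */
    } else {
        /* ... earlier lookup results are still valid here ... */
        vm_map_verify_done(map, &version);
    }
#endif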
/*
 *	vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
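/*
 * Illustrative sketch (not part of the build): a typical caller asks for
 * the basic flavor and lets the kernel round *address and *size to the
 * containing entry.  "target_map" and "probe_addr" are placeholders.
 */
#if 0
static kern_return_t
example_probe_region(vm_map_t target_map, vm_offset_t probe_addr)
{
    vm_offset_t                 addr = probe_addr;
    vm_size_t                   len;
    vm_region_basic_info_data_t binfo;
    mach_msg_type_number_t      cnt = VM_REGION_BASIC_INFO_COUNT;
    ipc_port_t                  name;

    /* On success, [addr, addr + len) is the region at or above probe_addr,
     * with binfo.protection, binfo.inheritance, etc. filled in. */
    return vm_region(target_map, &addr, &len,
                     VM_REGION_BASIC_INFO,
                     (vm_region_info_t) &binfo, &cnt, &name);
}
#endif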
kern_return_t
vm_region(
    vm_map_t                map,
    vm_offset_t             *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    vm_region_flavor_t      flavor,         /* IN */
    vm_region_info_t        info,           /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    ipc_port_t              *object_name)   /* OUT */
{
    vm_map_entry_t          tmp_entry;
    register
    vm_map_entry_t          entry;
    register
    vm_offset_t             start;
    vm_region_basic_info_t  basic;
    vm_region_extended_info_t extended;
    vm_region_top_info_t    top;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    switch (flavor) {

    case VM_REGION_BASIC_INFO:
    {
        if (*count < VM_REGION_BASIC_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }

        start = entry->vme_start;

        basic->offset = entry->offset;
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *address = start;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
        } else {
            basic->shared = entry->is_shared;
        }

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_EXTENDED_INFO:
    {
        if (*count < VM_REGION_EXTENDED_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        extended = (vm_region_extended_info_t) info;
        *count = VM_REGION_EXTENDED_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        extended->protection = entry->protection;
        extended->user_tag = entry->alias;
        extended->pages_resident = 0;
        extended->pages_swapped_out = 0;
        extended->pages_shared_now_private = 0;
        extended->pages_dirtied = 0;
        extended->external_pager = 0;
        extended->shadow_depth = 0;

        vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

        if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
            extended->share_mode = SM_PRIVATE;

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_TOP_INFO:
    {
        if (*count < VM_REGION_TOP_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        top = (vm_region_top_info_t) info;
        *count = VM_REGION_TOP_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        top->private_pages_resident = 0;
        top->shared_pages_resident = 0;

        vm_region_top_walk(entry, top);

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }
}
/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
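/*
 * Illustrative sketch (not part of the build): the nesting depth is both
 * an input (how far the caller is willing to descend into submaps) and
 * an output (how far the walk actually went).  Placeholders as above.
 */
#if 0
static kern_return_t
example_probe_submaps(vm_map_t target_map, vm_offset_t probe_addr)
{
    vm_offset_t                     addr = probe_addr;
    vm_size_t                       len;
    natural_t                       depth = 1;  /* enter at most one submap */
    vm_region_submap_info_data_t    sinfo;
    mach_msg_type_number_t          cnt = VM_REGION_SUBMAP_INFO_COUNT;

    /* On return, depth reports the levels traversed and sinfo.is_submap
     * says whether the returned entry is itself a submap that was not
     * descended into. */
    return vm_region_recurse(target_map, &addr, &len, &depth,
                             (vm_region_recurse_info_t) &sinfo, &cnt);
}
#endif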
kern_return_t
vm_region_recurse(
    vm_map_t                map,
    vm_offset_t             *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    natural_t               *nesting_depth, /* IN/OUT */
    vm_region_recurse_info_t info,          /* IN/OUT */
    mach_msg_type_number_t  *count)         /* IN/OUT */
{
    vm_map_entry_t          tmp_entry;
    register
    vm_map_entry_t          entry;
    register
    vm_offset_t             start;

    unsigned int            recurse_count;
    vm_map_t                submap;
    vm_map_t                base_map;
    vm_map_entry_t          base_entry;
    vm_offset_t             base_next;
    vm_offset_t             base_addr;
    vm_offset_t             baddr_start_delta;
    vm_region_submap_info_t submap_info;
    vm_region_extended_info_data_t extended;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    submap_info = (vm_region_submap_info_t) info;
    *count = VM_REGION_SUBMAP_INFO_COUNT;

    if (*count < VM_REGION_SUBMAP_INFO_COUNT)
        return(KERN_INVALID_ARGUMENT);

    start = *address;
    base_map = map;
    recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            vm_map_unlock_read(map);
            return(KERN_INVALID_ADDRESS);
        }
    } else {
        entry = tmp_entry;
    }
    *size = entry->vme_end - entry->vme_start;
    start = entry->vme_start;
    base_addr = start;
    baddr_start_delta = *address - start;
    base_next = entry->vme_end;
    base_entry = entry;

    while(entry->is_sub_map && recurse_count) {
        recurse_count--;
        vm_map_lock_read(entry->object.sub_map);

        if(entry == base_entry) {
            start = entry->offset;
            start += *address - entry->vme_start;
        }

        submap = entry->object.sub_map;
        vm_map_unlock_read(map);
        map = submap;

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next)
                    == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
        } else {
            entry = tmp_entry;
        }

        if(start <= entry->vme_start) {
            vm_offset_t old_start = start;
            if(baddr_start_delta) {
                base_addr += (baddr_start_delta);
                *size -= baddr_start_delta;
                baddr_start_delta = 0;
            }
            if(base_next <=
               (base_addr += (entry->vme_start - start))) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            *size -= entry->vme_start - start;
            if (*size > (entry->vme_end - entry->vme_start)) {
                *size = entry->vme_end - entry->vme_start;
            }
        } else {
            if(baddr_start_delta) {
                if((start - entry->vme_start)
                        < baddr_start_delta) {
                    base_addr += start - entry->vme_start;
                    *size -= start - entry->vme_start;
                } else {
                    base_addr += baddr_start_delta;
                    *size += baddr_start_delta;
                }
                baddr_start_delta = 0;
            }
            base_addr += entry->vme_start;
            if(base_addr >= base_next) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            if (*size > (entry->vme_end - start))
                *size = entry->vme_end - start;

            start = entry->vme_start - start;
        }

        start += entry->offset;
    }
    *nesting_depth -= recurse_count;
    if(entry != base_entry) {
        start = entry->vme_start + (start - entry->offset);
    }

    submap_info->user_tag = entry->alias;
    submap_info->offset = entry->offset;
    submap_info->protection = entry->protection;
    submap_info->inheritance = entry->inheritance;
    submap_info->max_protection = entry->max_protection;
    submap_info->behavior = entry->behavior;
    submap_info->user_wired_count = entry->user_wired_count;
    submap_info->is_submap = entry->is_sub_map;
    submap_info->object_id = (vm_offset_t)entry->object.vm_object;
    *address = base_addr;

    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;

    if(!entry->is_sub_map) {
        vm_region_walk(entry, &extended, entry->offset,
                       entry->vme_end - start, map, start);
        submap_info->share_mode = extended.share_mode;
        if (extended.external_pager && extended.ref_count == 2
                && extended.share_mode == SM_SHARED)
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = extended.ref_count;
    } else {
        if(entry->use_pmap)
            submap_info->share_mode = SM_TRUESHARED;
        else
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = entry->object.sub_map->ref_count;
    }

    submap_info->pages_resident = extended.pages_resident;
    submap_info->pages_swapped_out = extended.pages_swapped_out;
    submap_info->pages_shared_now_private =
        extended.pages_shared_now_private;
    submap_info->pages_dirtied = extended.pages_dirtied;
    submap_info->external_pager = extended.external_pager;
    submap_info->shadow_depth = extended.shadow_depth;

    vm_map_unlock_read(map);
    return(KERN_SUCCESS);
}
/*
 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 * Goes away after regular vm_region_recurse function migrates to
 * 64 bits
 * vm_region_recurse: A form of vm_region which follows the
 * submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
    vm_map_t                map,
    vm_offset_t             *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    natural_t               *nesting_depth, /* IN/OUT */
    vm_region_recurse_info_t info,          /* IN/OUT */
    mach_msg_type_number_t  *count)         /* IN/OUT */
{
    vm_map_entry_t          tmp_entry;
    register
    vm_map_entry_t          entry;
    register
    vm_offset_t             start;

    unsigned int            recurse_count;
    vm_map_t                submap;
    vm_map_t                base_map;
    vm_map_entry_t          base_entry;
    vm_offset_t             base_next;
    vm_offset_t             base_addr;
    vm_offset_t             baddr_start_delta;
    vm_region_submap_info_64_t submap_info;
    vm_region_extended_info_data_t extended;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    submap_info = (vm_region_submap_info_64_t) info;
    *count = VM_REGION_SUBMAP_INFO_COUNT;

    if (*count < VM_REGION_SUBMAP_INFO_COUNT)
        return(KERN_INVALID_ARGUMENT);

    start = *address;
    base_map = map;
    recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            vm_map_unlock_read(map);
            return(KERN_INVALID_ADDRESS);
        }
    } else {
        entry = tmp_entry;
    }
    *size = entry->vme_end - entry->vme_start;
    start = entry->vme_start;
    base_addr = start;
    baddr_start_delta = *address - start;
    base_next = entry->vme_end;
    base_entry = entry;

    while(entry->is_sub_map && recurse_count) {
        recurse_count--;
        vm_map_lock_read(entry->object.sub_map);

        if(entry == base_entry) {
            start = entry->offset;
            start += *address - entry->vme_start;
        }

        submap = entry->object.sub_map;
        vm_map_unlock_read(map);
        map = submap;

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next)
                    == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
        } else {
            entry = tmp_entry;
        }

        if(start <= entry->vme_start) {
            vm_offset_t old_start = start;
            if(baddr_start_delta) {
                base_addr += (baddr_start_delta);
                *size -= baddr_start_delta;
                baddr_start_delta = 0;
            }
            if(base_next <=
               (base_addr += (entry->vme_start - start))) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            *size -= entry->vme_start - start;
            if (*size > (entry->vme_end - entry->vme_start)) {
                *size = entry->vme_end - entry->vme_start;
            }
        } else {
            if(baddr_start_delta) {
                if((start - entry->vme_start)
                        < baddr_start_delta) {
                    base_addr += start - entry->vme_start;
                    *size -= start - entry->vme_start;
                } else {
                    base_addr += baddr_start_delta;
                    *size += baddr_start_delta;
                }
                baddr_start_delta = 0;
            }
            base_addr += entry->vme_start;
            if(base_addr >= base_next) {
                vm_map_unlock_read(map);
                map = base_map;
                start = base_next;
                recurse_count = 0;
                *nesting_depth = 0;
                goto LOOKUP_NEXT_BASE_ENTRY;
            }
            if (*size > (entry->vme_end - start))
                *size = entry->vme_end - start;

            start = entry->vme_start - start;
        }

        start += entry->offset;
    }
    *nesting_depth -= recurse_count;
    if(entry != base_entry) {
        start = entry->vme_start + (start - entry->offset);
    }

    submap_info->user_tag = entry->alias;
    submap_info->offset = entry->offset;
    submap_info->protection = entry->protection;
    submap_info->inheritance = entry->inheritance;
    submap_info->max_protection = entry->max_protection;
    submap_info->behavior = entry->behavior;
    submap_info->user_wired_count = entry->user_wired_count;
    submap_info->is_submap = entry->is_sub_map;
    submap_info->object_id = (vm_offset_t)entry->object.vm_object;
    *address = base_addr;

    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;

    if(!entry->is_sub_map) {
        vm_region_walk(entry, &extended, entry->offset,
                       entry->vme_end - start, map, start);
        submap_info->share_mode = extended.share_mode;
        if (extended.external_pager && extended.ref_count == 2
                && extended.share_mode == SM_SHARED)
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = extended.ref_count;
    } else {
        if(entry->use_pmap)
            submap_info->share_mode = SM_TRUESHARED;
        else
            submap_info->share_mode = SM_PRIVATE;
        submap_info->ref_count = entry->object.sub_map->ref_count;
    }

    submap_info->pages_resident = extended.pages_resident;
    submap_info->pages_swapped_out = extended.pages_swapped_out;
    submap_info->pages_shared_now_private =
        extended.pages_shared_now_private;
    submap_info->pages_dirtied = extended.pages_dirtied;
    submap_info->external_pager = extended.external_pager;
    submap_info->shadow_depth = extended.shadow_depth;

    vm_map_unlock_read(map);
    return(KERN_SUCCESS);
}
/*
 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 * Goes away after regular vm_region function migrates to
 * 64 bits
 */
7281 vm_offset_t
*address
, /* IN/OUT */
7282 vm_size_t
*size
, /* OUT */
7283 vm_region_flavor_t flavor
, /* IN */
7284 vm_region_info_t info
, /* OUT */
7285 mach_msg_type_number_t
*count
, /* IN/OUT */
7286 ipc_port_t
*object_name
) /* OUT */
7288 vm_map_entry_t tmp_entry
;
7290 vm_map_entry_t entry
;
7293 vm_region_basic_info_64_t basic
;
7294 vm_region_extended_info_t extended
;
7295 vm_region_top_info_t top
;
7297 if (map
== VM_MAP_NULL
)
7298 return(KERN_INVALID_ARGUMENT
);
    switch (flavor) {

    case VM_REGION_BASIC_INFO:
    {
        if (*count < VM_REGION_BASIC_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_64_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }

        start = entry->vme_start;

        basic->offset = entry->offset;
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
        } else {
            basic->shared = entry->is_shared;
        }

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_EXTENDED_INFO:
    {
        if (*count < VM_REGION_EXTENDED_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        extended = (vm_region_extended_info_t) info;
        *count = VM_REGION_EXTENDED_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        extended->protection = entry->protection;
        extended->user_tag = entry->alias;
        extended->pages_resident = 0;
        extended->pages_swapped_out = 0;
        extended->pages_shared_now_private = 0;
        extended->pages_dirtied = 0;
        extended->external_pager = 0;
        extended->shadow_depth = 0;

        vm_region_walk(entry, extended, entry->offset,
                       entry->vme_end - start, map, start);

        if (extended->external_pager && extended->ref_count == 2
            && extended->share_mode == SM_SHARED)
            extended->share_mode = SM_PRIVATE;

        if (object_name)
            *object_name = IP_NULL;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_TOP_INFO:
    {
        if (*count < VM_REGION_TOP_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        top = (vm_region_top_info_t) info;
        *count = VM_REGION_TOP_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        top->private_pages_resident = 0;
        top->shared_pages_resident = 0;

        vm_region_top_walk(entry, top);

        if (object_name)
            *object_name = IP_NULL;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }
}
void
vm_region_top_walk(
    vm_map_entry_t        entry,
    vm_region_top_info_t  top)
{
    register struct vm_object *obj, *tmp_obj;
    register int               ref_count;

    if (entry->object.vm_object == 0) {
        top->share_mode = SM_EMPTY;
        top->ref_count = 0;
        top->obj_id = 0;
        return;
    }
    if (entry->is_sub_map)
        vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
    else {
        obj = entry->object.vm_object;

        vm_object_lock(obj);

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
            ref_count--;

        if (obj->shadow) {
            if (ref_count == 1)
                top->private_pages_resident = obj->resident_page_count;
            else
                top->shared_pages_resident = obj->resident_page_count;
            top->ref_count  = ref_count;
            top->share_mode = SM_COW;

            while (tmp_obj = obj->shadow) {
                vm_object_lock(tmp_obj);
                vm_object_unlock(obj);
                obj = tmp_obj;

                if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
                    ref_count--;

                top->shared_pages_resident += obj->resident_page_count;
                top->ref_count += ref_count - 1;
            }
        } else {
            if (entry->needs_copy) {
                top->share_mode = SM_COW;
                top->shared_pages_resident = obj->resident_page_count;
            } else {
                if (ref_count == 1 ||
                    (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
                    top->share_mode = SM_PRIVATE;
                    top->private_pages_resident = obj->resident_page_count;
                } else {
                    top->share_mode = SM_SHARED;
                    top->shared_pages_resident = obj->resident_page_count;
                }
            }
            top->ref_count = ref_count;
        }
        top->obj_id = (int)obj;

        vm_object_unlock(obj);
    }
}
void
vm_region_walk(
    vm_map_entry_t             entry,
    vm_region_extended_info_t  extended,
    vm_object_offset_t         offset,
    vm_offset_t                range,
    vm_map_t                   map,
    vm_offset_t                va)
{
    register struct vm_object *obj, *tmp_obj;
    register vm_offset_t       last_offset;
    register int               i;
    register int               ref_count;
    void vm_region_look_for_page();

    if ((entry->object.vm_object == 0) ||
        (entry->object.vm_object->phys_contiguous)) {
        extended->share_mode = SM_EMPTY;
        extended->ref_count = 0;
        return;
    }
    if (entry->is_sub_map)
        vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
                       range, map, va);
    else {
        obj = entry->object.vm_object;

        vm_object_lock(obj);

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
            ref_count--;

        for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
            vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

        if (extended->shadow_depth || entry->needs_copy)
            extended->share_mode = SM_COW;
        else {
            if (ref_count == 1)
                extended->share_mode = SM_PRIVATE;
            else {
                if (obj->true_share)
                    extended->share_mode = SM_TRUESHARED;
                else
                    extended->share_mode = SM_SHARED;
            }
        }
        extended->ref_count = ref_count - extended->shadow_depth;

        for (i = 0; i < extended->shadow_depth; i++) {
            if ((tmp_obj = obj->shadow) == 0)
                break;
            vm_object_lock(tmp_obj);
            vm_object_unlock(obj);

            if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
                ref_count--;

            extended->ref_count += ref_count;
            obj = tmp_obj;
        }
        vm_object_unlock(obj);

        if (extended->share_mode == SM_SHARED) {
            register vm_map_entry_t  cur;
            register vm_map_entry_t  last;
            int                      my_refs;

            obj = entry->object.vm_object;
            last = vm_map_to_entry(map);
            my_refs = 0;

            if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
                ref_count--;
            for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
                my_refs += vm_region_count_obj_refs(cur, obj);

            if (my_refs == ref_count)
                extended->share_mode = SM_PRIVATE_ALIASED;
            else if (my_refs > 1)
                extended->share_mode = SM_SHARED_ALIASED;
        }
    }
}
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
    vm_object_t                object,
    vm_region_extended_info_t  extended,
    vm_object_offset_t         offset,
    int                        max_refcnt,
    int                        depth,
    vm_map_t                   map,
    vm_offset_t                va)
{
    register vm_page_t    p;
    register vm_object_t  shadow;
    register int          ref_count;
    vm_object_t           caller_object;

    shadow = object->shadow;
    caller_object = object;

    while (TRUE) {

        if ( !(object->pager_trusted) && !(object->internal))
            extended->external_pager = 1;

        if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            if (shadow && (max_refcnt == 1))
                extended->pages_shared_now_private++;

            if (p->dirty || pmap_is_modified(p->phys_addr))
                extended->pages_dirtied++;
            extended->pages_resident++;

            if(object != caller_object)
                vm_object_unlock(object);

            return;
        }
        if (object->existence_map) {
            if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

                extended->pages_swapped_out++;

                if(object != caller_object)
                    vm_object_unlock(object);

                return;
            }
        }
        if (shadow) {
            vm_object_lock(shadow);

            if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
                ref_count--;

            if (++depth > extended->shadow_depth)
                extended->shadow_depth = depth;

            if (ref_count > max_refcnt)
                max_refcnt = ref_count;

            if(object != caller_object)
                vm_object_unlock(object);

            object = shadow;
            shadow = object->shadow;
            offset = offset + object->shadow_offset;
            continue;
        }
        if(object != caller_object)
            vm_object_unlock(object);

        break;
    }
}
int
vm_region_count_obj_refs(
    vm_map_entry_t  entry,
    vm_object_t     object)
{
    register int          ref_count;
    register vm_object_t  chk_obj;
    register vm_object_t  tmp_obj;

    if (entry->object.vm_object == 0)
        return(0);

    if (entry->is_sub_map)
        ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
    else {
        ref_count = 0;

        chk_obj = entry->object.vm_object;
        vm_object_lock(chk_obj);

        while (chk_obj) {
            if (chk_obj == object)
                ref_count++;
            if (tmp_obj = chk_obj->shadow)
                vm_object_lock(tmp_obj);
            vm_object_unlock(chk_obj);

            chk_obj = tmp_obj;
        }
    }
    return(ref_count);
}
/*
 *  Routine:    vm_map_simplify
 *
 *  Description:
 *      Attempt to simplify the map representation in
 *      the vicinity of the given starting address.
 *  Note:
 *      This routine is intended primarily to keep the
 *      kernel maps more compact -- they generally don't
 *      benefit from the "expand a map entry" technology
 *      at allocation time because the adjacent entry
 *      is often wired down.
 */
void
vm_map_simplify(
    vm_map_t     map,
    vm_offset_t  start)
{
    vm_map_entry_t  this_entry;
    vm_map_entry_t  prev_entry;
    vm_map_entry_t  next_entry;

    vm_map_lock(map);
    if (
        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

        (prev_entry->vme_end == this_entry->vme_start) &&

        (prev_entry->is_shared == FALSE) &&
        (prev_entry->is_sub_map == FALSE) &&

        (this_entry->is_shared == FALSE) &&
        (this_entry->is_sub_map == FALSE) &&

        (prev_entry->inheritance == this_entry->inheritance) &&
        (prev_entry->protection == this_entry->protection) &&
        (prev_entry->max_protection == this_entry->max_protection) &&
        (prev_entry->behavior == this_entry->behavior) &&
        (prev_entry->wired_count == this_entry->wired_count) &&
        (prev_entry->user_wired_count == this_entry->user_wired_count)&&
        (prev_entry->in_transition == FALSE) &&
        (this_entry->in_transition == FALSE) &&

        (prev_entry->needs_copy == this_entry->needs_copy) &&

        (prev_entry->object.vm_object == this_entry->object.vm_object)&&
        ((prev_entry->offset +
          (prev_entry->vme_end - prev_entry->vme_start))
             == this_entry->offset)
    ) {
        SAVE_HINT(map, prev_entry);
        vm_map_entry_unlink(map, this_entry);
        prev_entry->vme_end = this_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(this_entry->object.vm_object);
        vm_map_entry_dispose(map, this_entry);
        counter(c_vm_map_simplified_lower++);
    }
    if (
        (vm_map_lookup_entry(map, start, &this_entry)) &&
        ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

        (next_entry->vme_start == this_entry->vme_end) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->is_shared == FALSE) &&
        (next_entry->is_sub_map == FALSE) &&

        (next_entry->inheritance == this_entry->inheritance) &&
        (next_entry->protection == this_entry->protection) &&
        (next_entry->max_protection == this_entry->max_protection) &&
        (next_entry->behavior == this_entry->behavior) &&
        (next_entry->wired_count == this_entry->wired_count) &&
        (next_entry->user_wired_count == this_entry->user_wired_count)&&
        (this_entry->in_transition == FALSE) &&
        (next_entry->in_transition == FALSE) &&

        (next_entry->needs_copy == this_entry->needs_copy) &&

        (next_entry->object.vm_object == this_entry->object.vm_object)&&
        ((this_entry->offset +
          (this_entry->vme_end - this_entry->vme_start))
             == next_entry->offset)
    ) {
        vm_map_entry_unlink(map, next_entry);
        this_entry->vme_end = next_entry->vme_end;
        UPDATE_FIRST_FREE(map, map->first_free);
        vm_object_deallocate(next_entry->object.vm_object);
        vm_map_entry_dispose(map, next_entry);
        counter(c_vm_map_simplified_upper++);
    }
    counter(c_vm_map_simplify_called++);
    vm_map_unlock(map);
}
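
/*
 * Illustrative only: the two merge tests above reduce to "the entries are
 * virtually adjacent, map the same object at adjacent offsets, and agree on
 * every attribute".  A condensed restatement of that predicate, using the
 * same entry fields, might look like the sketch below (the helper name is
 * ours and the code is not part of this file's build).
 */
#if 0
static boolean_t
vm_map_entries_can_merge(
    vm_map_entry_t  a,      /* entry that comes first */
    vm_map_entry_t  b)      /* entry immediately after it */
{
    return ((a->vme_end == b->vme_start) &&
            !a->is_shared && !b->is_shared &&
            !a->is_sub_map && !b->is_sub_map &&
            !a->in_transition && !b->in_transition &&
            (a->inheritance == b->inheritance) &&
            (a->protection == b->protection) &&
            (a->max_protection == b->max_protection) &&
            (a->behavior == b->behavior) &&
            (a->wired_count == b->wired_count) &&
            (a->user_wired_count == b->user_wired_count) &&
            (a->needs_copy == b->needs_copy) &&
            (a->object.vm_object == b->object.vm_object) &&
            (a->offset + (a->vme_end - a->vme_start) == b->offset));
}
#endif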
/*
 *  Routine:    vm_map_machine_attribute
 *  Purpose:
 *      Provide machine-specific attributes to mappings,
 *      such as cachability etc. for machines that provide
 *      them.  NUMA architectures and machines with big/strange
 *      caches will use this.
 *  Note:
 *      Responsibilities for locking and checking are handled here,
 *      everything else in the pmap module. If any non-volatile
 *      information must be kept, the pmap module should handle
 *      it itself. [This assumes that attributes do not
 *      need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
    vm_map_t                     map,
    vm_offset_t                  address,
    vm_size_t                    size,
    vm_machine_attribute_t       attribute,
    vm_machine_attribute_val_t*  value)     /* IN/OUT */
{
    kern_return_t  ret;

    if (address < vm_map_min(map) ||
        (address + size) > vm_map_max(map))
        return KERN_INVALID_ADDRESS;

    vm_map_lock(map);

    ret = pmap_attribute(map->pmap, address, size, attribute, value);

    vm_map_unlock(map);

    return ret;
}
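
/*
 * Illustrative only: user code reaches the routine above through the Mach
 * vm_machine_attribute() call, e.g. to flush the caches covering a freshly
 * written code buffer.  MATTR_CACHE and MATTR_VAL_CACHE_FLUSH come from
 * <mach/vm_attributes.h>; the helper name is ours and the snippet is not
 * part of this file's build.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
flush_caches_for_range(vm_address_t addr, vm_size_t len)
{
    vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

    return vm_machine_attribute(mach_task_self(), addr, len,
                                MATTR_CACHE, &value);
}
#endif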
/*
 *  vm_map_behavior_set:
 *
 *  Sets the paging reference behavior of the specified address
 *  range in the target map.  Paging reference behavior affects
 *  how pagein operations resulting from faults on the map will be
 *  clustered.
 */
kern_return_t
vm_map_behavior_set(
    vm_map_t       map,
    vm_offset_t    start,
    vm_offset_t    end,
    vm_behavior_t  new_behavior)
{
    register vm_map_entry_t  entry;
    vm_map_entry_t           temp_entry;

    XPR(XPR_VM_MAP,
        "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
        (integer_t)map, start, end, new_behavior, 0);

    switch (new_behavior) {
    case VM_BEHAVIOR_DEFAULT:
    case VM_BEHAVIOR_RANDOM:
    case VM_BEHAVIOR_SEQUENTIAL:
    case VM_BEHAVIOR_RSEQNTL:
        break;
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    vm_map_lock(map);

    /*
     *  The entire address range must be valid for the map.
     *  Note that vm_map_range_check() does a
     *  vm_map_lookup_entry() internally and returns the
     *  entry containing the start of the address range if
     *  the entire range is valid.
     */
    if (vm_map_range_check(map, start, end, &temp_entry)) {
        entry = temp_entry;
        vm_map_clip_start(map, entry, start);
    } else {
        vm_map_unlock(map);
        return(KERN_INVALID_ADDRESS);
    }

    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
        vm_map_clip_end(map, entry, end);

        entry->behavior = new_behavior;

        entry = entry->vme_next;
    }

    vm_map_unlock(map);
    return(KERN_SUCCESS);
}
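
/*
 * Illustrative only: the user-visible form of the operation above is the
 * Mach vm_behavior_set() call.  A process about to stream sequentially
 * through a large mapping can hint the pager as sketched below; the helper
 * name is ours and the snippet is not part of this file's build.
 */
#if 0
#include <mach/mach.h>

static void
hint_sequential_access(vm_address_t addr, vm_size_t len)
{
    (void) vm_behavior_set(mach_task_self(), addr, len,
                           VM_BEHAVIOR_SEQUENTIAL);
}
#endif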
#include <mach_kdb.h>
#if MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define printf  db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
        struct vm_map_links   *links);

extern void vm_map_header_print(
        struct vm_map_header  *header);

extern void vm_map_entry_print(
        vm_map_entry_t        entry);

extern void vm_follow_entry(
        vm_map_entry_t        entry);

extern void vm_follow_map(
        vm_map_t              map);
/*
 *  vm_map_links_print: [ debug ]
 */
void
vm_map_links_print(
    struct vm_map_links  *links)
{
    iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
            links->prev,
            links->next,
            links->start,
            links->end);
}

/*
 *  vm_map_header_print:    [ debug ]
 */
void
vm_map_header_print(
    struct vm_map_header  *header)
{
    vm_map_links_print(&header->links);
    iprintf("nentries=0x%x, %sentries_pageable\n",
            header->nentries,
            (header->entries_pageable ? "" : "!"));
}
/*
 *  vm_follow_entry:    [ debug ]
 */
void
vm_follow_entry(
    vm_map_entry_t entry)
{
    extern int db_indent;
    int shadows;

    iprintf("map entry 0x%x:\n", entry);

    db_indent += 2;

    shadows = vm_follow_object(entry->object.vm_object);
    iprintf("Total objects : %d\n",shadows);

    db_indent -= 2;
}
/*
 *  vm_map_entry_print: [ debug ]
 */
void
vm_map_entry_print(
    register vm_map_entry_t  entry)
{
    extern int db_indent;
    static char *inheritance_name[4] = { "share", "copy", "none", "?"};
    static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

    iprintf("map entry 0x%x:\n", entry);

    db_indent += 2;

    vm_map_links_print(&entry->links);

    iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
            entry->vme_start,
            entry->vme_end,
            entry->protection,
            entry->max_protection,
            inheritance_name[(entry->inheritance & 0x3)]);

    iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
            behavior_name[(entry->behavior & 0x3)],
            entry->wired_count,
            entry->user_wired_count);

    iprintf("%sin_transition, %sneeds_wakeup\n",
            (entry->in_transition ? "" : "!"),
            (entry->needs_wakeup ? "" : "!"));

    if (entry->is_sub_map) {
        iprintf("submap=0x%x, offset=0x%x\n",
                entry->object.sub_map,
                entry->offset);
    } else {
        iprintf("object=0x%x, offset=0x%x, ",
                entry->object.vm_object,
                entry->offset);
        printf("%sis_shared, %sneeds_copy\n",
               (entry->is_shared ? "" : "!"),
               (entry->needs_copy ? "" : "!"));
    }

    db_indent -= 2;
}
/*
 *  vm_follow_map:  [ debug ]
 */
void
vm_follow_map(
    vm_map_t map)
{
    register vm_map_entry_t  entry;
    extern int db_indent;

    iprintf("task map 0x%x:\n", map);

    db_indent += 2;

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_follow_entry(entry);
    }

    db_indent -= 2;
}
/*
 *  vm_map_print:   [ debug ]
 */
void
vm_map_print(
    register vm_map_t  map)
{
    register vm_map_entry_t  entry;
    extern int db_indent;
    char *swstate;

    iprintf("task map 0x%x:\n", map);

    db_indent += 2;

    vm_map_header_print(&map->hdr);

    iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
            map->pmap,
            map->size,
            map->ref_count,
            map->hint,
            map->first_free);

    iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
            (map->wait_for_space ? "" : "!"),
            (map->wiring_required ? "" : "!"),
            map->timestamp);

#if TASK_SWAPPER
    switch (map->sw_state) {
    case MAP_SW_IN:
        swstate = "SW_IN";
        break;
    case MAP_SW_OUT:
        swstate = "SW_OUT";
        break;
    default:
        swstate = "????";
        break;
    }
    iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
#endif /* TASK_SWAPPER */

    for (entry = vm_map_first_entry(map);
         entry && entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        vm_map_entry_print(entry);
    }

    db_indent -= 2;
}
/*
 *  Routine:    vm_map_copy_print
 *  Purpose:
 *      Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
    vm_map_copy_t  copy)
{
    extern int db_indent;
    vm_map_entry_t entry;

    printf("copy object 0x%x\n", copy);

    db_indent += 2;

    iprintf("type=%d", copy->type);
    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        printf("[entry_list]");
        break;

    case VM_MAP_COPY_OBJECT:
        printf("[object]");
        break;

    case VM_MAP_COPY_KERNEL_BUFFER:
        printf("[kernel_buffer]");
        break;

    default:
        printf("[bad type]");
        break;
    }
    printf(", offset=0x%x", copy->offset);
    printf(", size=0x%x\n", copy->size);

    switch (copy->type) {
    case VM_MAP_COPY_ENTRY_LIST:
        vm_map_header_print(&copy->cpy_hdr);
        for (entry = vm_map_copy_first_entry(copy);
             entry && entry != vm_map_copy_to_entry(copy);
             entry = entry->vme_next) {
            vm_map_entry_print(entry);
        }
        break;

    case VM_MAP_COPY_OBJECT:
        iprintf("object=0x%x\n", copy->cpy_object);
        break;

    case VM_MAP_COPY_KERNEL_BUFFER:
        iprintf("kernel buffer=0x%x", copy->cpy_kdata);
        printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
        break;
    }

    db_indent -= 2;
}
/*
 *  db_vm_map_total_size(map)   [ debug ]
 *
 *  return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
    vm_map_t  map)
{
    vm_map_entry_t  entry;
    vm_size_t       total;

    total = 0;
    for (entry = vm_map_first_entry(map);
         entry != vm_map_to_entry(map);
         entry = entry->vme_next) {
        total += entry->vme_end - entry->vme_start;
    }

    return total;
}

#endif /* MACH_KDB */
/*
 *  Routine:    vm_map_entry_insert
 *
 *  Description:    This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
    vm_map_t            map,
    vm_map_entry_t      insp_entry,
    vm_offset_t         start,
    vm_offset_t         end,
    vm_object_t         object,
    vm_object_offset_t  offset,
    boolean_t           needs_copy,
    boolean_t           is_shared,
    boolean_t           in_transition,
    vm_prot_t           cur_protection,
    vm_prot_t           max_protection,
    vm_behavior_t       behavior,
    vm_inherit_t        inheritance,
    unsigned            wired_count)
{
    vm_map_entry_t  new_entry;

    assert(insp_entry != (vm_map_entry_t)0);

    new_entry = vm_map_entry_create(map);

    new_entry->vme_start = start;
    new_entry->vme_end = end;
    assert(page_aligned(new_entry->vme_start));
    assert(page_aligned(new_entry->vme_end));

    new_entry->object.vm_object = object;
    new_entry->offset = offset;
    new_entry->is_shared = is_shared;
    new_entry->is_sub_map = FALSE;
    new_entry->needs_copy = needs_copy;
    new_entry->in_transition = in_transition;
    new_entry->needs_wakeup = FALSE;
    new_entry->inheritance = inheritance;
    new_entry->protection = cur_protection;
    new_entry->max_protection = max_protection;
    new_entry->behavior = behavior;
    new_entry->wired_count = wired_count;
    new_entry->user_wired_count = 0;
    new_entry->use_pmap = FALSE;

    /*
     *  Insert the new entry into the list.
     */

    vm_map_entry_link(map, insp_entry, new_entry);
    map->size += end - start;

    /*
     *  Update the free space hint and the lookup hint.
     */

    SAVE_HINT(map, new_entry);
    return new_entry;
}
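
/*
 * Illustrative only: a minimal sketch of how a caller that already holds
 * the map lock (and donates a reference on "object") might use
 * vm_map_entry_insert() to install a one-page mapping at "addr".  The
 * helper name and the protection/behavior values are just examples; the
 * snippet is not part of this file's build.
 */
#if 0
static void
example_insert_one_page(
    vm_map_t     map,       /* locked by the caller */
    vm_offset_t  addr,      /* page-aligned */
    vm_object_t  object)    /* caller donates one reference */
{
    vm_map_entry_t  insp_entry;

    if (vm_map_lookup_entry(map, addr, &insp_entry))
        return;     /* something is already mapped there */

    (void) vm_map_entry_insert(map, insp_entry, addr, addr + PAGE_SIZE,
                               object, (vm_object_offset_t) 0,
                               FALSE, FALSE, FALSE,
                               VM_PROT_DEFAULT, VM_PROT_ALL,
                               VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT, 0);
}
#endif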
/*
 *  Routine:    vm_remap_extract
 *
 *  Description:    This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
    vm_map_t              map,
    vm_offset_t           addr,
    vm_size_t             size,
    boolean_t             copy,
    struct vm_map_header  *map_header,
    vm_prot_t             *cur_protection,
    vm_prot_t             *max_protection,
    /* What, no behavior? */
    vm_inherit_t          inheritance,
    boolean_t             pageable)
{
    kern_return_t       result;
    vm_size_t           mapped_size;
    vm_size_t           tmp_size;
    vm_map_entry_t      src_entry;      /* result of last map lookup */
    vm_map_entry_t      new_entry;
    vm_object_offset_t  offset;
    vm_offset_t         map_address;
    vm_offset_t         src_start;      /* start of entry to map */
    vm_offset_t         src_end;        /* end of region to be mapped */
    vm_object_t         object;
    vm_map_version_t    version;
    boolean_t           src_needs_copy;
    boolean_t           new_entry_needs_copy;

    assert(map != VM_MAP_NULL);
    assert(size != 0 && size == round_page(size));
    assert(inheritance == VM_INHERIT_NONE ||
           inheritance == VM_INHERIT_COPY ||
           inheritance == VM_INHERIT_SHARE);
    /*
     *  Compute start and end of region.
     */
    src_start = trunc_page(addr);
    src_end = round_page(src_start + size);

    /*
     *  Initialize map_header.
     */
    map_header->links.next = (struct vm_map_entry *)&map_header->links;
    map_header->links.prev = (struct vm_map_entry *)&map_header->links;
    map_header->nentries = 0;
    map_header->entries_pageable = pageable;

    *cur_protection = VM_PROT_ALL;
    *max_protection = VM_PROT_ALL;

    map_address = 0;
    mapped_size = 0;
    result = KERN_SUCCESS;
    /*
     *  The specified source virtual space might correspond to
     *  multiple map entries, need to loop on them.
     */
    vm_map_lock(map);
    while (mapped_size != size) {
        vm_size_t  entry_size;

        /*
         *  Find the beginning of the region.
         */
        if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        if (src_start < src_entry->vme_start ||
            (mapped_size && src_start != src_entry->vme_start)) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        if(src_entry->is_sub_map) {
            result = KERN_INVALID_ADDRESS;
            break;
        }

        tmp_size = size - mapped_size;
        if (src_end > src_entry->vme_end)
            tmp_size -= (src_end - src_entry->vme_end);

        entry_size = (vm_size_t)(src_entry->vme_end -
                                 src_entry->vme_start);

        if(src_entry->is_sub_map) {
            vm_map_reference(src_entry->object.sub_map);
        } else {
            object = src_entry->object.vm_object;

            if (object == VM_OBJECT_NULL) {
                object = vm_object_allocate(entry_size);
                src_entry->offset = 0;
                src_entry->object.vm_object = object;
            } else if (object->copy_strategy !=
                       MEMORY_OBJECT_COPY_SYMMETRIC) {
                /*
                 *  We are already using an asymmetric
                 *  copy, and therefore we already have
                 *  the right object.
                 */
                assert(!src_entry->needs_copy);
            } else if (src_entry->needs_copy || object->shadowed ||
                       (object->internal && !object->true_share &&
                        !src_entry->is_shared &&
                        object->size > entry_size)) {

                vm_object_shadow(&src_entry->object.vm_object,
                                 &src_entry->offset,
                                 entry_size);

                if (!src_entry->needs_copy &&
                    (src_entry->protection & VM_PROT_WRITE)) {
                    pmap_protect(vm_map_pmap(map),
                                 src_entry->vme_start,
                                 src_entry->vme_end,
                                 src_entry->protection &
                                 ~VM_PROT_WRITE);
                }

                object = src_entry->object.vm_object;
                src_entry->needs_copy = FALSE;
            }

            vm_object_lock(object);
            object->ref_count++;        /* object ref. for new entry */
            VM_OBJ_RES_INCR(object);
            if (object->copy_strategy ==
                MEMORY_OBJECT_COPY_SYMMETRIC) {
                object->copy_strategy =
                        MEMORY_OBJECT_COPY_DELAY;
            }
            vm_object_unlock(object);
        }
        offset = src_entry->offset + (src_start - src_entry->vme_start);

        new_entry = _vm_map_entry_create(map_header);
        vm_map_entry_copy(new_entry, src_entry);
        new_entry->use_pmap = FALSE;    /* clr address space specifics */

        new_entry->vme_start = map_address;
        new_entry->vme_end = map_address + tmp_size;
        new_entry->inheritance = inheritance;
        new_entry->offset = offset;

        /*
         *  The new region has to be copied now if required.
         */
    RestartCopy:
        if (!copy) {
            src_entry->is_shared = TRUE;
            new_entry->is_shared = TRUE;
            if (!(new_entry->is_sub_map))
                new_entry->needs_copy = FALSE;

        } else if (src_entry->is_sub_map) {
            /* make this a COW sub_map if not already */
            new_entry->needs_copy = TRUE;
        } else if (src_entry->wired_count == 0 &&
                   vm_object_copy_quickly(&new_entry->object.vm_object,
                                          new_entry->offset,
                                          (new_entry->vme_end -
                                           new_entry->vme_start),
                                          &src_needs_copy,
                                          &new_entry_needs_copy)) {

            new_entry->needs_copy = new_entry_needs_copy;
            new_entry->is_shared = FALSE;

            /*
             *  Handle copy_on_write semantics.
             */
            if (src_needs_copy && !src_entry->needs_copy) {
                vm_object_pmap_protect(object,
                                       offset,
                                       entry_size,
                                       (src_entry->is_shared ?
                                        PMAP_NULL : map->pmap),
                                       src_entry->vme_start,
                                       src_entry->protection &
                                       ~VM_PROT_WRITE);

                src_entry->needs_copy = TRUE;
            }
            /*
             *  Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

        } else {
            new_entry->is_shared = FALSE;
            /*
             *  The map can be safely unlocked since we
             *  already hold a reference on the object.
             *
             *  Record the timestamp of the map for later
             *  verification, and unlock the map.
             */
            version.main_timestamp = map->timestamp;
            vm_map_unlock(map);

            /*
             *  Perform the copy.
             */
            if (src_entry->wired_count > 0) {
                vm_object_lock(object);
                result = vm_object_copy_slowly(
                                object,
                                offset,
                                entry_size,
                                THREAD_UNINT,
                                &new_entry->object.vm_object);

                new_entry->offset = 0;
                new_entry->needs_copy = FALSE;
            } else {
                result = vm_object_copy_strategically(
                                object,
                                offset,
                                entry_size,
                                &new_entry->object.vm_object,
                                &new_entry->offset,
                                &new_entry_needs_copy);

                new_entry->needs_copy = new_entry_needs_copy;
            }

            /*
             *  Throw away the old object reference of the new entry.
             */
            vm_object_deallocate(object);

            if (result != KERN_SUCCESS &&
                result != KERN_MEMORY_RESTART_COPY) {
                _vm_map_entry_dispose(map_header, new_entry);
                break;
            }

            /*
             *  Verify that the map has not substantially
             *  changed while the copy was being made.
             */

            vm_map_lock(map);   /* Increments timestamp once! */
            if (version.main_timestamp + 1 != map->timestamp) {
                /*
                 *  Simple version comparison failed.
                 *
                 *  Retry the lookup and verify that the
                 *  same object/offset are still present.
                 */
                vm_object_deallocate(new_entry->
                                     object.vm_object);
                _vm_map_entry_dispose(map_header, new_entry);
                if (result == KERN_MEMORY_RESTART_COPY)
                    result = KERN_SUCCESS;
                continue;
            }

            if (result == KERN_MEMORY_RESTART_COPY) {
                vm_object_reference(object);
                goto RestartCopy;
            }
        }

        _vm_map_entry_link(map_header,
                           map_header->links.prev, new_entry);

        *cur_protection &= src_entry->protection;
        *max_protection &= src_entry->max_protection;

        map_address += tmp_size;
        mapped_size += tmp_size;
        src_start += tmp_size;

    } /* end while */

    vm_map_unlock(map);
!= KERN_SUCCESS
) {
8527 * Free all allocated elements.
8529 for (src_entry
= map_header
->links
.next
;
8530 src_entry
!= (struct vm_map_entry
*)&map_header
->links
;
8531 src_entry
= new_entry
) {
8532 new_entry
= src_entry
->vme_next
;
8533 _vm_map_entry_unlink(map_header
, src_entry
);
8534 vm_object_deallocate(src_entry
->object
.vm_object
);
8535 _vm_map_entry_dispose(map_header
, src_entry
);
/*
 *  Routine:    vm_remap
 *
 *      Map portion of a task's address space.
 *      Mapped region must not overlap more than
 *      one vm memory object. Protections and
 *      inheritance attributes remain the same
 *      as in the original task and are out parameters.
 *      Source and Target task can be identical
 *      Other attributes are identical as for vm_map()
 */
kern_return_t
vm_remap(
    vm_map_t      target_map,
    vm_offset_t   *address,
    vm_size_t     size,
    vm_offset_t   mask,
    boolean_t     anywhere,
    vm_map_t      src_map,
    vm_offset_t   memory_address,
    boolean_t     copy,
    vm_prot_t     *cur_protection,
    vm_prot_t     *max_protection,
    vm_inherit_t  inheritance)
{
    kern_return_t         result;
    vm_map_entry_t        entry;
    vm_map_entry_t        insp_entry;
    vm_map_entry_t        new_entry;
    struct vm_map_header  map_header;

    if (target_map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (inheritance) {
    case VM_INHERIT_NONE:
    case VM_INHERIT_COPY:
    case VM_INHERIT_SHARE:
        if (size != 0 && src_map != VM_MAP_NULL)
            break;
        /*FALL THRU*/
    default:
        return KERN_INVALID_ARGUMENT;
    }

    size = round_page(size);

    result = vm_remap_extract(src_map, memory_address,
                              size, copy, &map_header,
                              cur_protection,
                              max_protection,
                              inheritance,
                              target_map->hdr.entries_pageable);

    if (result != KERN_SUCCESS) {
        return result;
    }

    /*
     * Allocate/check a range of free virtual address
     * space for the target
     */
    *address = trunc_page(*address);
    vm_map_lock(target_map);
    result = vm_remap_range_allocate(target_map, address, size,
                                     mask, anywhere, &insp_entry);

    for (entry = map_header.links.next;
         entry != (struct vm_map_entry *)&map_header.links;
         entry = new_entry) {
        new_entry = entry->vme_next;
        _vm_map_entry_unlink(&map_header, entry);
        if (result == KERN_SUCCESS) {
            entry->vme_start += *address;
            entry->vme_end += *address;
            vm_map_entry_link(target_map, insp_entry, entry);
            insp_entry = entry;
        } else {
            if (!entry->is_sub_map) {
                vm_object_deallocate(entry->object.vm_object);
            } else {
                vm_map_deallocate(entry->object.sub_map);
            }
            _vm_map_entry_dispose(&map_header, entry);
        }
    }

    if (result == KERN_SUCCESS) {
        target_map->size += size;
        SAVE_HINT(target_map, insp_entry);
    }
    vm_map_unlock(target_map);

    if (result == KERN_SUCCESS && target_map->wiring_required)
        result = vm_map_wire(target_map, *address,
                             *address + size, *cur_protection, TRUE);
    return result;
}
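
/*
 * Illustrative only: from user space this path is driven by the Mach
 * vm_remap() call, e.g. to map a shared view of one page of another task's
 * address space into the caller.  "src_task" and "src_addr" are assumed to
 * be valid; the helper name is ours and the snippet is not part of this
 * file's build.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
mirror_remote_page(
    mach_port_t   src_task,
    vm_address_t  src_addr,
    vm_address_t  *local_addr)      /* OUT */
{
    vm_prot_t  cur_prot, max_prot;

    *local_addr = 0;
    return vm_remap(mach_task_self(), local_addr, vm_page_size, 0,
                    TRUE,                   /* anywhere */
                    src_task, src_addr,
                    FALSE,                  /* share, do not copy */
                    &cur_prot, &max_prot, VM_INHERIT_NONE);
}
#endif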
/*
 *  Routine:    vm_remap_range_allocate
 *
 *  Description:
 *      Allocate a range in the specified virtual address map.
 *      returns the address and the map entry just before the allocated
 *      range
 *
 *  Map must be locked.
 */
kern_return_t
vm_remap_range_allocate(
    vm_map_t        map,
    vm_offset_t     *address,   /* IN/OUT */
    vm_size_t       size,
    vm_offset_t     mask,
    boolean_t       anywhere,
    vm_map_entry_t  *map_entry) /* OUT */
{
    register vm_map_entry_t  entry;
    register vm_offset_t     start;
    register vm_offset_t     end;
    kern_return_t            result = KERN_SUCCESS;

 StartAgain: ;

    start = *address;

    if (anywhere)
    {
        /*
         *  Calculate the first possible address.
         */

        if (start < map->min_offset)
            start = map->min_offset;
        if (start > map->max_offset)
            return(KERN_NO_SPACE);

        /*
         *  Look for the first possible address;
         *  if there's already something at this
         *  address, we have to start after it.
         */

        assert(first_free_is_valid(map));
        if (start == map->min_offset) {
            if ((entry = map->first_free) != vm_map_to_entry(map))
                start = entry->vme_end;
        } else {
            vm_map_entry_t  tmp_entry;
            if (vm_map_lookup_entry(map, start, &tmp_entry))
                start = tmp_entry->vme_end;
            entry = tmp_entry;
        }

        /*
         *  In any case, the "entry" always precedes
         *  the proposed new region throughout the
         *  loop:
         */

        while (TRUE) {
            register vm_map_entry_t  next;

            /*
             *  Find the end of the proposed new region.
             *  Be sure we didn't go beyond the end, or
             *  wrap around the address.
             */

            end = ((start + mask) & ~mask);
            if (end < start)
                return(KERN_NO_SPACE);
            start = end;
            end += size;

            if ((end > map->max_offset) || (end < start)) {
                if (map->wait_for_space) {
                    if (size <= (map->max_offset -
                                 map->min_offset)) {
                        assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
                        vm_map_unlock(map);
                        thread_block((void (*)(void))0);
                        vm_map_lock(map);
                        goto StartAgain;
                    }
                }

                return(KERN_NO_SPACE);
            }

            /*
             *  If there are no more entries, we must win.
             */

            next = entry->vme_next;
            if (next == vm_map_to_entry(map))
                break;

            /*
             *  If there is another entry, it must be
             *  after the end of the potential new region.
             */

            if (next->vme_start >= end)
                break;

            /*
             *  Didn't fit -- move to the next entry.
             */

            entry = next;
            start = entry->vme_end;
        }
        *address = start;
    } else {
        vm_map_entry_t  temp_entry;

        /*
         *  Verify that:
         *      the address doesn't itself violate
         *      the mask requirement.
         */

        if ((start & mask) != 0)
            return(KERN_NO_SPACE);

        /*
         *  ...  the address is within bounds
         */

        end = start + size;

        if ((start < map->min_offset) ||
            (end > map->max_offset) ||
            (start >= end)) {
            return(KERN_INVALID_ADDRESS);
        }

        /*
         *  ...  the starting address isn't allocated
         */

        if (vm_map_lookup_entry(map, start, &temp_entry))
            return(KERN_NO_SPACE);

        entry = temp_entry;

        /*
         *  ...  the next region doesn't overlap the
         *       end point.
         */

        if ((entry->vme_next != vm_map_to_entry(map)) &&
            (entry->vme_next->vme_start < end))
            return(KERN_NO_SPACE);
    }
    *map_entry = entry;
    return(KERN_SUCCESS);
}
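
/*
 * Illustrative only: the "anywhere" path above aligns each candidate start
 * address with end = ((start + mask) & ~mask), i.e. it rounds start up to
 * the next multiple of (mask + 1).  A standalone sketch of that arithmetic
 * (not part of this file's build):
 */
#if 0
#include <stdio.h>

int
main(void)
{
    unsigned long start = 0x12345;
    unsigned long mask  = 0xFFFF;               /* request 64 KB alignment */
    unsigned long aligned = (start + mask) & ~mask;

    printf("0x%lx rounds up to 0x%lx\n", start, aligned);  /* 0x20000 */
    return 0;
}
#endif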
/*
 *  Routine:    vm_map_switch
 *
 *  Description:
 *      Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
    vm_map_t  map)
{
    int           mycpu;
    thread_act_t  thr_act = current_act();
    vm_map_t      oldmap = thr_act->map;

    mp_disable_preemption();
    mycpu = cpu_number();

    /*
     *  Deactivate the current map and activate the requested map
     */
    PMAP_SWITCH_USER(thr_act, map, mycpu);

    mp_enable_preemption();
    return(oldmap);
}
/*
 *  Routine:    vm_map_write_user
 *
 *  Description:
 *      Copy out data from a kernel space into space in the
 *      destination map. The space must already exist in the
 *      destination map.
 *      NOTE:  This routine should only be called by threads
 *      which can block on a page fault. i.e. kernel mode user
 *      threads.
 *
 */
kern_return_t
vm_map_write_user(
    vm_map_t     map,
    vm_offset_t  src_addr,
    vm_offset_t  dst_addr,
    vm_size_t    size)
{
    thread_act_t   thr_act = current_act();
    kern_return_t  kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
    } else {
        vm_map_t  oldmap;

        /* take on the identity of the target map while doing */
        /* the transfer */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyout((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }
    return kr;
}
/*
 *  Routine:    vm_map_read_user
 *
 *  Description:
 *      Copy in data from a user space source map into the
 *      kernel map. The space must already exist in the
 *      kernel map.
 *      NOTE:  This routine should only be called by threads
 *      which can block on a page fault. i.e. kernel mode user
 *      threads.
 *
 */
kern_return_t
vm_map_read_user(
    vm_map_t     map,
    vm_offset_t  src_addr,
    vm_offset_t  dst_addr,
    vm_size_t    size)
{
    thread_act_t   thr_act = current_act();
    kern_return_t  kr = KERN_SUCCESS;

    if(thr_act->map == map) {
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
    } else {
        vm_map_t  oldmap;

        /* take on the identity of the target map while doing */
        /* the transfer */

        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyin((char *)src_addr, (char *)dst_addr, size)) {
            kr = KERN_INVALID_ADDRESS;
        }
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }
    return kr;
}
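
/*
 * Illustrative only: the intended usage of the two helpers above is to move
 * small amounts of data between the kernel and a (possibly foreign) task's
 * map.  "user_va" is assumed to be a valid, already-mapped address in
 * "task_map", and the caller is assumed to be a kernel-mode thread that can
 * take page faults.  The helper name is ours; not part of this file's build.
 */
#if 0
static kern_return_t
example_user_roundtrip(
    vm_map_t     task_map,
    vm_offset_t  user_va)
{
    int            out_value = 42;
    int            in_value = 0;
    kern_return_t  kr;

    kr = vm_map_write_user(task_map, (vm_offset_t)&out_value,
                           user_va, sizeof (out_value));
    if (kr != KERN_SUCCESS)
        return kr;

    return vm_map_read_user(task_map, user_va,
                            (vm_offset_t)&in_value, sizeof (in_value));
}
#endif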
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */

kern_return_t
vm_region_clone(
    ipc_port_t  src_region,
    ipc_port_t  dst_region)
{
    vm_named_entry_t  src_object;
    vm_named_entry_t  dst_object;
    vm_map_t          src_map;
    vm_map_t          dst_map;
    vm_offset_t       addr;
    vm_offset_t       max_off;
    vm_map_entry_t    entry;
    vm_map_entry_t    new_entry;
    vm_map_entry_t    insert_point;

    src_object = (vm_named_entry_t)src_region->ip_kobject;
    dst_object = (vm_named_entry_t)dst_region->ip_kobject;
    if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
        return KERN_INVALID_ARGUMENT;
    }
    src_map = (vm_map_t)src_object->backing.map;
    dst_map = (vm_map_t)dst_object->backing.map;
    /* destination map is assumed to be unavailable to any other */
    /* activity.  i.e. it is new */
    vm_map_lock(src_map);
    if((src_map->min_offset != dst_map->min_offset)
       || (src_map->max_offset != dst_map->max_offset)) {
        vm_map_unlock(src_map);
        return KERN_INVALID_ARGUMENT;
    }
    addr = src_map->min_offset;
    vm_map_lookup_entry(dst_map, addr, &entry);
    if(entry == vm_map_to_entry(dst_map)) {
        entry = entry->vme_next;
    }
    if(entry == vm_map_to_entry(dst_map)) {
        max_off = src_map->max_offset;
    } else {
        max_off = entry->vme_start;
    }
    vm_map_lookup_entry(src_map, addr, &entry);
    if(entry == vm_map_to_entry(src_map)) {
        entry = entry->vme_next;
    }
    vm_map_lookup_entry(dst_map, addr, &insert_point);
    while((entry != vm_map_to_entry(src_map)) &&
          (entry->vme_end <= max_off)) {
        addr = entry->vme_start;
        new_entry = vm_map_entry_create(dst_map);
        vm_map_entry_copy(new_entry, entry);
        vm_map_entry_link(dst_map, insert_point, new_entry);
        insert_point = new_entry;
        if (entry->object.vm_object != VM_OBJECT_NULL) {
            if (new_entry->is_sub_map) {
                vm_map_reference(new_entry->object.sub_map);
            } else {
                vm_object_reference(
                        new_entry->object.vm_object);
            }
        }
        dst_map->size += new_entry->vme_end - new_entry->vme_start;
        entry = entry->vme_next;
    }
    vm_map_unlock(src_map);
    return KERN_SUCCESS;
}
/*
 * Export routines to other components for the things we access locally through
 * macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
    return (current_map_fast());
}
/*
 *  vm_map_check_protection:
 *
 *  Assert that the target map allows the specified
 *  privilege on the entire address region given.
 *  The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
    register vm_map_t     map;
    register vm_offset_t  start;
    register vm_offset_t  end;
    register vm_prot_t    protection;
{
    register vm_map_entry_t  entry;
    vm_map_entry_t           tmp_entry;

    vm_map_lock(map);

    if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
    {
        vm_map_unlock(map);
        return (FALSE);
    }

    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        vm_map_unlock(map);
        return (FALSE);
    }

    entry = tmp_entry;

    while (start < end) {
        if (entry == vm_map_to_entry(map)) {
            vm_map_unlock(map);
            return (FALSE);
        }

        /*
         *  No holes allowed!
         */

        if (start < entry->vme_start) {
            vm_map_unlock(map);
            return (FALSE);
        }

        /*
         * Check protection associated with entry.
         */

        if ((entry->protection & protection) != protection) {
            vm_map_unlock(map);
            return (FALSE);
        }

        /* go to next entry */

        start = entry->vme_end;
        entry = entry->vme_next;