/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory mapping module.
 */
#include <task_swapper.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_port.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
/*
 * Internal prototypes
 */
extern boolean_t	vm_map_range_check(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_map_entry_t	*entry);

extern vm_map_entry_t	_vm_map_entry_create(
				struct vm_map_header	*map_header);

extern void		_vm_map_entry_dispose(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry);

extern void		vm_map_pmap_enter(
				vm_map_t		map,
				vm_offset_t		addr,
				vm_offset_t		end_addr,
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_prot_t		protection);

extern void		_vm_map_clip_end(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		end);

extern void		vm_map_entry_delete(
				vm_map_t	map,
				vm_map_entry_t	entry);

extern kern_return_t	vm_map_delete(
				vm_map_t	map,
				vm_offset_t	start,
				vm_offset_t	end,
				int		flags);

extern void		vm_map_copy_steal_pages(
				vm_map_copy_t	copy);

extern kern_return_t	vm_map_copy_overwrite_unaligned(
				vm_map_t	dst_map,
				vm_map_entry_t	entry,
				vm_map_copy_t	copy,
				vm_offset_t	start);

extern kern_return_t	vm_map_copy_overwrite_aligned(
				vm_map_t	dst_map,
				vm_map_entry_t	tmp_entry,
				vm_map_copy_t	copy,
				vm_offset_t	start,
				pmap_t		pmap);

extern kern_return_t	vm_map_copyin_kernel_buffer(
				vm_map_t	src_map,
				vm_offset_t	src_addr,
				vm_size_t	len,
				boolean_t	src_destroy,
				vm_map_copy_t	*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyout_kernel_buffer(
				vm_map_t	map,
				vm_offset_t	*addr,		/* IN/OUT */
				vm_map_copy_t	copy,
				boolean_t	overwrite);

extern void		vm_map_fork_share(
				vm_map_t	old_map,
				vm_map_entry_t	old_entry,
				vm_map_t	new_map);

extern boolean_t	vm_map_fork_copy(
				vm_map_t	old_map,
				vm_map_entry_t	*old_entry_p,
				vm_map_t	new_map);

extern kern_return_t	vm_remap_range_allocate(
				vm_map_t	map,
				vm_offset_t	*address,	/* IN/OUT */
				vm_size_t	size,
				vm_offset_t	mask,
				boolean_t	anywhere,
				vm_map_entry_t	*map_entry);	/* OUT */

extern void		_vm_map_clip_start(
				struct vm_map_header	*map_header,
				vm_map_entry_t		entry,
				vm_offset_t		start);

void			vm_region_top_walk(
				vm_map_entry_t		entry,
				vm_region_top_info_t	top);

void			vm_region_walk(
				vm_map_entry_t			entry,
				vm_region_extended_info_t	extended,
				vm_object_offset_t		offset,
				vm_offset_t			range,
				vm_map_t			map,
				vm_offset_t			va);
/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */
#define vm_map_entry_copy(NEW,OLD)			\
MACRO_BEGIN						\
	*(NEW) = *(OLD);				\
	(NEW)->is_shared = FALSE;			\
	(NEW)->needs_wakeup = FALSE;			\
	(NEW)->in_transition = FALSE;			\
	(NEW)->wired_count = 0;				\
	(NEW)->user_wired_count = 0;			\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)		(*(NEW) = *(OLD))
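
/*
 * Illustrative sketch (not part of the original module, kept compiled
 * out): the difference between the two copy macros matters when the
 * source entry is wired.  The identifiers "dst" and "src" are
 * hypothetical locals.
 */
#if 0
static void
vm_map_entry_copy_example(vm_map_entry_t dst, vm_map_entry_t src)
{
	vm_map_entry_copy(dst, src);		/* dst->wired_count forced to 0 */
	vm_map_entry_copy_full(dst, src);	/* dst->wired_count == src->wired_count */
}
#endif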
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	compatible entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */
zone_t		vm_map_zone;		/* zone for vm_map structures */
zone_t		vm_map_entry_zone;	/* zone for vm_map_entry structures */
zone_t		vm_map_kentry_zone;	/* zone for kernel entry structures */
zone_t		vm_map_copy_zone;	/* zone for vm_map_copy structures */

/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */
vm_object_t	vm_submap_object;
/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_kentry_zone:	used to allocate map entries for the kernel.
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */

vm_offset_t	map_data;
vm_size_t	map_data_size;
vm_offset_t	kentry_data;
vm_size_t	kentry_data_size;
int		kentry_count = 2048;	/* to init kentry_data_size */
#define NO_COALESCE_LIMIT	(1024 * 128)

/*
 * Threshold for aggressive (eager) page map entering for vm copyout
 * operations.  Any copyout larger will NOT be aggressively entered.
 */
vm_size_t vm_map_aggressive_enter_max;	/* set by bootstrap */
void
vm_map_init(
	void)
{
	vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
			    PAGE_SIZE, "maps");
	vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				  1024*1024, PAGE_SIZE*5,
				  "non-kernel map entries");
	vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
				   kentry_data_size, kentry_data_size,
				   "kernel map entries");

	vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
				 16*1024, PAGE_SIZE, "map copies");

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set kentry_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
	zcram(vm_map_zone, map_data, map_data_size);
	zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
}

void
vm_map_steal_memory(
	void)
{
	map_data_size = round_page(10 * sizeof(struct vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 *	Limiting worst case: vm_map_kentry_zone needs to map each "available"
	 *	physical page (i.e. that beyond the kernel image and page tables)
	 *	individually; we guess at most one entry per eight pages in the
	 *	real world. This works out to roughly .1 of 1% of physical memory,
	 *	or roughly 1900 entries (64K) for a 64M machine with 4K pages.
	 */
	kentry_count = pmap_free_pages() / 8;

	kentry_data_size =
		round_page(kentry_count * sizeof(struct vm_map_entry));
	kentry_data = pmap_steal_memory(kentry_data_size);
}
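
/*
 * Illustrative sketch of the assumed bootstrap ordering (compiled out):
 * vm_map_steal_memory() is expected to run first, so that vm_map_init()
 * can cram vm_map_zone and vm_map_kentry_zone with the memory stolen via
 * pmap_steal_memory().  The wrapper name below is hypothetical.
 */
#if 0
static void
vm_map_bootstrap_example(void)
{
	vm_map_steal_memory();	/* fills map_data / kentry_data */
	vm_map_init();		/* zinit + zcram the stolen regions */
}
#endif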
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_offset_t	min,
	vm_offset_t	max,
	boolean_t	pageable)
{
	register vm_map_t	result;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	result->size = 0;
	result->ref_count = 1;
#if	TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif	/* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->wait_for_space = FALSE;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	vm_map_lock_init(result);
	mutex_init(&result->s_lock, ETAP_VM_RESULT);

	return(result);
}
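
/*
 * Illustrative use of vm_map_create (compiled out).  A pageable map is
 * created covering [min, max); the pmap argument here is a placeholder --
 * real callers pass a pmap obtained elsewhere (e.g. from pmap_create()).
 */
#if 0
static vm_map_t
vm_map_create_example(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	/* TRUE: entries for this map come from the pageable entry zone */
	return vm_map_create(pmap, min, max, TRUE);
}
#endif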
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion in the
 *	given map (or map copy).  No fields are filled.
 */
#define	vm_map_entry_create(map) \
	_vm_map_entry_create(&(map)->hdr)

#define	vm_map_copy_entry_create(copy) \
	_vm_map_entry_create(&(copy)->cpy_hdr)

vm_map_entry_t
_vm_map_entry_create(
	register struct vm_map_header	*map_header)
{
	register zone_t		zone;
	register vm_map_entry_t	entry;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	entry = (vm_map_entry_t) zalloc(zone);
	if (entry == VM_MAP_ENTRY_NULL)
		panic("vm_map_entry_create");

	return(entry);
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
#define	vm_map_entry_dispose(map, entry)			\
MACRO_BEGIN							\
	if((entry) == (map)->first_free)			\
		(map)->first_free = vm_map_to_entry(map);	\
	if((entry) == (map)->hint)				\
		(map)->hint = vm_map_to_entry(map);		\
	_vm_map_entry_dispose(&(map)->hdr, (entry));		\
MACRO_END

#define	vm_map_copy_entry_dispose(copy, entry) \
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable)
		zone = vm_map_entry_zone;
	else
		zone = vm_map_kentry_zone;

	zfree(zone, (vm_offset_t) entry);
}
boolean_t first_free_is_valid(vm_map_t map);	/* forward */
boolean_t first_free_check = FALSE;

boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	vm_map_entry_t	entry, next;

	if (!first_free_check)
		return TRUE;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
	       (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
/*
 *	UPDATE_FIRST_FREE:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE(map, new_first_free)				\
MACRO_BEGIN								\
	vm_map_t	UFF_map;					\
	vm_map_entry_t	UFF_first_free;					\
	vm_map_entry_t	UFF_next_entry;					\
	UFF_map = (map);						\
	UFF_first_free = (new_first_free);				\
	UFF_next_entry = UFF_first_free->vme_next;			\
	while (trunc_page(UFF_next_entry->vme_start) ==			\
	       trunc_page(UFF_first_free->vme_end) ||			\
	       (trunc_page(UFF_next_entry->vme_start) ==		\
		trunc_page(UFF_first_free->vme_start) &&		\
		UFF_next_entry != vm_map_to_entry(UFF_map))) {		\
		UFF_first_free = UFF_next_entry;			\
		UFF_next_entry = UFF_first_free->vme_next;		\
		if (UFF_first_free == vm_map_to_entry(UFF_map))		\
			break;						\
	}								\
	UFF_map->first_free = UFF_first_free;				\
	assert(first_free_is_valid(UFF_map));				\
MACRO_END
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 */
#define vm_map_entry_link(map, after_where, entry)			\
MACRO_BEGIN								\
	vm_map_t	VMEL_map;					\
	vm_map_entry_t	VMEL_entry;					\
	VMEL_map = (map);						\
	VMEL_entry = (entry);						\
	_vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);	\
	UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free);		\
MACRO_END

#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define _vm_map_entry_link(hdr, after_where, entry)			\
MACRO_BEGIN								\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
MACRO_END

#define vm_map_entry_unlink(map, entry)					\
MACRO_BEGIN								\
	vm_map_t	VMEU_map;					\
	vm_map_entry_t	VMEU_entry;					\
	vm_map_entry_t	VMEU_first_free;				\
	VMEU_map = (map);						\
	VMEU_entry = (entry);						\
	if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start)	\
		VMEU_first_free = VMEU_entry->vme_prev;			\
	else								\
		VMEU_first_free = VMEU_map->first_free;			\
	_vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry);		\
	UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free);			\
MACRO_END

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))

#define _vm_map_entry_unlink(hdr, entry)				\
MACRO_BEGIN								\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
MACRO_END
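
/*
 * Illustrative sketch (compiled out): vm_map_entry_link() splices a new
 * entry into the circular doubly-linked list right after "where" and
 * then re-derives first_free; unlink reverses it.  The map must be
 * locked by the caller; "where" and "new" are hypothetical locals.
 */
#if 0
static void
vm_map_entry_link_example(vm_map_t map, vm_map_entry_t where)
{
	vm_map_entry_t new = vm_map_entry_create(map);

	vm_map_entry_link(map, where, new);	/* new now follows "where" */
	assert(where->vme_next == new && new->vme_prev == where);
	vm_map_entry_unlink(map, new);		/* undo; first_free fixed up */
	vm_map_entry_dispose(map, new);
}
#endif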
/*
 *	kernel_vm_map_reference:
 *
 *	kernel internal export version for iokit and bsd components
 *	in lieu of component interface semantics.
 */
void
kernel_vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
#endif
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
#if	MACH_ASSERT && TASK_SWAPPER
/*
 *	vm_map_reference:
 *
 *	Adds valid reference and residence counts to the given map.
 *	The map must be in memory (i.e. non-zero residence count).
 */
void
vm_map_reference(
	register vm_map_t	map)
{
	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
	map->res_count++;
	map->ref_count++;
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_res_reference:
 *
 *	Adds another valid residence count to the given map.
 *
 *	Map is locked so this function can be called from
 *	vm_map_swapin.
 */
void vm_map_res_reference(register vm_map_t map)
{
	/* assert map is locked */
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	if (map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapin(map);
		mutex_lock(&map->s_lock);
		++map->res_count;
		vm_map_unlock(map);
	} else
		++map->res_count;
}
/*
 *	vm_map_reference_swap:
 *
 *	Adds valid reference and residence counts to the given map.
 *
 *	The map may not be in memory (i.e. zero residence count).
 */
void vm_map_reference_swap(register vm_map_t map)
{
	assert(map != VM_MAP_NULL);
	mutex_lock(&map->s_lock);
	assert(map->res_count >= 0);
	assert(map->ref_count >= map->res_count);
	map->ref_count++;
	vm_map_res_reference(map);
	mutex_unlock(&map->s_lock);
}
/*
 *	vm_map_res_deallocate:
 *
 *	Decrement residence count on a map; possibly causing swapout.
 *
 *	The map must be in memory (i.e. non-zero residence count).
 *
 *	The map is locked, so this function is callable from vm_map_deallocate.
 */
void vm_map_res_deallocate(register vm_map_t map)
{
	assert(map->res_count > 0);
	if (--map->res_count == 0) {
		mutex_unlock(&map->s_lock);
		vm_map_lock(map);
		vm_map_swapout(map);
		vm_map_unlock(map);
		mutex_lock(&map->s_lock);
	}
	assert(map->ref_count >= map->res_count);
}
#endif	/* MACH_ASSERT && TASK_SWAPPER */
/*
 *	vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(
	register vm_map_t	map)
{
	unsigned int		ref;

	if (map == VM_MAP_NULL)
		return;

	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	if (ref > 0) {
		vm_map_res_deallocate(map);
		mutex_unlock(&map->s_lock);
		return;
	}
	assert(map->ref_count == 0);
	mutex_unlock(&map->s_lock);

#if	TASK_SWAPPER
	/*
	 * The map residence count isn't decremented here because
	 * the vm_map_delete below will traverse the entire map,
	 * deleting entries, and the residence counts on objects
	 * and sharing maps will go away then.
	 */
#endif	/* TASK_SWAPPER */

	vm_map_destroy(map);
}
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	register vm_map_t	map)
{
	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset,
			     map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_unlock(map);

	pmap_destroy(map->pmap);

	zfree(vm_map_zone, (vm_offset_t) map);
}
#if	TASK_SWAPPER
/*
 *	vm_map_swapin/vm_map_swapout
 *
 *	Swap a map in and out, either referencing or releasing its resources.
 *	These functions are internal use only; however, they must be exported
 *	because they may be called from macros, which are exported.
 *
 *	In the case of swapout, there could be races on the residence count,
 *	so if the residence count is up, we return, assuming that a
 *	vm_map_deallocate() call in the near future will bring us back.
 *
 *	Locking:
 *	-- We use the map write lock for synchronization among races.
 *	-- The map write lock, and not the simple s_lock, protects the
 *	   swap state of the map.
 *	-- If a map entry is a share map, then we hold both locks, in
 *	   hierarchical order.
 *
 *	Synchronization Notes:
 *	1) If a vm_map_swapin() call happens while swapout in progress, it
 *	will block on the map lock and proceed when swapout is through.
 *	2) A vm_map_reference() call at this time is illegal, and will
 *	cause a panic.  vm_map_reference() is only allowed on resident
 *	maps, since it refuses to block.
 *	3) A vm_map_swapin() call during a swapin will block, and
 *	proceed when the first swapin is done, turning into a nop.
 *	This is the reason the res_count is not incremented until
 *	after the swapin is complete.
 *	4) There is a timing hole after the checks of the res_count, before
 *	the map lock is taken, during which a swapin may get the lock
 *	before a swapout about to happen.  If this happens, the swapin
 *	will detect the state and increment the reference count, causing
 *	the swapout to be a nop, thereby delaying it until a later
 *	vm_map_deallocate.  If the swapout gets the lock first, then
 *	the swapin will simply block until the swapout is done, and
 *	then proceed.
 *
 *	Because vm_map_swapin() is potentially an expensive operation, it
 *	should be used with caution.
 *
 *	Invariants:
 *	1) A map with a residence count of zero is either swapped, or
 *	   being swapped.
 *	2) A map with a non-zero residence count is either resident,
 *	   or being swapped in.
 */

int vm_map_swap_enable = 1;
void vm_map_swapin(vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * Map is locked.
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}
void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * Map is locked.
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	mutex_lock(&map->s_lock);
	if (map->res_count != 0) {
		mutex_unlock(&map->s_lock);
		return;
	}
	mutex_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = entry->object.sub_map;
				mutex_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				mutex_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = entry->object.vm_object;
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif	/* TASK_SWAPPER */
/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		mutex_lock(&(map)->s_lock); \
		(map)->hint = (value); \
		mutex_unlock(&(map)->s_lock);
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t	map,
	register vm_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t	cur;
	register vm_map_entry_t	last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	mutex_lock(&map->s_lock);
	cur = map->hint;
	mutex_unlock(&map->s_lock);

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}
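
/*
 * Illustrative sketch (compiled out): vm_map_lookup_entry() returns TRUE
 * and the containing entry when "addr" is mapped, otherwise FALSE and the
 * entry that precedes the hole; either way the hint is refreshed for the
 * next lookup.  The helper name and use of the read lock are assumptions.
 */
#if 0
static boolean_t
vm_map_lookup_example(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;
	boolean_t	mapped;

	vm_map_lock_read(map);
	mapped = vm_map_lookup_entry(map, addr, &entry);
	if (mapped)
		assert(entry->vme_start <= addr && addr < entry->vme_end);
	vm_map_unlock_read(map);
	return mapped;
}
#endif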
/*
 *	Routine:	vm_map_find_space
 *	Purpose:
 *		Allocate a range in the specified virtual address map,
 *		returning the entry allocated for that range.
 *		Used by kmem_alloc, etc.
 *
 *		The map must NOT be locked.  It will be returned locked
 *		on KERN_SUCCESS, unlocked on failure.
 *
 *		If an entry is allocated, the object/offset fields
 *		are initialized to zero.
 */
kern_return_t
vm_map_find_space(
	register vm_map_t	map,
	vm_offset_t		*address,	/* OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	vm_map_entry_t		*o_entry)	/* OUT */
{
	register vm_map_entry_t	entry, new_entry;
	register vm_offset_t	start;
	register vm_offset_t	end;

	new_entry = vm_map_entry_create(map);

	/*
	 *	Look for the first possible address; if there's already
	 *	something at this address, we have to start after it.
	 */

	vm_map_lock(map);

	assert(first_free_is_valid(map));
	if ((entry = map->first_free) == vm_map_to_entry(map))
		start = map->min_offset;
	else
		start = entry->vme_end;

	/*
	 *	In any case, the "entry" always precedes
	 *	the proposed new region throughout the loop:
	 */

	while (TRUE) {
		register vm_map_entry_t	next;

		/*
		 *	Find the end of the proposed new region.
		 *	Be sure we didn't go beyond the end, or
		 *	wrap around the address.
		 */

		end = ((start + mask) & ~mask);
		if (end < start) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}
		start = end;
		end += size;

		if ((end > map->max_offset) || (end < start)) {
			vm_map_entry_dispose(map, new_entry);
			vm_map_unlock(map);
			return(KERN_NO_SPACE);
		}

		/*
		 *	If there are no more entries, we must win.
		 */

		next = entry->vme_next;
		if (next == vm_map_to_entry(map))
			break;

		/*
		 *	If there is another entry, it must be
		 *	after the end of the potential new region.
		 */

		if (next->vme_start >= end)
			break;

		/*
		 *	Didn't fit -- move to the next entry.
		 */

		entry = next;
		start = entry->vme_end;
	}

	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	*address = start;

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->is_shared = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->use_pmap = FALSE;
	new_entry->object.vm_object = VM_OBJECT_NULL;
	new_entry->offset = (vm_object_offset_t) 0;

	new_entry->needs_copy = FALSE;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = VM_PROT_DEFAULT;
	new_entry->max_protection = VM_PROT_ALL;
	new_entry->behavior = VM_BEHAVIOR_DEFAULT;
	new_entry->wired_count = 0;
	new_entry->user_wired_count = 0;

	new_entry->in_transition = FALSE;
	new_entry->needs_wakeup = FALSE;

	/*
	 *	Insert the new entry into the list
	 */

	vm_map_entry_link(map, entry, new_entry);

	map->size += size;

	/*
	 *	Update the lookup hint
	 */
	SAVE_HINT(map, new_entry);

	*o_entry = new_entry;
	return(KERN_SUCCESS);
}
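
/*
 * Illustrative use of vm_map_find_space (compiled out), patterned after
 * kmem_alloc-style callers: on success the map comes back locked with a
 * zeroed entry, so the caller installs a backing object and then unlocks.
 * The identifiers below are hypothetical.
 */
#if 0
static kern_return_t
vm_map_find_space_example(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	vm_map_entry_t	entry;
	kern_return_t	kr;

	kr = vm_map_find_space(map, addrp, size, (vm_offset_t) 0, &entry);
	if (kr != KERN_SUCCESS)
		return kr;

	entry->object.vm_object = vm_object_allocate(size);
	entry->offset = (vm_object_offset_t) 0;
	vm_map_unlock(map);		/* find_space returned the map locked */
	return KERN_SUCCESS;
}
#endif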
int vm_map_pmap_enter_print = FALSE;
int vm_map_pmap_enter_enable = FALSE;

/*
 *	Routine:	vm_map_pmap_enter
 *
 *	Description:
 *		Force pages from the specified object to be entered into
 *		the pmap at the specified address if they are present.
 *		As soon as a page is not found in the object, the scan ends.
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */
void
vm_map_pmap_enter(
	vm_map_t		map,
	register vm_offset_t	addr,
	register vm_offset_t	end_addr,
	register vm_object_t	object,
	vm_object_offset_t	offset,
	vm_prot_t		protection)
{
	while (addr < end_addr) {
		register vm_page_t	m;

		vm_object_lock(object);
		vm_object_paging_begin(object);

		m = vm_page_lookup(object, offset);
		if (m == VM_PAGE_NULL || m->busy ||
		    (m->unusual && ( m->error || m->restart || m->absent ||
				    protection & m->page_lock))) {

			vm_object_paging_end(object);
			vm_object_unlock(object);
			return;
		}

		assert(!m->fictitious);	/* XXX is this possible ??? */

		if (vm_map_pmap_enter_print) {
			printf("vm_map_pmap_enter:");
			printf("map: %x, addr: %x, object: %x, offset: %x\n",
			       map, addr, object, offset);
		}

		m->busy = TRUE;

		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_addr);
			m->no_isync = FALSE;
		}
		vm_object_unlock(object);

		PMAP_ENTER(map->pmap, addr, m,
			   protection, FALSE);

		vm_object_lock(object);
		PAGE_WAKEUP_DONE(m);
		vm_page_lock_queues();
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);

		offset += PAGE_SIZE_64;
		addr += PAGE_SIZE;
	}
}
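
/*
 * Illustrative sketch (compiled out): eager pmap entering is gated by
 * vm_map_pmap_enter_enable and, for copyouts, by
 * vm_map_aggressive_enter_max.  The helper only shows the intended call
 * shape; the 128KB cutoff mirrors the check in vm_map_enter() and is an
 * assumption here, not a tunable defined at this point.
 */
#if 0
static void
vm_map_pmap_enter_example(vm_map_t map, vm_offset_t start, vm_offset_t end,
			  vm_object_t object, vm_object_offset_t offset)
{
	if (vm_map_pmap_enter_enable && (end - start) < (128 * 1024))
		vm_map_pmap_enter(map, start, end, object, offset,
				  VM_PROT_READ);
}
#endif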
/*
 *	Routine:	vm_map_enter
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		The resulting range will refer to memory defined by
 *		the given memory object and offset into that object.
 *
 *		Arguments are as defined in the vm_map call.
 */
kern_return_t
vm_map_enter(
	register vm_map_t	map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_entry_t		entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

	boolean_t		anywhere = VM_FLAGS_ANYWHERE & flags;
	char			alias;

	VM_GET_FLAGS_ALIAS(flags, alias);

#define	RETURN(value)	{ result = value; goto BailOut; }

	assert(page_aligned(*address));
	assert(page_aligned(size));
 StartAgain: ;

	start = *address;

	if (anywhere) {
		vm_map_lock(map);

		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			RETURN(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				RETURN(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t)map,
							    THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						goto StartAgain;
					}
				}
				RETURN(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t		temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		vm_map_lock(map);
		if ((start & mask) != 0)
			RETURN(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			RETURN(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			RETURN(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			RETURN(KERN_NO_SPACE);
	}
	/*
	 *	At this point,
	 *		"start" and "end" should define the endpoints of the
	 *			available new range, and
	 *		"entry" should refer to the region before the new
	 *			range, and
	 *
	 *		the map should be locked.
	 */

	/*
	 *	See whether we can avoid creating a new entry (and object) by
	 *	extending one of our neighbors.  [So far, we only attempt to
	 *	extend from below.]
	 */

	if ((object == VM_OBJECT_NULL) &&
	    (entry != vm_map_to_entry(map)) &&
	    (entry->vme_end == start) &&
	    (!entry->is_shared) &&
	    (!entry->is_sub_map) &&
	    (entry->alias == alias) &&
	    (entry->inheritance == inheritance) &&
	    (entry->protection == cur_protection) &&
	    (entry->max_protection == max_protection) &&
	    (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
	    (entry->in_transition == 0) &&
	    ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
	    (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
		if (vm_object_coalesce(entry->object.vm_object,
				VM_OBJECT_NULL,
				entry->offset,
				(vm_object_offset_t) 0,
				(vm_size_t)(entry->vme_end - entry->vme_start),
				(vm_size_t)(end - entry->vme_end))) {

			/*
			 *	Coalesced the two objects - can extend
			 *	the previous map entry to include the
			 *	new range.
			 */
			map->size += (end - entry->vme_end);
			entry->vme_end = end;
			UPDATE_FIRST_FREE(map, map->first_free);
			RETURN(KERN_SUCCESS);
		}
	}

	/*
	 *	Create a new entry
	 */

	{
		register vm_map_entry_t	new_entry;

		new_entry = vm_map_entry_insert(map, entry, start, end, object,
					offset, needs_copy, FALSE, FALSE,
					cur_protection, max_protection,
					VM_BEHAVIOR_DEFAULT, inheritance, 0);
		new_entry->alias = alias;
		vm_map_unlock(map);

		/* Wire down the new entry if the user
		 * requested all new map entries be wired.
		 */
		if (map->wiring_required) {
			result = vm_map_wire(map, start, end,
					     new_entry->protection, TRUE);
			return(result);
		}

		if ((object != VM_OBJECT_NULL) &&
		    (vm_map_pmap_enter_enable) &&
		    (!anywhere) &&
		    (!needs_copy) &&
		    (size < (128*1024))) {
			vm_map_pmap_enter(map, start, end,
					  object, offset, cur_protection);
		}

		return(result);
	}

 BailOut: ;
	vm_map_unlock(map);
	return(result);

#undef	RETURN
}
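
/*
 * Illustrative use of vm_map_enter (compiled out): an "anywhere"
 * allocation of anonymous, zero-fill memory.  Passing VM_OBJECT_NULL
 * lets vm_map_enter coalesce with a compatible neighbor or create the
 * backing object lazily.  The wrapper name is hypothetical.
 */
#if 0
static kern_return_t
vm_map_enter_example(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	*addrp = 0;
	return vm_map_enter(map, addrp, size, (vm_offset_t) 0,
			    VM_FLAGS_ANYWHERE,
			    VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
#endif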
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_start(map, entry, startaddr)			\
MACRO_BEGIN								\
	vm_map_t VMCS_map;						\
	vm_map_entry_t VMCS_entry;					\
	vm_offset_t VMCS_startaddr;					\
	VMCS_map = (map);						\
	VMCS_entry = (entry);						\
	VMCS_startaddr = (startaddr);					\
	if (VMCS_startaddr > VMCS_entry->vme_start) {			\
		if(entry->use_pmap) {					\
			vm_offset_t	pmap_base_addr;			\
			vm_offset_t	pmap_end_addr;			\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start;	\
			pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
			pmap_unnest(map->pmap, pmap_base_addr,		\
				(pmap_end_addr - pmap_base_addr) + 1);	\
			entry->use_pmap = FALSE;			\
		}							\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	}								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free);		\
MACRO_END
#else
#define vm_map_clip_start(map, entry, startaddr)			\
MACRO_BEGIN								\
	vm_map_t VMCS_map;						\
	vm_map_entry_t VMCS_entry;					\
	vm_offset_t VMCS_startaddr;					\
	VMCS_map = (map);						\
	VMCS_entry = (entry);						\
	VMCS_startaddr = (startaddr);					\
	if (VMCS_startaddr > VMCS_entry->vme_start) {			\
		_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
	}								\
	UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free);		\
MACRO_END
#endif

#define vm_map_copy_clip_start(copy, entry, startaddr)			\
MACRO_BEGIN								\
	if ((startaddr) > (entry)->vme_start)				\
		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_start(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		start)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_end = start;
	entry->offset += (start - entry->vme_start);
	entry->vme_start = start;

	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
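
/*
 * Illustrative sketch (compiled out): clipping splits one entry into two
 * abutting entries that share the same backing object, which is why the
 * clip routines take an extra object/submap reference.  After the call
 * below, "entry" starts exactly at "addr".  The map must be write-locked.
 */
#if 0
static void
vm_map_clip_example(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, addr, &entry) &&
	    addr > entry->vme_start) {
		vm_map_clip_start(map, entry, addr);
		assert(entry->vme_start == addr);
	}
}
#endif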
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#ifndef i386
#define vm_map_clip_end(map, entry, endaddr)				\
MACRO_BEGIN								\
	vm_map_t VMCE_map;						\
	vm_map_entry_t VMCE_entry;					\
	vm_offset_t VMCE_endaddr;					\
	VMCE_map = (map);						\
	VMCE_entry = (entry);						\
	VMCE_endaddr = (endaddr);					\
	if (VMCE_endaddr < VMCE_entry->vme_end) {			\
		if(entry->use_pmap) {					\
			vm_offset_t	pmap_base_addr;			\
			vm_offset_t	pmap_end_addr;			\
									\
			pmap_base_addr = 0xF0000000 & entry->vme_start;	\
			pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
			pmap_unnest(map->pmap, pmap_base_addr,		\
				(pmap_end_addr - pmap_base_addr) + 1);	\
			entry->use_pmap = FALSE;			\
		}							\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	}								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free);		\
MACRO_END
#else
#define vm_map_clip_end(map, entry, endaddr)				\
MACRO_BEGIN								\
	vm_map_t VMCE_map;						\
	vm_map_entry_t VMCE_entry;					\
	vm_offset_t VMCE_endaddr;					\
	VMCE_map = (map);						\
	VMCE_entry = (entry);						\
	VMCE_endaddr = (endaddr);					\
	if (VMCE_endaddr < VMCE_entry->vme_end) {			\
		_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
	}								\
	UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free);		\
MACRO_END
#endif

#define vm_map_copy_clip_end(copy, entry, endaddr)			\
MACRO_BEGIN								\
	if ((endaddr) < (entry)->vme_end)				\
		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr));	\
MACRO_END
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
void
_vm_map_clip_end(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry,
	register vm_offset_t		end)
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = _vm_map_entry_create(map_header);
	vm_map_entry_copy_full(new_entry, entry);

	new_entry->vme_start = entry->vme_end = end;
	new_entry->offset += (end - entry->vme_start);

	_vm_map_entry_link(map_header, entry, new_entry);

	if (entry->is_sub_map)
		vm_map_reference(new_entry->object.sub_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
MACRO_BEGIN					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
MACRO_END
/*
 *	vm_map_range_check:	[ internal use only ]
 *
 *	Check that the region defined by the specified start and
 *	end addresses is wholly contained within a single map
 *	entry or set of adjacent map entries of the specified map,
 *	i.e. the specified region contains no unmapped space.
 *	If any or all of the region is unmapped, FALSE is returned.
 *	Otherwise, TRUE is returned and if the output argument 'entry'
 *	is not NULL it points to the map entry containing the start
 *	of the region.
 *
 *	The map is locked for reading on entry and is left locked.
 */
boolean_t
vm_map_range_check(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_entry_t		*entry)
{
	vm_map_entry_t		cur;
	register vm_offset_t	prev;

	/*
	 *	Basic sanity checks first
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
		return (FALSE);

	/*
	 *	Check first if the region starts within a valid
	 *	mapping for the map.
	 */
	if (!vm_map_lookup_entry(map, start, &cur))
		return (FALSE);

	/*
	 *	Optimize for the case that the region is contained
	 *	in a single map entry.
	 */
	if (entry != (vm_map_entry_t *) NULL)
		*entry = cur;
	if (end <= cur->vme_end)
		return (TRUE);

	/*
	 *	If the region is not wholly contained within a
	 *	single entry, walk the entries looking for holes.
	 */
	prev = cur->vme_end;
	cur = cur->vme_next;
	while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
		if (end <= cur->vme_end)
			return (TRUE);
		prev = cur->vme_end;
		cur = cur->vme_next;
	}
	return (FALSE);
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find using
 *	the vm_submap_object, and no other operations may have been
 *	performed on this range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copyin!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
kern_return_t
vm_map_submap(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_map_t		submap,
	vm_offset_t		offset,
	boolean_t		use_pmap)
{
	vm_map_entry_t		entry;
	register kern_return_t	result = KERN_INVALID_ARGUMENT;
	register vm_object_t	object;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->vme_next;

	if(entry == vm_map_to_entry(map)) {
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_clip_end(map, entry, end);

	if ((entry->vme_start == start) && (entry->vme_end == end) &&
	    (!entry->is_sub_map) &&
	    ((object = entry->object.vm_object) == vm_submap_object) &&
	    (object->resident_page_count == 0) &&
	    (object->copy == VM_OBJECT_NULL) &&
	    (object->shadow == VM_OBJECT_NULL) &&
	    (!object->pager_created)) {
		entry->offset = (vm_object_offset_t)offset;
		entry->object.vm_object = VM_OBJECT_NULL;
		vm_object_deallocate(object);
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
#ifndef i386
		if ((use_pmap) && (offset == 0)) {
			/* nest if platform code will allow */
			result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
					   start, end - start);
			if (result)
				panic("pmap_nest failed!");
			entry->use_pmap = TRUE;
		}
#endif
#ifdef i386
		pmap_remove(map->pmap, start, end);
#endif
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}
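
/*
 * Illustrative submap setup (compiled out), following the rules in the
 * comment above: the range is first reserved against vm_submap_object
 * with vm_map_enter, and only then converted with vm_map_submap.  The
 * fixed-address flag and zero offset are assumptions for the sketch.
 */
#if 0
static kern_return_t
vm_map_submap_example(vm_map_t parent, vm_map_t submap,
		      vm_offset_t start, vm_offset_t end)
{
	vm_offset_t	addr = start;
	kern_return_t	kr;

	kr = vm_map_enter(parent, &addr, end - start, (vm_offset_t) 0,
			  VM_FLAGS_FIXED, vm_submap_object,
			  (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;
	return vm_map_submap(parent, start, end, submap,
			     (vm_offset_t) 0, FALSE);
}
#endif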
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
kern_return_t
vm_map_protect(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	new_prot,
	register boolean_t	set_max)
{
	register vm_map_entry_t	current;
	register vm_offset_t	prev;
	vm_map_entry_t		entry;
	vm_prot_t		new_max;
	boolean_t		clip;

	XPR(XPR_VM_MAP,
	    "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
	    (integer_t)map, start, end, new_prot, set_max);

	vm_map_lock(map);

	/*
	 *	Lookup the entry.  If it doesn't start in a valid
	 *	entry, return an error.  Remember if we need to
	 *	clip the entry.  We don't do it here because we don't
	 *	want to make any changes until we've scanned the
	 *	entire range below for address and protection
	 *	violations.
	 */
	if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 *	Make a first pass to check for protection and address
	 *	violations.
	 */

	current = entry;
	prev = current->vme_start;
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		/*
		 *	If there is a hole, return an error.
		 */
		if (current->vme_start != prev) {
			vm_map_unlock(map);
			return(KERN_INVALID_ADDRESS);
		}

		new_max = current->max_protection;
		if(new_prot & VM_PROT_COPY) {
			new_max |= VM_PROT_WRITE;
			if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		} else {
			if ((new_prot & new_max) != new_prot) {
				vm_map_unlock(map);
				return(KERN_PROTECTION_FAILURE);
			}
		}

		prev = current->vme_end;
		current = current->vme_next;
	}

	if (end > prev) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}
	/*
	 *	Go back and fix up protections.
	 *	Clip to start here if the range starts within
	 *	the entry.
	 */

	current = entry;
	if (clip) {
		vm_map_clip_start(map, entry, start);
	}
	while ((current != vm_map_to_entry(map)) &&
	       (current->vme_start < end)) {

		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;

		if(new_prot & VM_PROT_COPY) {
			/* caller is asking specifically to copy the      */
			/* mapped data, this implies that max protection  */
			/* will include write.  Caller must be prepared   */
			/* for loss of shared memory communication in the */
			/* target area after taking this step */
			current->needs_copy = TRUE;
			current->max_protection |= VM_PROT_WRITE;
		}

		if (set_max)
			current->protection =
				(current->max_protection =
				 new_prot & ~VM_PROT_COPY) &
				old_prot;
		else
			current->protection = new_prot & ~VM_PROT_COPY;

		/*
		 *	Update physical map if necessary.
		 *	If the request is to turn off write protection,
		 *	we won't do it for real (in pmap). This is because
		 *	it would cause copy-on-write to fail.  We've already
		 *	set the new protection in the map, so if a
		 *	write-protect fault occurred, it will be fixed up
		 *	properly, COW or not.
		 */
		/* the 256M hack for existing hardware limitations */
		if (current->protection != old_prot) {
			if(current->is_sub_map && current->use_pmap) {
				vm_offset_t	pmap_base_addr;
				vm_offset_t	pmap_end_addr;
				vm_map_entry_t	local_entry;

				pmap_base_addr = 0xF0000000 & current->vme_start;
				pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;

				if(!vm_map_lookup_entry(map,
						pmap_base_addr, &local_entry))
					panic("vm_map_protect: nested pmap area is missing");
				while ((local_entry != vm_map_to_entry(map)) &&
				       (local_entry->vme_start < pmap_end_addr)) {
					local_entry->use_pmap = FALSE;
					local_entry = local_entry->vme_next;
				}
				pmap_unnest(map->pmap, pmap_base_addr,
					    (pmap_end_addr - pmap_base_addr) + 1);
			}
			if (!(current->protection & VM_PROT_WRITE)) {
				/* Look one level in: we support nested pmaps */
				/* from mapped submaps which are direct entries */
				/* in our map */
				if(current->is_sub_map && current->use_pmap) {
					pmap_protect(current->object.sub_map->pmap,
						     current->vme_start,
						     current->vme_end,
						     current->protection);
				} else {
					pmap_protect(map->pmap, current->vme_start,
						     current->vme_end,
						     current->protection);
				}
			}
		}
		current = current->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
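
/*
 * Illustrative use of vm_map_protect (compiled out): dropping a range to
 * read-only.  set_max == FALSE changes only the current protection; the
 * new protection must still be a subset of max_protection or
 * KERN_PROTECTION_FAILURE comes back.
 */
#if 0
static kern_return_t
vm_map_protect_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
}
#endif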
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
kern_return_t
vm_map_inherit(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_inherit_t	new_inheritance)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else {
		temp_entry = temp_entry->vme_next;
		entry = temp_entry;
	}

	/* first check entire range for submaps which can't support the */
	/* given inheritance. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			if(new_inheritance == VM_INHERIT_COPY) {
				vm_map_unlock(map);
				return(KERN_INVALID_ARGUMENT);
			}
		}
		entry = entry->vme_next;
	}

	entry = temp_entry;

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
/*
 *	vm_map_wire:
 *
 *	Sets the pageability of the specified address range in the
 *	target map as wired.  Regions specified as not pageable require
 *	locked-down physical memory and physical page maps.  The
 *	access_type variable indicates types of accesses that must not
 *	generate page faults.  This is checked against protection of
 *	memory being locked-down.
 *
 *	The map must not be locked, but a reference must remain to the
 *	map throughout the call.
 */
kern_return_t
vm_map_wire_nested(
	register vm_map_t	map,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	access_type,
	boolean_t		user_wire,
	pmap_t			map_pmap)
{
	register vm_map_entry_t	entry;
	struct vm_map_entry	*first_entry, tmp_entry;
	vm_map_t		pmap_map;
	register vm_offset_t	s, e;
	kern_return_t		rc;
	boolean_t		need_wakeup;
	boolean_t		main_map = FALSE;
	boolean_t		interruptible_state;
	thread_t		cur_thread;
	unsigned int		last_timestamp;
	vm_size_t		size;

	vm_map_lock(map);
	if(map_pmap == NULL)
		main_map = TRUE;
	last_timestamp = map->timestamp;

	VM_MAP_RANGE_CHECK(map, start, end);
	assert(page_aligned(start));
	assert(page_aligned(end));
	if (start == end) {
		/* We wired what the caller asked for, zero pages */
		vm_map_unlock(map);
		return KERN_SUCCESS;
	}
	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		/* vm_map_clip_start will be done later. */
	} else {
		/* Start address is not in map */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	s = start;
	need_wakeup = FALSE;
	cur_thread = current_thread();
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		/*
		 * If another thread is wiring/unwiring this entry then
		 * block after informing other thread to wake us up.
		 */
		if (entry->in_transition) {
			/*
			 * We have not clipped the entry.  Make sure that
			 * the start address is in range so that the lookup
			 * below will succeed.
			 */
			s = entry->vme_start < start ? start : entry->vme_start;

			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already wired.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}
			/*
			 * User wiring is interruptible
			 */
			vm_map_entry_wait(map,
					  (user_wire) ? THREAD_ABORTSAFE :
							THREAD_UNINT);
			if (user_wire && cur_thread->wait_result ==
			    THREAD_INTERRUPTED) {
				/*
				 * undo the wirings we have done so far
				 * We do not clear the needs_wakeup flag,
				 * because we cannot tell if we were the
				 * only one waiting.
				 */
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}

			vm_map_lock(map);
			/*
			 * Cannot avoid a lookup here. reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped, look it up again.
			 * Worse that can happen is, it may not exist anymore.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				if (!user_wire)
					panic("vm_map_wire: re-lookup failed");

				/*
				 * User: undo everything upto the previous
				 * entry.  let vm_map_unwire worry about
				 * checking the validity of the range.
				 */
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
				return(KERN_FAILURE);
			}
			entry = first_entry;
			continue;
		}
		if(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;
			pmap_t		pmap;

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			sub_start = entry->offset;
			sub_end = entry->vme_end - entry->vme_start;
			sub_end += entry->offset;

			local_end = entry->vme_end;
			if(map_pmap == NULL) {
				if(entry->use_pmap) {
					pmap = entry->object.sub_map->pmap;
				} else {
					pmap = map->pmap;
				}
				if (entry->wired_count) {
					if (entry->wired_count
					    >= MAX_WIRE_COUNT)
						panic("vm_map_wire: too many wirings");

					if (user_wire &&
					    entry->user_wired_count
					    >= MAX_WIRE_COUNT) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if (!user_wire ||
					    (entry->user_wired_count++ == 0))
						entry->wired_count++;
					entry = entry->vme_next;
					continue;
				} else {
					vm_object_t		object;
					vm_object_offset_t	offset_hi;
					vm_object_offset_t	offset_lo;
					vm_object_offset_t	offset;
					vm_prot_t		prot;
					boolean_t		wired;
					vm_behavior_t		behavior;
					vm_offset_t		local_start;
					vm_map_entry_t		local_entry;
					vm_map_version_t	version;
					vm_map_t		lookup_map;

					/* call vm_map_lookup_locked to */
					/* cause any needs copy to be   */
					/* evaluated */
					local_start = entry->vme_start;
					lookup_map = map;
					vm_map_lock_write_to_read(map);
					if(vm_map_lookup_locked(
						&lookup_map, local_start,
						access_type,
						&version, &object,
						&offset, &prot, &wired,
						&behavior, &offset_lo,
						&offset_hi, &pmap_map)) {

						vm_map_unlock(lookup_map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					if(pmap_map != lookup_map)
						vm_map_unlock(pmap_map);
					if(lookup_map != map) {
						vm_map_unlock(lookup_map);
						vm_map_lock(map);
					} else {
						vm_map_unlock(map);
						vm_map_lock(map);
					}
					last_timestamp =
						version.main_timestamp;
					vm_object_unlock(object);
					if (!vm_map_lookup_entry(map,
						local_start, &local_entry)) {
						vm_map_unlock(map);
						vm_map_unwire(map, start,
							entry->vme_start, user_wire);
						return(KERN_FAILURE);
					}
					/* did we have a change of type? */
					if (!local_entry->is_sub_map)
						continue;
					entry = local_entry;
					if (user_wire)
						entry->user_wired_count++;
					entry->wired_count++;

					entry->in_transition = TRUE;

					vm_map_unlock(map);
					rc = vm_map_wire_nested(
						entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, pmap);
					vm_map_lock(map);
					last_timestamp = map->timestamp;
				}
			} else {
				vm_map_unlock(map);
				rc = vm_map_wire_nested(entry->object.sub_map,
						sub_start, sub_end,
						access_type,
						user_wire, map_pmap);
				vm_map_lock(map);
				last_timestamp = map->timestamp;
			}
			s = entry->vme_start;
			e = entry->vme_end;
			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have been clipped
				 * after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map, local_end,
							 &first_entry))
					panic("vm_map_wire: re-lookup failed");

				entry = first_entry;
			}

			last_timestamp = map->timestamp;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < e)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				if (rc != KERN_SUCCESS) {/* from vm_*_wire */
					if (main_map) {
						if (user_wire)
							entry->user_wired_count--;
						entry->wired_count--;
					}
				}
				entry = entry->vme_next;
			}
			if (rc != KERN_SUCCESS) {	/* from vm_*_wire */
				vm_map_unlock(map);
				if (need_wakeup)
					vm_map_entry_wakeup(map);
				/*
				 * undo everything upto the previous entry.
				 */
				(void)vm_map_unwire(map, start, s, user_wire);
				return rc;
			}
			continue;
		}
		/*
		 * If this entry is already wired then increment
		 * the appropriate wire reference count.
		 */
		if (entry->wired_count && main_map) {
			/* sanity check: wired_count is a short */
			if (entry->wired_count >= MAX_WIRE_COUNT)
				panic("vm_map_wire: too many wirings");

			if (user_wire &&
			    entry->user_wired_count >= MAX_WIRE_COUNT) {
				vm_map_unlock(map);
				vm_map_unwire(map, start,
					      entry->vme_start, user_wire);
				return(KERN_FAILURE);
			}
			/*
			 * entry is already wired down, get our reference
			 * after clipping to our range.
			 */
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			if (!user_wire || (entry->user_wired_count++ == 0))
				entry->wired_count++;

			entry = entry->vme_next;
			continue;
		}
		/*
		 * Unwired entry or wire request transmitted via submap
		 */

		/*
		 * Perform actions of vm_map_lookup that need the write
		 * lock on the map: create a shadow object for a
		 * copy-on-write region, or an object for a zero-fill
		 * region.
		 */
		size = entry->vme_end - entry->vme_start;
		/*
		 * If wiring a copy-on-write page, we need to copy it now
		 * even if we're only (currently) requesting read access.
		 * This is aggressive, but once it's wired we can't move it.
		 */
		if (entry->needs_copy) {
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset, size);
			entry->needs_copy = FALSE;
		} else if (entry->object.vm_object == VM_OBJECT_NULL) {
			entry->object.vm_object = vm_object_allocate(size);
			entry->offset = (vm_object_offset_t)0;
		}

		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);

		s = entry->vme_start;
		e = entry->vme_end;
		/*
		 * Check for holes and protection mismatch.
		 *  Holes: Next entry should be contiguous unless this
		 *	   is the end of the region.
		 *  Protection: Access requested must be allowed, unless
		 *	wiring is by protection class
		 */
		if ((((entry->vme_end < end) &&
		      ((entry->vme_next == vm_map_to_entry(map)) ||
		       (entry->vme_next->vme_start > entry->vme_end))) ||
		     ((entry->protection & access_type) != access_type))) {
			/*
			 * Found a hole or protection problem.
			 * Unwire the region we wired so far.
			 */
			if (start != entry->vme_start) {
				vm_map_unlock(map);
				vm_map_unwire(map, start, s, user_wire);
			} else {
				vm_map_unlock(map);
			}
			return((entry->protection & access_type) != access_type ?
				KERN_PROTECTION_FAILURE : KERN_INVALID_ADDRESS);
		}

		assert(entry->wired_count == 0 && entry->user_wired_count == 0);

		if (main_map) {
			if (user_wire)
				entry->user_wired_count++;
			entry->wired_count++;
		}

		entry->in_transition = TRUE;

		/*
		 * This entry might get split once we unlock the map.
		 * In vm_fault_wire(), we need the current range as
		 * defined by this entry.  In order for this to work
		 * along with a simultaneous clip operation, we make a
		 * temporary copy of this entry and use that for the
		 * wiring.  Note that the underlying objects do not
		 * change during a clip.
		 */
		tmp_entry = *entry;
2466 * The in_transition state guarentees that the entry
2467 * (or entries for this range, if split occured) will be
2468 * there when the map lock is acquired for the second time.
2472 if (!user_wire
&& cur_thread
!= THREAD_NULL
) {
2473 interruptible_state
= cur_thread
->interruptible
;
2474 cur_thread
->interruptible
= FALSE
;
2478 rc
= vm_fault_wire(map
, &tmp_entry
, map_pmap
);
2480 rc
= vm_fault_wire(map
, &tmp_entry
, map
->pmap
);
2482 if (!user_wire
&& cur_thread
!= THREAD_NULL
)
2483 cur_thread
->interruptible
= interruptible_state
;
2487 if (last_timestamp
+1 != map
->timestamp
) {
2489 * Find the entry again. It could have been clipped
2490 * after we unlocked the map.
2492 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2494 panic("vm_map_wire: re-lookup failed");
2496 entry
= first_entry
;
2499 last_timestamp
= map
->timestamp
;
2501 while ((entry
!= vm_map_to_entry(map
)) &&
2502 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2503 assert(entry
->in_transition
);
2504 entry
->in_transition
= FALSE
;
2505 if (entry
->needs_wakeup
) {
2506 entry
->needs_wakeup
= FALSE
;
2509 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2512 entry
->user_wired_count
--;
2513 entry
->wired_count
--;
2516 entry
= entry
->vme_next
;
2519 if (rc
!= KERN_SUCCESS
) { /* from vm_*_wire */
2522 vm_map_entry_wakeup(map
);
2524 * undo everything upto the previous entry.
2526 (void)vm_map_unwire(map
, start
, s
, user_wire
);
2529 } /* end while loop through map entries */
2533 * wake up anybody waiting on entries we wired.
2536 vm_map_entry_wakeup(map
);
2538 return(KERN_SUCCESS
);
2544 register vm_map_t map
,
2545 register vm_offset_t start
,
2546 register vm_offset_t end
,
2547 register vm_prot_t access_type
,
2548 boolean_t user_wire
)
2555 * the calls to mapping_prealloc and mapping_relpre
2556 * (along with the VM_MAP_RANGE_CHECK to insure a
2557 * resonable range was passed in) are
2558 * currently necessary because
2559 * we haven't enabled kernel pre-emption
2560 * and/or the pmap_enter cannot purge and re-use
2563 VM_MAP_RANGE_CHECK(map
, start
, end
);
2564 mapping_prealloc(end
- start
);
2566 kret
= vm_map_wire_nested(map
, start
, end
, access_type
,
2567 user_wire
, (pmap_t
)NULL
);
2577 * Sets the pageability of the specified address range in the target
2578 * as pageable. Regions specified must have been wired previously.
2580 * The map must not be locked, but a reference must remain to the map
2581 * throughout the call.
2583 * Kernel will panic on failures. User unwire ignores holes and
2584 * unwired and intransition entries to avoid losing memory by leaving
2588 vm_map_unwire_nested(
2589 register vm_map_t map
,
2590 register vm_offset_t start
,
2591 register vm_offset_t end
,
2592 boolean_t user_wire
,
2595 register vm_map_entry_t entry
;
2596 struct vm_map_entry
*first_entry
, tmp_entry
;
2597 boolean_t need_wakeup
;
2598 boolean_t main_map
= FALSE
;
2599 unsigned int last_timestamp
;
2602 if(map_pmap
== NULL
)
2604 last_timestamp
= map
->timestamp
;
2606 VM_MAP_RANGE_CHECK(map
, start
, end
);
2607 assert(page_aligned(start
));
2608 assert(page_aligned(end
));
2610 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
2611 entry
= first_entry
;
2612 /* vm_map_clip_start will be done later. */
2615 /* Start address is not in map. */
2617 return(KERN_INVALID_ADDRESS
);
2620 need_wakeup
= FALSE
;
2621 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
2622 if (entry
->in_transition
) {
2625 * Another thread is wiring down this entry. Note
2626 * that if it is not for the other thread we would
2627 * be unwiring an unwired entry. This is not
2628 * permitted. If we wait, we will be unwiring memory
2632 * Another thread is unwiring this entry. We did not
2633 * have a reference to it, because if we did, this
2634 * entry will not be getting unwired now.
2637 panic("vm_map_unwire: in_transition entry");
2639 entry
= entry
->vme_next
;
2643 if(entry
->is_sub_map
) {
2644 vm_offset_t sub_start
;
2645 vm_offset_t sub_end
;
2646 vm_offset_t local_end
;
2650 vm_map_clip_start(map
, entry
, start
);
2651 vm_map_clip_end(map
, entry
, end
);
2653 sub_start
= entry
->offset
;
2654 sub_end
= entry
->vme_end
- entry
->vme_start
;
2655 sub_end
+= entry
->offset
;
2656 local_end
= entry
->vme_end
;
2657 if(map_pmap
== NULL
) {
2658 if(entry
->use_pmap
) {
2659 pmap
= entry
->object
.sub_map
->pmap
;
2663 if (entry
->wired_count
== 0 ||
2664 (user_wire
&& entry
->user_wired_count
== 0)) {
2666 panic("vm_map_unwire: entry is unwired");
2667 entry
= entry
->vme_next
;
2673 * Holes: Next entry should be contiguous unless
2674 * this is the end of the region.
2676 if (((entry
->vme_end
< end
) &&
2677 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2678 (entry
->vme_next
->vme_start
2679 > entry
->vme_end
)))) {
2681 panic("vm_map_unwire: non-contiguous region");
2683 entry = entry->vme_next;
2688 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2689 entry
->wired_count
--;
2691 if (entry
->wired_count
!= 0) {
2692 entry
= entry
->vme_next
;
2696 entry
->in_transition
= TRUE
;
2697 tmp_entry
= *entry
;/* see comment in vm_map_wire() */
2700 * We can unlock the map now. The in_transition state
2701 * guarantees existance of the entry.
2704 vm_map_unwire_nested(entry
->object
.sub_map
,
2705 sub_start
, sub_end
, user_wire
, pmap
);
2708 if (last_timestamp
+1 != map
->timestamp
) {
2710 * Find the entry again. It could have been
2711 * clipped or deleted after we unlocked the map.
2713 if (!vm_map_lookup_entry(map
,
2714 tmp_entry
.vme_start
,
2717 panic("vm_map_unwire: re-lookup failed");
2718 entry
= first_entry
->vme_next
;
2720 entry
= first_entry
;
2722 last_timestamp
= map
->timestamp
;
2725 * clear transition bit for all constituent entries
2726 * that were in the original entry (saved in
2727 * tmp_entry). Also check for waiters.
2729 while ((entry
!= vm_map_to_entry(map
)) &&
2730 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2731 assert(entry
->in_transition
);
2732 entry
->in_transition
= FALSE
;
2733 if (entry
->needs_wakeup
) {
2734 entry
->needs_wakeup
= FALSE
;
2737 entry
= entry
->vme_next
;
2742 vm_map_unwire_nested(entry
->object
.sub_map
,
2743 sub_start
, sub_end
, user_wire
, pmap
);
2746 if (last_timestamp
+1 != map
->timestamp
) {
2748 * Find the entry again. It could have been
2749 * clipped or deleted after we unlocked the map.
2751 if (!vm_map_lookup_entry(map
,
2752 tmp_entry
.vme_start
,
2755 panic("vm_map_unwire: re-lookup failed");
2756 entry
= first_entry
->vme_next
;
2758 entry
= first_entry
;
2760 last_timestamp
= map
->timestamp
;
2765 if (main_map
&& (entry
->wired_count
== 0 ||
2766 (user_wire
&& entry
->user_wired_count
== 0))) {
2768 panic("vm_map_unwire: entry is unwired");
2770 entry
= entry
->vme_next
;
2774 assert(entry
->wired_count
> 0 &&
2775 (!user_wire
|| entry
->user_wired_count
> 0));
2777 vm_map_clip_start(map
, entry
, start
);
2778 vm_map_clip_end(map
, entry
, end
);
2782 * Holes: Next entry should be contiguous unless
2783 * this is the end of the region.
2785 if (((entry
->vme_end
< end
) &&
2786 ((entry
->vme_next
== vm_map_to_entry(map
)) ||
2787 (entry
->vme_next
->vme_start
> entry
->vme_end
)))) {
2790 panic("vm_map_unwire: non-contiguous region");
2791 entry
= entry
->vme_next
;
2796 if (!user_wire
|| (--entry
->user_wired_count
== 0))
2797 entry
->wired_count
--;
2799 if (entry
->wired_count
!= 0) {
2800 entry
= entry
->vme_next
;
2805 entry
->in_transition
= TRUE
;
2806 tmp_entry
= *entry
; /* see comment in vm_map_wire() */
2809 * We can unlock the map now. The in_transition state
2810 * guarantees existance of the entry.
2814 vm_fault_unwire(map
, &tmp_entry
, FALSE
, map_pmap
);
2816 vm_fault_unwire(map
, &tmp_entry
, FALSE
, map
->pmap
);
2820 if (last_timestamp
+1 != map
->timestamp
) {
2822 * Find the entry again. It could have been clipped
2823 * or deleted after we unlocked the map.
2825 if (!vm_map_lookup_entry(map
, tmp_entry
.vme_start
,
2828 panic("vm_map_unwire: re-lookup failed");
2829 entry
= first_entry
->vme_next
;
2831 entry
= first_entry
;
2833 last_timestamp
= map
->timestamp
;
2836 * clear transition bit for all constituent entries that
2837 * were in the original entry (saved in tmp_entry). Also
2838 * check for waiters.
2840 while ((entry
!= vm_map_to_entry(map
)) &&
2841 (entry
->vme_start
< tmp_entry
.vme_end
)) {
2842 assert(entry
->in_transition
);
2843 entry
->in_transition
= FALSE
;
2844 if (entry
->needs_wakeup
) {
2845 entry
->needs_wakeup
= FALSE
;
2848 entry
= entry
->vme_next
;
2853 * wake up anybody waiting on entries that we have unwired.
2856 vm_map_entry_wakeup(map
);
2857 return(KERN_SUCCESS
);
2863 register vm_map_t map
,
2864 register vm_offset_t start
,
2865 register vm_offset_t end
,
2866 boolean_t user_wire
)
2868 return vm_map_unwire_nested(map
, start
, end
, user_wire
, (pmap_t
)NULL
);
2873 * vm_map_entry_delete: [ internal use only ]
2875 * Deallocate the given entry from the target map.
2878 vm_map_entry_delete(
2879 register vm_map_t map
,
2880 register vm_map_entry_t entry
)
2882 register vm_offset_t s
, e
;
2883 register vm_object_t object
;
2884 register vm_map_t submap
;
2885 extern vm_object_t kernel_object
;
2887 s
= entry
->vme_start
;
2889 assert(page_aligned(s
));
2890 assert(page_aligned(e
));
2891 assert(entry
->wired_count
== 0);
2892 assert(entry
->user_wired_count
== 0);
2894 if (entry
->is_sub_map
) {
2896 submap
= entry
->object
.sub_map
;
2899 object
= entry
->object
.vm_object
;
2902 vm_map_entry_unlink(map
, entry
);
2905 vm_map_entry_dispose(map
, entry
);
2909 * Deallocate the object only after removing all
2910 * pmap entries pointing to its pages.
2913 vm_map_deallocate(submap
);
2915 vm_object_deallocate(object
);
2920 vm_map_submap_pmap_clean(
2927 vm_offset_t submap_start
;
2928 vm_offset_t submap_end
;
2930 vm_size_t remove_size
;
2931 vm_map_entry_t entry
;
2933 submap_end
= offset
+ (end
- start
);
2934 submap_start
= offset
;
2935 if(vm_map_lookup_entry(sub_map
, offset
, &entry
)) {
2937 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2938 if(offset
> entry
->vme_start
)
2939 remove_size
-= offset
- entry
->vme_start
;
2942 if(submap_end
< entry
->vme_end
) {
2944 entry
->vme_end
- submap_end
;
2946 if(entry
->is_sub_map
) {
2947 vm_map_submap_pmap_clean(
2950 start
+ remove_size
,
2951 entry
->object
.sub_map
,
2954 pmap_remove(map
->pmap
, start
, start
+ remove_size
);
2958 entry
= entry
->vme_next
;
2960 while((entry
!= vm_map_to_entry(sub_map
))
2961 && (entry
->vme_start
< submap_end
)) {
2962 remove_size
= (entry
->vme_end
- entry
->vme_start
);
2963 if(submap_end
< entry
->vme_end
) {
2964 remove_size
-= entry
->vme_end
- submap_end
;
2966 if(entry
->is_sub_map
) {
2967 vm_map_submap_pmap_clean(
2969 (start
+ entry
->vme_start
) - offset
,
2970 ((start
+ entry
->vme_start
) - offset
) + remove_size
,
2971 entry
->object
.sub_map
,
2974 pmap_remove(map
->pmap
,
2975 (start
+ entry
->vme_start
) - offset
,
2976 ((start
+ entry
->vme_start
) - offset
) + remove_size
);
2978 entry
= entry
->vme_next
;
2984 * vm_map_delete: [ internal use only ]
2986 * Deallocates the given address range from the target map.
2987 * Removes all user wirings. Unwires one kernel wiring if
2988 * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go
2989 * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps
2990 * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
2992 * This routine is called with map locked and leaves map locked.
2996 register vm_map_t map
,
2998 register vm_offset_t end
,
3001 vm_map_entry_t entry
, next
;
3002 struct vm_map_entry
*first_entry
, tmp_entry
;
3003 register vm_offset_t s
, e
;
3004 register vm_object_t object
;
3005 boolean_t need_wakeup
;
3006 unsigned int last_timestamp
= ~0; /* unlikely value */
3008 extern vm_map_t kernel_map
;
3010 interruptible
= (flags
& VM_MAP_REMOVE_INTERRUPTIBLE
) ?
3011 THREAD_ABORTSAFE
: THREAD_UNINT
;
3014 * All our DMA I/O operations in IOKit are currently done by
3015 * wiring through the map entries of the task requesting the I/O.
3016 * Because of this, we must always wait for kernel wirings
3017 * to go away on the entries before deleting them.
3019 * Any caller who wants to actually remove a kernel wiring
3020 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
3021 * properly remove one wiring instead of blasting through
3024 flags
|= VM_MAP_REMOVE_WAIT_FOR_KWIRE
;
3027 * Find the start of the region, and clip it
3029 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3030 entry
= first_entry
;
3031 vm_map_clip_start(map
, entry
, start
);
3034 * Fix the lookup hint now, rather than each
3035 * time through the loop.
3037 SAVE_HINT(map
, entry
->vme_prev
);
3039 entry
= first_entry
->vme_next
;
3042 need_wakeup
= FALSE
;
3044 * Step through all entries in this region
3046 while ((entry
!= vm_map_to_entry(map
)) && (entry
->vme_start
< end
)) {
3048 vm_map_clip_end(map
, entry
, end
);
3049 if (entry
->in_transition
) {
3051 * Another thread is wiring/unwiring this entry.
3052 * Let the other thread know we are waiting.
3054 s
= entry
->vme_start
;
3055 entry
->needs_wakeup
= TRUE
;
3058 * wake up anybody waiting on entries that we have
3059 * already unwired/deleted.
3062 vm_map_entry_wakeup(map
);
3063 need_wakeup
= FALSE
;
3066 vm_map_entry_wait(map
, interruptible
);
3068 if (interruptible
&&
3069 current_thread()->wait_result
== THREAD_INTERRUPTED
)
3071 * We do not clear the needs_wakeup flag,
3072 * since we cannot tell if we were the only one.
3074 return KERN_ABORTED
;
3078 * Cannot avoid a lookup here. reset timestamp.
3080 last_timestamp
= map
->timestamp
;
3083 * The entry could have been clipped or it
3084 * may not exist anymore. Look it up again.
3086 if (!vm_map_lookup_entry(map
, s
, &first_entry
)) {
3087 assert((map
!= kernel_map
) &&
3088 (!entry
->is_sub_map
));
3090 * User: use the next entry
3092 entry
= first_entry
->vme_next
;
3094 entry
= first_entry
;
3095 SAVE_HINT(map
, entry
->vme_prev
);
3098 } /* end in_transition */
3100 if (entry
->wired_count
) {
3102 * Remove a kernel wiring if requested or if
3103 * there are user wirings.
3105 if ((flags
& VM_MAP_REMOVE_KUNWIRE
) ||
3106 (entry
->user_wired_count
> 0))
3107 entry
->wired_count
--;
3109 /* remove all user wire references */
3110 entry
->user_wired_count
= 0;
3112 if (entry
->wired_count
!= 0) {
3113 assert((map
!= kernel_map
) &&
3114 (!entry
->is_sub_map
));
3116 * Cannot continue. Typical case is when
3117 * a user thread has physical io pending on
3118 * on this page. Either wait for the
3119 * kernel wiring to go away or return an
3122 if (flags
& VM_MAP_REMOVE_WAIT_FOR_KWIRE
) {
3124 s
= entry
->vme_start
;
3125 entry
->needs_wakeup
= TRUE
;
3126 vm_map_entry_wait(map
, interruptible
);
3128 if (interruptible
&&
3129 current_thread()->wait_result
==
3132 * We do not clear the
3133 * needs_wakeup flag, since we
3134 * cannot tell if we were the
3137 return KERN_ABORTED
;
3141 * Cannot avoid a lookup here. reset
3144 last_timestamp
= map
->timestamp
;
3147 * The entry could have been clipped or
3148 * it may not exist anymore. Look it
3151 if (!vm_map_lookup_entry(map
, s
,
3153 assert((map
!= kernel_map
) &&
3154 (!entry
->is_sub_map
));
3156 * User: use the next entry
3158 entry
= first_entry
->vme_next
;
3160 entry
= first_entry
;
3161 SAVE_HINT(map
, entry
->vme_prev
);
3166 return KERN_FAILURE
;
3170 entry
->in_transition
= TRUE
;
3172 * copy current entry. see comment in vm_map_wire()
3175 s
= entry
->vme_start
;
3179 * We can unlock the map now. The in_transition
3180 * state guarentees existance of the entry.
3183 vm_fault_unwire(map
, &tmp_entry
,
3184 tmp_entry
.object
.vm_object
== kernel_object
,
3188 if (last_timestamp
+1 != map
->timestamp
) {
3190 * Find the entry again. It could have
3191 * been clipped after we unlocked the map.
3193 if (!vm_map_lookup_entry(map
, s
, &first_entry
)){
3194 assert((map
!= kernel_map
) &&
3195 (!entry
->is_sub_map
));
3196 first_entry
= first_entry
->vme_next
;
3198 SAVE_HINT(map
, entry
->vme_prev
);
3201 SAVE_HINT(map
, entry
->vme_prev
);
3202 first_entry
= entry
;
3205 last_timestamp
= map
->timestamp
;
3207 entry
= first_entry
;
3208 while ((entry
!= vm_map_to_entry(map
)) &&
3209 (entry
->vme_start
< tmp_entry
.vme_end
)) {
3210 assert(entry
->in_transition
);
3211 entry
->in_transition
= FALSE
;
3212 if (entry
->needs_wakeup
) {
3213 entry
->needs_wakeup
= FALSE
;
3216 entry
= entry
->vme_next
;
3219 * We have unwired the entry(s). Go back and
3222 entry
= first_entry
;
3226 /* entry is unwired */
3227 assert(entry
->wired_count
== 0);
3228 assert(entry
->user_wired_count
== 0);
3230 if ((!entry
->is_sub_map
&&
3231 entry
->object
.vm_object
!= kernel_object
) ||
3232 entry
->is_sub_map
) {
3233 if(entry
->is_sub_map
) {
3234 if(entry
->use_pmap
) {
3236 pmap_unnest(map
->pmap
, entry
->vme_start
,
3237 entry
->vme_end
- entry
->vme_start
);
3240 vm_map_submap_pmap_clean(
3241 map
, entry
->vme_start
, entry
->vme_end
,
3242 entry
->object
.sub_map
,
3246 pmap_remove(map
->pmap
,
3247 entry
->vme_start
, entry
->vme_end
);
3251 next
= entry
->vme_next
;
3252 s
= next
->vme_start
;
3253 last_timestamp
= map
->timestamp
;
3254 vm_map_entry_delete(map
, entry
);
3255 /* vm_map_entry_delete unlocks the map */
3259 if(entry
== vm_map_to_entry(map
)) {
3262 if (last_timestamp
+1 != map
->timestamp
) {
3264 * we are responsible for deleting everything
3265 * from the give space, if someone has interfered
3266 * we pick up where we left off, back fills should
3267 * be all right for anyone except map_delete and
3268 * we have to assume that the task has been fully
3269 * disabled before we get here
3271 if (!vm_map_lookup_entry(map
, s
, &entry
)){
3272 entry
= entry
->vme_next
;
3274 SAVE_HINT(map
, entry
->vme_prev
);
3277 * others can not only allocate behind us, we can
3278 * also see coalesce while we don't have the map lock
3280 if(entry
== vm_map_to_entry(map
)) {
3283 vm_map_clip_start(map
, entry
, s
);
3285 last_timestamp
= map
->timestamp
;
3288 if (map
->wait_for_space
)
3289 thread_wakeup((event_t
) map
);
3291 * wake up anybody waiting on entries that we have already deleted.
3294 vm_map_entry_wakeup(map
);
3296 return KERN_SUCCESS
;
3302 * Remove the given address range from the target map.
3303 * This is the exported form of vm_map_delete.
3307 register vm_map_t map
,
3308 register vm_offset_t start
,
3309 register vm_offset_t end
,
3310 register boolean_t flags
)
3312 register kern_return_t result
;
3315 VM_MAP_RANGE_CHECK(map
, start
, end
);
3316 result
= vm_map_delete(map
, start
, end
, flags
);
3324 * Routine: vm_map_copy_discard
3327 * Dispose of a map copy object (returned by
3331 vm_map_copy_discard(
3334 TR_DECL("vm_map_copy_discard");
3336 /* tr3("enter: copy 0x%x type %d", copy, copy->type);*/
3338 if (copy
== VM_MAP_COPY_NULL
)
3341 switch (copy
->type
) {
3342 case VM_MAP_COPY_ENTRY_LIST
:
3343 while (vm_map_copy_first_entry(copy
) !=
3344 vm_map_copy_to_entry(copy
)) {
3345 vm_map_entry_t entry
= vm_map_copy_first_entry(copy
);
3347 vm_map_copy_entry_unlink(copy
, entry
);
3348 vm_object_deallocate(entry
->object
.vm_object
);
3349 vm_map_copy_entry_dispose(copy
, entry
);
3352 case VM_MAP_COPY_OBJECT
:
3353 vm_object_deallocate(copy
->cpy_object
);
3355 case VM_MAP_COPY_KERNEL_BUFFER
:
3358 * The vm_map_copy_t and possibly the data buffer were
3359 * allocated by a single call to kalloc(), i.e. the
3360 * vm_map_copy_t was not allocated out of the zone.
3362 kfree((vm_offset_t
) copy
, copy
->cpy_kalloc_size
);
3365 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
3369 * Routine: vm_map_copy_copy
3372 * Move the information in a map copy object to
3373 * a new map copy object, leaving the old one
3376 * This is used by kernel routines that need
3377 * to look at out-of-line data (in copyin form)
3378 * before deciding whether to return SUCCESS.
3379 * If the routine returns FAILURE, the original
3380 * copy object will be deallocated; therefore,
3381 * these routines must make a copy of the copy
3382 * object and leave the original empty so that
3383 * deallocation will not fail.
3389 vm_map_copy_t new_copy
;
3391 if (copy
== VM_MAP_COPY_NULL
)
3392 return VM_MAP_COPY_NULL
;
3395 * Allocate a new copy object, and copy the information
3396 * from the old one into it.
3399 new_copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
3402 if (copy
->type
== VM_MAP_COPY_ENTRY_LIST
) {
3404 * The links in the entry chain must be
3405 * changed to point to the new copy object.
3407 vm_map_copy_first_entry(copy
)->vme_prev
3408 = vm_map_copy_to_entry(new_copy
);
3409 vm_map_copy_last_entry(copy
)->vme_next
3410 = vm_map_copy_to_entry(new_copy
);
3414 * Change the old copy object into one that contains
3415 * nothing to be deallocated.
3417 copy
->type
= VM_MAP_COPY_OBJECT
;
3418 copy
->cpy_object
= VM_OBJECT_NULL
;
3421 * Return the new object.
3427 vm_map_overwrite_submap_recurse(
3429 vm_offset_t dst_addr
,
3432 vm_offset_t dst_end
;
3433 vm_map_entry_t tmp_entry
;
3434 vm_map_entry_t entry
;
3435 kern_return_t result
;
3436 boolean_t encountered_sub_map
= FALSE
;
3441 * Verify that the destination is all writeable
3442 * initially. We have to trunc the destination
3443 * address and round the copy size or we'll end up
3444 * splitting entries in strange ways.
3447 dst_end
= round_page(dst_addr
+ dst_size
);
3450 vm_map_lock(dst_map
);
3451 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3452 vm_map_unlock(dst_map
);
3453 return(KERN_INVALID_ADDRESS
);
3456 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3458 for (entry
= tmp_entry
;;) {
3459 vm_map_entry_t next
;
3461 next
= entry
->vme_next
;
3462 while(entry
->is_sub_map
) {
3463 vm_offset_t sub_start
;
3464 vm_offset_t sub_end
;
3465 vm_offset_t local_end
;
3467 if (entry
->in_transition
) {
3469 * Say that we are waiting, and wait for entry.
3471 entry
->needs_wakeup
= TRUE
;
3472 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3477 encountered_sub_map
= TRUE
;
3478 sub_start
= entry
->offset
;
3480 if(entry
->vme_end
< dst_end
)
3481 sub_end
= entry
->vme_end
;
3484 sub_end
-= entry
->vme_start
;
3485 sub_end
+= entry
->offset
;
3486 local_end
= entry
->vme_end
;
3487 vm_map_unlock(dst_map
);
3489 result
= vm_map_overwrite_submap_recurse(
3490 entry
->object
.sub_map
,
3492 sub_end
- sub_start
);
3494 if(result
!= KERN_SUCCESS
)
3496 if (dst_end
<= entry
->vme_end
)
3497 return KERN_SUCCESS
;
3498 vm_map_lock(dst_map
);
3499 if(!vm_map_lookup_entry(dst_map
, local_end
,
3501 vm_map_unlock(dst_map
);
3502 return(KERN_INVALID_ADDRESS
);
3505 next
= entry
->vme_next
;
3508 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3509 vm_map_unlock(dst_map
);
3510 return(KERN_PROTECTION_FAILURE
);
3514 * If the entry is in transition, we must wait
3515 * for it to exit that state. Anything could happen
3516 * when we unlock the map, so start over.
3518 if (entry
->in_transition
) {
3521 * Say that we are waiting, and wait for entry.
3523 entry
->needs_wakeup
= TRUE
;
3524 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3530 * our range is contained completely within this map entry
3532 if (dst_end
<= entry
->vme_end
) {
3533 vm_map_unlock(dst_map
);
3534 return KERN_SUCCESS
;
3537 * check that range specified is contiguous region
3539 if ((next
== vm_map_to_entry(dst_map
)) ||
3540 (next
->vme_start
!= entry
->vme_end
)) {
3541 vm_map_unlock(dst_map
);
3542 return(KERN_INVALID_ADDRESS
);
3546 * Check for permanent objects in the destination.
3548 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3549 ((!entry
->object
.vm_object
->internal
) ||
3550 (entry
->object
.vm_object
->true_share
))) {
3551 if(encountered_sub_map
) {
3552 vm_map_unlock(dst_map
);
3553 return(KERN_FAILURE
);
3560 vm_map_unlock(dst_map
);
3561 return(KERN_SUCCESS
);
3565 * Routine: vm_map_copy_overwrite
3568 * Copy the memory described by the map copy
3569 * object (copy; returned by vm_map_copyin) onto
3570 * the specified destination region (dst_map, dst_addr).
3571 * The destination must be writeable.
3573 * Unlike vm_map_copyout, this routine actually
3574 * writes over previously-mapped memory. If the
3575 * previous mapping was to a permanent (user-supplied)
3576 * memory object, it is preserved.
3578 * The attributes (protection and inheritance) of the
3579 * destination region are preserved.
3581 * If successful, consumes the copy object.
3582 * Otherwise, the caller is responsible for it.
3584 * Implementation notes:
3585 * To overwrite aligned temporary virtual memory, it is
3586 * sufficient to remove the previous mapping and insert
3587 * the new copy. This replacement is done either on
3588 * the whole region (if no permanent virtual memory
3589 * objects are embedded in the destination region) or
3590 * in individual map entries.
3592 * To overwrite permanent virtual memory , it is necessary
3593 * to copy each page, as the external memory management
3594 * interface currently does not provide any optimizations.
3596 * Unaligned memory also has to be copied. It is possible
3597 * to use 'vm_trickery' to copy the aligned data. This is
3598 * not done but not hard to implement.
3600 * Once a page of permanent memory has been overwritten,
3601 * it is impossible to interrupt this function; otherwise,
3602 * the call would be neither atomic nor location-independent.
3603 * The kernel-state portion of a user thread must be
3606 * It may be expensive to forward all requests that might
3607 * overwrite permanent memory (vm_write, vm_copy) to
3608 * uninterruptible kernel threads. This routine may be
3609 * called by interruptible threads; however, success is
3610 * not guaranteed -- if the request cannot be performed
3611 * atomically and interruptibly, an error indication is
3616 vm_map_copy_overwrite_nested(
3618 vm_offset_t dst_addr
,
3620 boolean_t interruptible
,
3623 vm_offset_t dst_end
;
3624 vm_map_entry_t tmp_entry
;
3625 vm_map_entry_t entry
;
3627 boolean_t aligned
= TRUE
;
3628 boolean_t contains_permanent_objects
= FALSE
;
3629 boolean_t encountered_sub_map
= FALSE
;
3630 vm_offset_t base_addr
;
3631 vm_size_t copy_size
;
3632 vm_size_t total_size
;
3636 * Check for null copy object.
3639 if (copy
== VM_MAP_COPY_NULL
)
3640 return(KERN_SUCCESS
);
3643 * Check for special kernel buffer allocated
3644 * by new_ipc_kmsg_copyin.
3647 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
3648 return(vm_map_copyout_kernel_buffer(
3654 * Only works for entry lists at the moment. Will
3655 * support page lists later.
3658 assert(copy
->type
== VM_MAP_COPY_ENTRY_LIST
);
3660 if (copy
->size
== 0) {
3661 vm_map_copy_discard(copy
);
3662 return(KERN_SUCCESS
);
3666 * Verify that the destination is all writeable
3667 * initially. We have to trunc the destination
3668 * address and round the copy size or we'll end up
3669 * splitting entries in strange ways.
3672 if (!page_aligned(copy
->size
) ||
3673 !page_aligned (copy
->offset
) ||
3674 !page_aligned (dst_addr
))
3677 dst_end
= round_page(dst_addr
+ copy
->size
);
3679 dst_end
= dst_addr
+ copy
->size
;
3683 vm_map_lock(dst_map
);
3684 if (!vm_map_lookup_entry(dst_map
, dst_addr
, &tmp_entry
)) {
3685 vm_map_unlock(dst_map
);
3686 return(KERN_INVALID_ADDRESS
);
3688 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(dst_addr
));
3689 for (entry
= tmp_entry
;;) {
3690 vm_map_entry_t next
= entry
->vme_next
;
3692 while(entry
->is_sub_map
) {
3693 vm_offset_t sub_start
;
3694 vm_offset_t sub_end
;
3695 vm_offset_t local_end
;
3697 if (entry
->in_transition
) {
3700 * Say that we are waiting, and wait for entry.
3702 entry
->needs_wakeup
= TRUE
;
3703 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3708 local_end
= entry
->vme_end
;
3709 if (!(entry
->needs_copy
)) {
3710 /* if needs_copy we are a COW submap */
3711 /* in such a case we just replace so */
3712 /* there is no need for the follow- */
3714 encountered_sub_map
= TRUE
;
3715 sub_start
= entry
->offset
;
3717 if(entry
->vme_end
< dst_end
)
3718 sub_end
= entry
->vme_end
;
3721 sub_end
-= entry
->vme_start
;
3722 sub_end
+= entry
->offset
;
3723 vm_map_unlock(dst_map
);
3725 kr
= vm_map_overwrite_submap_recurse(
3726 entry
->object
.sub_map
,
3728 sub_end
- sub_start
);
3729 if(kr
!= KERN_SUCCESS
)
3731 vm_map_lock(dst_map
);
3734 if (dst_end
<= entry
->vme_end
)
3735 goto start_overwrite
;
3736 if(!vm_map_lookup_entry(dst_map
, local_end
,
3738 vm_map_unlock(dst_map
);
3739 return(KERN_INVALID_ADDRESS
);
3741 next
= entry
->vme_next
;
3744 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
3745 vm_map_unlock(dst_map
);
3746 return(KERN_PROTECTION_FAILURE
);
3750 * If the entry is in transition, we must wait
3751 * for it to exit that state. Anything could happen
3752 * when we unlock the map, so start over.
3754 if (entry
->in_transition
) {
3757 * Say that we are waiting, and wait for entry.
3759 entry
->needs_wakeup
= TRUE
;
3760 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3766 * our range is contained completely within this map entry
3768 if (dst_end
<= entry
->vme_end
)
3771 * check that range specified is contiguous region
3773 if ((next
== vm_map_to_entry(dst_map
)) ||
3774 (next
->vme_start
!= entry
->vme_end
)) {
3775 vm_map_unlock(dst_map
);
3776 return(KERN_INVALID_ADDRESS
);
3781 * Check for permanent objects in the destination.
3783 if ((entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
3784 ((!entry
->object
.vm_object
->internal
) ||
3785 (entry
->object
.vm_object
->true_share
))) {
3786 contains_permanent_objects
= TRUE
;
3794 * If there are permanent objects in the destination, then
3795 * the copy cannot be interrupted.
3798 if (interruptible
&& contains_permanent_objects
) {
3799 vm_map_unlock(dst_map
);
3800 return(KERN_FAILURE
); /* XXX */
3805 * Make a second pass, overwriting the data
3806 * At the beginning of each loop iteration,
3807 * the next entry to be overwritten is "tmp_entry"
3808 * (initially, the value returned from the lookup above),
3809 * and the starting address expected in that entry
3813 total_size
= copy
->size
;
3814 if(encountered_sub_map
) {
3816 /* re-calculate tmp_entry since we've had the map */
3818 if (!vm_map_lookup_entry( dst_map
, dst_addr
, &tmp_entry
)) {
3819 vm_map_unlock(dst_map
);
3820 return(KERN_INVALID_ADDRESS
);
3823 copy_size
= copy
->size
;
3826 base_addr
= dst_addr
;
3828 /* deconstruct the copy object and do in parts */
3829 /* only in sub_map, interruptable case */
3830 vm_map_entry_t copy_entry
;
3831 vm_map_entry_t previous_prev
;
3832 vm_map_entry_t next_copy
;
3834 int remaining_entries
;
3837 for (entry
= tmp_entry
; copy_size
== 0;) {
3838 vm_map_entry_t next
;
3840 next
= entry
->vme_next
;
3842 /* tmp_entry and base address are moved along */
3843 /* each time we encounter a sub-map. Otherwise */
3844 /* entry can outpase tmp_entry, and the copy_size */
3845 /* may reflect the distance between them */
3846 /* if the current entry is found to be in transition */
3847 /* we will start over at the beginning or the last */
3848 /* encounter of a submap as dictated by base_addr */
3849 /* we will zero copy_size accordingly. */
3850 if (entry
->in_transition
) {
3852 * Say that we are waiting, and wait for entry.
3854 entry
->needs_wakeup
= TRUE
;
3855 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
3857 vm_map_lock(dst_map
);
3858 if(!vm_map_lookup_entry(dst_map
, base_addr
,
3860 vm_map_unlock(dst_map
);
3861 return(KERN_INVALID_ADDRESS
);
3867 if(entry
->is_sub_map
) {
3868 vm_offset_t sub_start
;
3869 vm_offset_t sub_end
;
3870 vm_offset_t local_end
;
3872 if (entry
->needs_copy
) {
3873 /* if this is a COW submap */
3874 /* just back the range with a */
3875 /* anonymous entry */
3876 if(entry
->vme_end
< dst_end
)
3877 sub_end
= entry
->vme_end
;
3880 if(entry
->vme_start
< base_addr
)
3881 sub_start
= base_addr
;
3883 sub_start
= entry
->vme_start
;
3885 dst_map
, entry
, sub_end
);
3887 dst_map
, entry
, sub_start
);
3888 entry
->is_sub_map
= FALSE
;
3890 entry
->object
.sub_map
);
3891 entry
->object
.sub_map
= NULL
;
3892 entry
->is_shared
= FALSE
;
3893 entry
->needs_copy
= FALSE
;
3895 entry
->protection
= VM_PROT_ALL
;
3896 entry
->max_protection
= VM_PROT_ALL
;
3897 entry
->wired_count
= 0;
3898 entry
->user_wired_count
= 0;
3899 if(entry
->inheritance
3900 == VM_INHERIT_SHARE
)
3901 entry
->inheritance
= VM_INHERIT_COPY
;
3904 /* first take care of any non-sub_map */
3905 /* entries to send */
3906 if(base_addr
< entry
->vme_start
) {
3909 entry
->vme_start
- base_addr
;
3912 sub_start
= entry
->offset
;
3914 if(entry
->vme_end
< dst_end
)
3915 sub_end
= entry
->vme_end
;
3918 sub_end
-= entry
->vme_start
;
3919 sub_end
+= entry
->offset
;
3920 local_end
= entry
->vme_end
;
3921 vm_map_unlock(dst_map
);
3922 copy_size
= sub_end
- sub_start
;
3924 /* adjust the copy object */
3925 if (total_size
> copy_size
) {
3926 vm_size_t local_size
= 0;
3927 vm_size_t entry_size
;
3930 new_offset
= copy
->offset
;
3931 copy_entry
= vm_map_copy_first_entry(copy
);
3933 vm_map_copy_to_entry(copy
)){
3934 entry_size
= copy_entry
->vme_end
-
3935 copy_entry
->vme_start
;
3936 if((local_size
< copy_size
) &&
3937 ((local_size
+ entry_size
)
3939 vm_map_copy_clip_end(copy
,
3941 copy_entry
->vme_start
+
3942 (copy_size
- local_size
));
3943 entry_size
= copy_entry
->vme_end
-
3944 copy_entry
->vme_start
;
3945 local_size
+= entry_size
;
3946 new_offset
+= entry_size
;
3948 if(local_size
>= copy_size
) {
3949 next_copy
= copy_entry
->vme_next
;
3950 copy_entry
->vme_next
=
3951 vm_map_copy_to_entry(copy
);
3953 copy
->cpy_hdr
.links
.prev
;
3954 copy
->cpy_hdr
.links
.prev
= copy_entry
;
3955 copy
->size
= copy_size
;
3957 copy
->cpy_hdr
.nentries
;
3958 remaining_entries
-= nentries
;
3959 copy
->cpy_hdr
.nentries
= nentries
;
3962 local_size
+= entry_size
;
3963 new_offset
+= entry_size
;
3966 copy_entry
= copy_entry
->vme_next
;
3970 if((entry
->use_pmap
) && (pmap
== NULL
)) {
3971 kr
= vm_map_copy_overwrite_nested(
3972 entry
->object
.sub_map
,
3976 entry
->object
.sub_map
->pmap
);
3977 } else if (pmap
!= NULL
) {
3978 kr
= vm_map_copy_overwrite_nested(
3979 entry
->object
.sub_map
,
3982 interruptible
, pmap
);
3984 kr
= vm_map_copy_overwrite_nested(
3985 entry
->object
.sub_map
,
3991 if(kr
!= KERN_SUCCESS
) {
3992 if(next_copy
!= NULL
) {
3993 copy
->cpy_hdr
.nentries
+=
3995 copy
->cpy_hdr
.links
.prev
->vme_next
=
3997 copy
->cpy_hdr
.links
.prev
3999 copy
->size
= total_size
;
4003 if (dst_end
<= local_end
) {
4004 return(KERN_SUCCESS
);
4006 /* otherwise copy no longer exists, it was */
4007 /* destroyed after successful copy_overwrite */
4008 copy
= (vm_map_copy_t
)
4009 zalloc(vm_map_copy_zone
);
4010 vm_map_copy_first_entry(copy
) =
4011 vm_map_copy_last_entry(copy
) =
4012 vm_map_copy_to_entry(copy
);
4013 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
4014 copy
->offset
= new_offset
;
4016 total_size
-= copy_size
;
4018 /* put back remainder of copy in container */
4019 if(next_copy
!= NULL
) {
4020 copy
->cpy_hdr
.nentries
= remaining_entries
;
4021 copy
->cpy_hdr
.links
.next
= next_copy
;
4022 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4023 copy
->size
= total_size
;
4024 next_copy
->vme_prev
=
4025 vm_map_copy_to_entry(copy
);
4028 base_addr
= local_end
;
4029 vm_map_lock(dst_map
);
4030 if(!vm_map_lookup_entry(dst_map
,
4031 local_end
, &tmp_entry
)) {
4032 vm_map_unlock(dst_map
);
4033 return(KERN_INVALID_ADDRESS
);
4038 if (dst_end
<= entry
->vme_end
) {
4039 copy_size
= dst_end
- base_addr
;
4043 if ((next
== vm_map_to_entry(dst_map
)) ||
4044 (next
->vme_start
!= entry
->vme_end
)) {
4045 vm_map_unlock(dst_map
);
4046 return(KERN_INVALID_ADDRESS
);
4055 /* adjust the copy object */
4056 if (total_size
> copy_size
) {
4057 vm_size_t local_size
= 0;
4058 vm_size_t entry_size
;
4060 new_offset
= copy
->offset
;
4061 copy_entry
= vm_map_copy_first_entry(copy
);
4062 while(copy_entry
!= vm_map_copy_to_entry(copy
)) {
4063 entry_size
= copy_entry
->vme_end
-
4064 copy_entry
->vme_start
;
4065 if((local_size
< copy_size
) &&
4066 ((local_size
+ entry_size
)
4068 vm_map_copy_clip_end(copy
, copy_entry
,
4069 copy_entry
->vme_start
+
4070 (copy_size
- local_size
));
4071 entry_size
= copy_entry
->vme_end
-
4072 copy_entry
->vme_start
;
4073 local_size
+= entry_size
;
4074 new_offset
+= entry_size
;
4076 if(local_size
>= copy_size
) {
4077 next_copy
= copy_entry
->vme_next
;
4078 copy_entry
->vme_next
=
4079 vm_map_copy_to_entry(copy
);
4081 copy
->cpy_hdr
.links
.prev
;
4082 copy
->cpy_hdr
.links
.prev
= copy_entry
;
4083 copy
->size
= copy_size
;
4085 copy
->cpy_hdr
.nentries
;
4086 remaining_entries
-= nentries
;
4087 copy
->cpy_hdr
.nentries
= nentries
;
4090 local_size
+= entry_size
;
4091 new_offset
+= entry_size
;
4094 copy_entry
= copy_entry
->vme_next
;
4104 local_pmap
= dst_map
->pmap
;
4106 if ((kr
= vm_map_copy_overwrite_aligned(
4107 dst_map
, tmp_entry
, copy
,
4108 base_addr
, local_pmap
)) != KERN_SUCCESS
) {
4109 if(next_copy
!= NULL
) {
4110 copy
->cpy_hdr
.nentries
+=
4112 copy
->cpy_hdr
.links
.prev
->vme_next
=
4114 copy
->cpy_hdr
.links
.prev
=
4116 copy
->size
+= copy_size
;
4120 vm_map_unlock(dst_map
);
4125 * if the copy and dst address are misaligned but the same
4126 * offset within the page we can copy_not_aligned the
4127 * misaligned parts and copy aligned the rest. If they are
4128 * aligned but len is unaligned we simply need to copy
4129 * the end bit unaligned. We'll need to split the misaligned
4130 * bits of the region in this case !
4132 /* ALWAYS UNLOCKS THE dst_map MAP */
4133 if ((kr
= vm_map_copy_overwrite_unaligned( dst_map
,
4134 tmp_entry
, copy
, base_addr
)) != KERN_SUCCESS
) {
4135 if(next_copy
!= NULL
) {
4136 copy
->cpy_hdr
.nentries
+=
4138 copy
->cpy_hdr
.links
.prev
->vme_next
=
4140 copy
->cpy_hdr
.links
.prev
=
4142 copy
->size
+= copy_size
;
4147 total_size
-= copy_size
;
4150 base_addr
+= copy_size
;
4152 copy
->offset
= new_offset
;
4153 if(next_copy
!= NULL
) {
4154 copy
->cpy_hdr
.nentries
= remaining_entries
;
4155 copy
->cpy_hdr
.links
.next
= next_copy
;
4156 copy
->cpy_hdr
.links
.prev
= previous_prev
;
4157 next_copy
->vme_prev
= vm_map_copy_to_entry(copy
);
4158 copy
->size
= total_size
;
4160 vm_map_lock(dst_map
);
4162 if (!vm_map_lookup_entry(dst_map
,
4163 base_addr
, &tmp_entry
)) {
4164 vm_map_unlock(dst_map
);
4165 return(KERN_INVALID_ADDRESS
);
4167 if (tmp_entry
->in_transition
) {
4168 entry
->needs_wakeup
= TRUE
;
4169 vm_map_entry_wait(dst_map
, THREAD_UNINT
);
4174 vm_map_clip_start(dst_map
, tmp_entry
, trunc_page(base_addr
));
4180 * Throw away the vm_map_copy object
4182 vm_map_copy_discard(copy
);
4184 return(KERN_SUCCESS
);
4185 }/* vm_map_copy_overwrite */
4188 vm_map_copy_overwrite(
4190 vm_offset_t dst_addr
,
4192 boolean_t interruptible
)
4194 return vm_map_copy_overwrite_nested(
4195 dst_map
, dst_addr
, copy
, interruptible
, (pmap_t
) NULL
);
4200 * Routine: vm_map_copy_overwrite_unaligned
4203 * Physically copy unaligned data
4206 * Unaligned parts of pages have to be physically copied. We use
4207 * a modified form of vm_fault_copy (which understands none-aligned
4208 * page offsets and sizes) to do the copy. We attempt to copy as
4209 * much memory in one go as possibly, however vm_fault_copy copies
4210 * within 1 memory object so we have to find the smaller of "amount left"
4211 * "source object data size" and "target object data size". With
4212 * unaligned data we don't need to split regions, therefore the source
4213 * (copy) object should be one map entry, the target range may be split
4214 * over multiple map entries however. In any event we are pessimistic
4215 * about these assumptions.
4218 * dst_map is locked on entry and is return locked on success,
4219 * unlocked on error.
4223 vm_map_copy_overwrite_unaligned(
4225 vm_map_entry_t entry
,
4229 vm_map_entry_t copy_entry
= vm_map_copy_first_entry(copy
);
4230 vm_map_version_t version
;
4231 vm_object_t dst_object
;
4232 vm_object_offset_t dst_offset
;
4233 vm_object_offset_t src_offset
;
4234 vm_object_offset_t entry_offset
;
4235 vm_offset_t entry_end
;
4240 kern_return_t kr
= KERN_SUCCESS
;
4242 vm_map_lock_write_to_read(dst_map
);
4244 src_offset
= copy
->offset
- trunc_page_64(copy
->offset
);
4245 amount_left
= copy
->size
;
4247 * unaligned so we never clipped this entry, we need the offset into
4248 * the vm_object not just the data.
4250 while (amount_left
> 0) {
4252 if (entry
== vm_map_to_entry(dst_map
)) {
4253 vm_map_unlock_read(dst_map
);
4254 return KERN_INVALID_ADDRESS
;
4257 /* "start" must be within the current map entry */
4258 assert ((start
>=entry
->vme_start
) && (start
<entry
->vme_end
));
4260 dst_offset
= start
- entry
->vme_start
;
4262 dst_size
= entry
->vme_end
- start
;
4264 src_size
= copy_entry
->vme_end
-
4265 (copy_entry
->vme_start
+ src_offset
);
4267 if (dst_size
< src_size
) {
4269 * we can only copy dst_size bytes before
4270 * we have to get the next destination entry
4272 copy_size
= dst_size
;
4275 * we can only copy src_size bytes before
4276 * we have to get the next source copy entry
4278 copy_size
= src_size
;
4281 if (copy_size
> amount_left
) {
4282 copy_size
= amount_left
;
4285 * Entry needs copy, create a shadow shadow object for
4286 * Copy on write region.
4288 if (entry
->needs_copy
&&
4289 ((entry
->protection
& VM_PROT_WRITE
) != 0))
4291 if (vm_map_lock_read_to_write(dst_map
)) {
4292 vm_map_lock_read(dst_map
);
4295 vm_object_shadow(&entry
->object
.vm_object
,
4297 (vm_size_t
)(entry
->vme_end
4298 - entry
->vme_start
));
4299 entry
->needs_copy
= FALSE
;
4300 vm_map_lock_write_to_read(dst_map
);
4302 dst_object
= entry
->object
.vm_object
;
4304 * unlike with the virtual (aligned) copy we're going
4305 * to fault on it therefore we need a target object.
4307 if (dst_object
== VM_OBJECT_NULL
) {
4308 if (vm_map_lock_read_to_write(dst_map
)) {
4309 vm_map_lock_read(dst_map
);
4312 dst_object
= vm_object_allocate((vm_size_t
)
4313 entry
->vme_end
- entry
->vme_start
);
4314 entry
->object
.vm_object
= dst_object
;
4316 vm_map_lock_write_to_read(dst_map
);
4319 * Take an object reference and unlock map. The "entry" may
4320 * disappear or change when the map is unlocked.
4322 vm_object_reference(dst_object
);
4323 version
.main_timestamp
= dst_map
->timestamp
;
4324 entry_offset
= entry
->offset
;
4325 entry_end
= entry
->vme_end
;
4326 vm_map_unlock_read(dst_map
);
4328 * Copy as much as possible in one pass
4331 copy_entry
->object
.vm_object
,
4332 copy_entry
->offset
+ src_offset
,
4335 entry_offset
+ dst_offset
,
4341 src_offset
+= copy_size
;
4342 amount_left
-= copy_size
;
4344 * Release the object reference
4346 vm_object_deallocate(dst_object
);
4348 * If a hard error occurred, return it now
4350 if (kr
!= KERN_SUCCESS
)
4353 if ((copy_entry
->vme_start
+ src_offset
) == copy_entry
->vme_end
4354 || amount_left
== 0)
4357 * all done with this copy entry, dispose.
4359 vm_map_copy_entry_unlink(copy
, copy_entry
);
4360 vm_object_deallocate(copy_entry
->object
.vm_object
);
4361 vm_map_copy_entry_dispose(copy
, copy_entry
);
4363 if ((copy_entry
= vm_map_copy_first_entry(copy
))
4364 == vm_map_copy_to_entry(copy
) && amount_left
) {
4366 * not finished copying but run out of source
4368 return KERN_INVALID_ADDRESS
;
4373 if (amount_left
== 0)
4374 return KERN_SUCCESS
;
4376 vm_map_lock_read(dst_map
);
4377 if (version
.main_timestamp
== dst_map
->timestamp
) {
4378 if (start
== entry_end
) {
4380 * destination region is split. Use the version
4381 * information to avoid a lookup in the normal
4384 entry
= entry
->vme_next
;
4386 * should be contiguous. Fail if we encounter
4387 * a hole in the destination.
4389 if (start
!= entry
->vme_start
) {
4390 vm_map_unlock_read(dst_map
);
4391 return KERN_INVALID_ADDRESS
;
4396 * Map version check failed.
4397 * we must lookup the entry because somebody
4398 * might have changed the map behind our backs.
4401 if (!vm_map_lookup_entry(dst_map
, start
, &entry
))
4403 vm_map_unlock_read(dst_map
);
4404 return KERN_INVALID_ADDRESS
;
4410 vm_map_unlock_read(dst_map
);
4412 return KERN_SUCCESS
;
4413 }/* vm_map_copy_overwrite_unaligned */
4416 * Routine: vm_map_copy_overwrite_aligned
4419 * Does all the vm_trickery possible for whole pages.
4423 * If there are no permanent objects in the destination,
4424 * and the source and destination map entry zones match,
4425 * and the destination map entry is not shared,
4426 * then the map entries can be deleted and replaced
4427 * with those from the copy. The following code is the
4428 * basic idea of what to do, but there are lots of annoying
4429 * little details about getting protection and inheritance
4430 * right. Should add protection, inheritance, and sharing checks
4431 * to the above pass and make sure that no wiring is involved.
4435 vm_map_copy_overwrite_aligned(
4437 vm_map_entry_t tmp_entry
,
4443 vm_map_entry_t copy_entry
;
4444 vm_size_t copy_size
;
4446 vm_map_entry_t entry
;
4448 while ((copy_entry
= vm_map_copy_first_entry(copy
))
4449 != vm_map_copy_to_entry(copy
))
4451 copy_size
= (copy_entry
->vme_end
- copy_entry
->vme_start
);
4454 if (entry
== vm_map_to_entry(dst_map
)) {
4455 vm_map_unlock(dst_map
);
4456 return KERN_INVALID_ADDRESS
;
4458 size
= (entry
->vme_end
- entry
->vme_start
);
4460 * Make sure that no holes popped up in the
4461 * address map, and that the protection is
4462 * still valid, in case the map was unlocked
4466 if ((entry
->vme_start
!= start
) || ((entry
->is_sub_map
)
4467 && !entry
->needs_copy
)) {
4468 vm_map_unlock(dst_map
);
4469 return(KERN_INVALID_ADDRESS
);
4471 assert(entry
!= vm_map_to_entry(dst_map
));
4474 * Check protection again
4477 if ( ! (entry
->protection
& VM_PROT_WRITE
)) {
4478 vm_map_unlock(dst_map
);
4479 return(KERN_PROTECTION_FAILURE
);
4483 * Adjust to source size first
4486 if (copy_size
< size
) {
4487 vm_map_clip_end(dst_map
, entry
, entry
->vme_start
+ copy_size
);
4492 * Adjust to destination size
4495 if (size
< copy_size
) {
4496 vm_map_copy_clip_end(copy
, copy_entry
,
4497 copy_entry
->vme_start
+ size
);
4501 assert((entry
->vme_end
- entry
->vme_start
) == size
);
4502 assert((tmp_entry
->vme_end
- tmp_entry
->vme_start
) == size
);
4503 assert((copy_entry
->vme_end
- copy_entry
->vme_start
) == size
);
4506 * If the destination contains temporary unshared memory,
4507 * we can perform the copy by throwing it away and
4508 * installing the source data.
4511 object
= entry
->object
.vm_object
;
4512 if ((!entry
->is_shared
&&
4513 ((object
== VM_OBJECT_NULL
) ||
4514 (object
->internal
&& !object
->true_share
))) ||
4515 entry
->needs_copy
) {
4516 vm_object_t old_object
= entry
->object
.vm_object
;
4517 vm_object_offset_t old_offset
= entry
->offset
;
4518 vm_object_offset_t offset
;
4521 * Ensure that the source and destination aren't
4524 if (old_object
== copy_entry
->object
.vm_object
&&
4525 old_offset
== copy_entry
->offset
) {
4526 vm_map_copy_entry_unlink(copy
, copy_entry
);
4527 vm_map_copy_entry_dispose(copy
, copy_entry
);
4529 if (old_object
!= VM_OBJECT_NULL
)
4530 vm_object_deallocate(old_object
);
4532 start
= tmp_entry
->vme_end
;
4533 tmp_entry
= tmp_entry
->vme_next
;
4537 if (old_object
!= VM_OBJECT_NULL
) {
4538 if(entry
->is_sub_map
) {
4539 if(entry
->use_pmap
) {
4541 pmap_unnest(dst_map
->pmap
,
4543 entry
->vme_end
- entry
->vme_start
);
4546 vm_map_submap_pmap_clean(
4547 dst_map
, entry
->vme_start
,
4549 entry
->object
.sub_map
,
4553 entry
->object
.sub_map
);
4555 vm_object_pmap_protect(
4560 tmp_entry
->vme_start
,
4563 vm_object_deallocate(old_object
);
4567 entry
->is_sub_map
= FALSE
;
4568 entry
->object
= copy_entry
->object
;
4569 object
= entry
->object
.vm_object
;
4570 entry
->needs_copy
= copy_entry
->needs_copy
;
4571 entry
->wired_count
= 0;
4572 entry
->user_wired_count
= 0;
4573 offset
= entry
->offset
= copy_entry
->offset
;
4575 vm_map_copy_entry_unlink(copy
, copy_entry
);
4576 vm_map_copy_entry_dispose(copy
, copy_entry
);
4577 #if BAD_OPTIMIZATION
4579 * if we turn this optimization back on
4580 * we need to revisit our use of pmap mappings
4581 * large copies will cause us to run out and panic
4582 * this optimization only saved on average 2 us per page if ALL
4583 * the pages in the source were currently mapped
4584 * and ALL the pages in the dest were touched, if there were fewer
4585 * than 2/3 of the pages touched, this optimization actually cost more cycles
4589 * Try to aggressively enter physical mappings
4590 * (but avoid uninstantiated objects)
4592 if (object
!= VM_OBJECT_NULL
) {
4593 vm_offset_t va
= entry
->vme_start
;
4595 while (va
< entry
->vme_end
) {
4596 register vm_page_t m
;
4600 * Look for the page in the top object
4602 prot
= entry
->protection
;
4603 vm_object_lock(object
);
4604 vm_object_paging_begin(object
);
4606 if ((m
= vm_page_lookup(object
,offset
)) !=
4607 VM_PAGE_NULL
&& !m
->busy
&&
4609 (!m
->unusual
|| (!m
->error
&&
4610 !m
->restart
&& !m
->absent
&&
4611 (prot
& m
->page_lock
) == 0))) {
4614 vm_object_unlock(object
);
4617 * Honor COW obligations
4619 if (entry
->needs_copy
)
4620 prot
&= ~VM_PROT_WRITE
;
4621 /* It is our policy to require */
4622 /* explicit sync from anyone */
4623 /* writing code and then */
4624 /* a pc to execute it. */
4627 PMAP_ENTER(pmap
, va
, m
,
4630 vm_object_lock(object
);
4631 vm_page_lock_queues();
4632 if (!m
->active
&& !m
->inactive
)
4633 vm_page_activate(m
);
4634 vm_page_unlock_queues();
4635 PAGE_WAKEUP_DONE(m
);
4637 vm_object_paging_end(object
);
4638 vm_object_unlock(object
);
4640 offset
+= PAGE_SIZE_64
;
4642 } /* end while (va < entry->vme_end) */
4643 } /* end if (object) */
4646 * Set up for the next iteration. The map
4647 * has not been unlocked, so the next
4648 * address should be at the end of this
4649 * entry, and the next map entry should be
4650 * the one following it.
4653 start
= tmp_entry
->vme_end
;
4654 tmp_entry
= tmp_entry
->vme_next
;
4656 vm_map_version_t version
;
4657 vm_object_t dst_object
= entry
->object
.vm_object
;
4658 vm_object_offset_t dst_offset
= entry
->offset
;
4662 * Take an object reference, and record
4663 * the map version information so that the
4664 * map can be safely unlocked.
4667 vm_object_reference(dst_object
);
4669 version
.main_timestamp
= dst_map
->timestamp
;
4671 vm_map_unlock(dst_map
);
4674 * Copy as much as possible in one pass
4679 copy_entry
->object
.vm_object
,
4689 * Release the object reference
4692 vm_object_deallocate(dst_object
);
4695 * If a hard error occurred, return it now
4698 if (r
!= KERN_SUCCESS
)
4701 if (copy_size
!= 0) {
4703 * Dispose of the copied region
4706 vm_map_copy_clip_end(copy
, copy_entry
,
4707 copy_entry
->vme_start
+ copy_size
);
4708 vm_map_copy_entry_unlink(copy
, copy_entry
);
4709 vm_object_deallocate(copy_entry
->object
.vm_object
);
4710 vm_map_copy_entry_dispose(copy
, copy_entry
);
4714 * Pick up in the destination map where we left off.
4716 * Use the version information to avoid a lookup
4717 * in the normal case.
4721 vm_map_lock(dst_map
);
4722 if ((version
.main_timestamp
+ 1) == dst_map
->timestamp
) {
4723 /* We can safely use saved tmp_entry value */
4725 vm_map_clip_end(dst_map
, tmp_entry
, start
);
4726 tmp_entry
= tmp_entry
->vme_next
;
4728 /* Must do lookup of tmp_entry */
4730 if (!vm_map_lookup_entry(dst_map
, start
, &tmp_entry
)) {
4731 vm_map_unlock(dst_map
);
4732 return(KERN_INVALID_ADDRESS
);
4734 vm_map_clip_start(dst_map
, tmp_entry
, start
);
4739 return(KERN_SUCCESS
);
4740 }/* vm_map_copy_overwrite_aligned */
4743 * Routine: vm_map_copyin_kernel_buffer
4746 * Copy in data to a kernel buffer from space in the
4747 * source map. The original space may be otpionally
4750 * If successful, returns a new copy object.
4753 vm_map_copyin_kernel_buffer(
4755 vm_offset_t src_addr
,
4757 boolean_t src_destroy
,
4758 vm_map_copy_t
*copy_result
)
4762 vm_size_t kalloc_size
= sizeof(struct vm_map_copy
) + len
;
4764 copy
= (vm_map_copy_t
) kalloc(kalloc_size
);
4765 if (copy
== VM_MAP_COPY_NULL
) {
4766 return KERN_RESOURCE_SHORTAGE
;
4768 copy
->type
= VM_MAP_COPY_KERNEL_BUFFER
;
4771 copy
->cpy_kdata
= (vm_offset_t
) (copy
+ 1);
4772 copy
->cpy_kalloc_size
= kalloc_size
;
4774 if (src_map
== kernel_map
) {
4775 bcopy((char *)src_addr
, (char *)copy
->cpy_kdata
, len
);
4776 flags
= VM_MAP_REMOVE_KUNWIRE
| VM_MAP_REMOVE_WAIT_FOR_KWIRE
|
4777 VM_MAP_REMOVE_INTERRUPTIBLE
;
4780 kr
= copyinmap(src_map
, src_addr
, copy
->cpy_kdata
, len
);
4781 if (kr
!= KERN_SUCCESS
) {
4782 kfree((vm_offset_t
)copy
, kalloc_size
);
4785 flags
= VM_MAP_REMOVE_WAIT_FOR_KWIRE
|
4786 VM_MAP_REMOVE_INTERRUPTIBLE
;
4789 (void) vm_map_remove(src_map
, trunc_page(src_addr
),
4790 round_page(src_addr
+ len
),
4793 *copy_result
= copy
;
4794 return KERN_SUCCESS
;
4798 * Routine: vm_map_copyout_kernel_buffer
4801 * Copy out data from a kernel buffer into space in the
4802 * destination map. The space may be otpionally dynamically
4805 * If successful, consumes the copy object.
4806 * Otherwise, the caller is responsible for it.
4809 vm_map_copyout_kernel_buffer(
4811 vm_offset_t
*addr
, /* IN/OUT */
4813 boolean_t overwrite
)
4815 kern_return_t kr
= KERN_SUCCESS
;
4816 thread_act_t thr_act
= current_act();
4821 * Allocate space in the target map for the data
4824 kr
= vm_map_enter(map
,
4826 round_page(copy
->size
),
4830 (vm_object_offset_t
) 0,
4834 VM_INHERIT_DEFAULT
);
4835 if (kr
!= KERN_SUCCESS
)
4840 * Copyout the data from the kernel buffer to the target map.
4842 if (thr_act
->map
== map
) {
4845 * If the target map is the current map, just do
4848 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4850 return(KERN_INVALID_ADDRESS
);
4857 * If the target map is another map, assume the
4858 * target's address space identity for the duration
4861 vm_map_reference(map
);
4862 oldmap
= vm_map_switch(map
);
4864 if (copyout((char *)copy
->cpy_kdata
, (char *)*addr
,
4866 return(KERN_INVALID_ADDRESS
);
4869 (void) vm_map_switch(oldmap
);
4870 vm_map_deallocate(map
);
4873 kfree((vm_offset_t
)copy
, copy
->cpy_kalloc_size
);
4879 * Macro: vm_map_copy_insert
4882 * Link a copy chain ("copy") into a map at the
4883 * specified location (after "where").
4885 * The copy chain is destroyed.
4887 * The arguments are evaluated multiple times.
4889 #define vm_map_copy_insert(map, where, copy) \
4891 vm_map_t VMCI_map; \
4892 vm_map_entry_t VMCI_where; \
4893 vm_map_copy_t VMCI_copy; \
4895 VMCI_where = (where); \
4896 VMCI_copy = (copy); \
4897 ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
4898 ->vme_next = (VMCI_where->vme_next); \
4899 ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
4900 ->vme_prev = VMCI_where; \
4901 VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
4902 UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \
4903 zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy); \
4907 * Routine: vm_map_copyout
4910 * Copy out a copy chain ("copy") into newly-allocated
4911 * space in the destination map.
4913 * If successful, consumes the copy object.
4914 * Otherwise, the caller is responsible for it.
4918 register vm_map_t dst_map
,
4919 vm_offset_t
*dst_addr
, /* OUT */
4920 register vm_map_copy_t copy
)
4923 vm_size_t adjustment
;
4925 vm_object_offset_t vm_copy_start
;
4926 vm_map_entry_t last
;
4928 vm_map_entry_t entry
;
4931 * Check for null copy object.
4934 if (copy
== VM_MAP_COPY_NULL
) {
4936 return(KERN_SUCCESS
);
4940 * Check for special copy object, created
4941 * by vm_map_copyin_object.
4944 if (copy
->type
== VM_MAP_COPY_OBJECT
) {
4945 vm_object_t object
= copy
->cpy_object
;
4947 vm_object_offset_t offset
;
4949 offset
= trunc_page_64(copy
->offset
);
4950 size
= round_page(copy
->size
+
4951 (vm_size_t
)(copy
->offset
- offset
));
4953 kr
= vm_map_enter(dst_map
, dst_addr
, size
,
4954 (vm_offset_t
) 0, TRUE
,
4955 object
, offset
, FALSE
,
4956 VM_PROT_DEFAULT
, VM_PROT_ALL
,
4957 VM_INHERIT_DEFAULT
);
4958 if (kr
!= KERN_SUCCESS
)
4960 /* Account for non-pagealigned copy object */
4961 *dst_addr
+= (vm_offset_t
)(copy
->offset
- offset
);
4962 zfree(vm_map_copy_zone
, (vm_offset_t
) copy
);
4963 return(KERN_SUCCESS
);
4967 * Check for special kernel buffer allocated
4968 * by new_ipc_kmsg_copyin.
4971 if (copy
->type
== VM_MAP_COPY_KERNEL_BUFFER
) {
4972 return(vm_map_copyout_kernel_buffer(dst_map
, dst_addr
,
4977 * Find space for the data
4980 vm_copy_start
= trunc_page_64(copy
->offset
);
4981 size
= round_page((vm_size_t
)copy
->offset
+ copy
->size
)
4986 vm_map_lock(dst_map
);
4987 assert(first_free_is_valid(dst_map
));
4988 start
= ((last
= dst_map
->first_free
) == vm_map_to_entry(dst_map
)) ?
4989 vm_map_min(dst_map
) : last
->vme_end
;
4992 vm_map_entry_t next
= last
->vme_next
;
4993 vm_offset_t end
= start
+ size
;
4995 if ((end
> dst_map
->max_offset
) || (end
< start
)) {
4996 if (dst_map
->wait_for_space
) {
4997 if (size
<= (dst_map
->max_offset
- dst_map
->min_offset
)) {
4998 assert_wait((event_t
) dst_map
,
4999 THREAD_INTERRUPTIBLE
);
5000 vm_map_unlock(dst_map
);
5001 thread_block((void (*)(void))0);
5005 vm_map_unlock(dst_map
);
5006 return(KERN_NO_SPACE
);
5009 if ((next
== vm_map_to_entry(dst_map
)) ||
5010 (next
->vme_start
>= end
))
5014 start
= last
->vme_end
;
5018 * Since we're going to just drop the map
5019 * entries from the copy into the destination
5020 * map, they must come from the same pool.
5023 if (copy
->cpy_hdr
.entries_pageable
!= dst_map
->hdr
.entries_pageable
) {
5025 * Mismatches occur when dealing with the default
5029 vm_map_entry_t next
, new;
5032 * Find the zone that the copies were allocated from
5034 old_zone
= (copy
->cpy_hdr
.entries_pageable
)
5036 : vm_map_kentry_zone
;
5037 entry
= vm_map_copy_first_entry(copy
);
5040 * Reinitialize the copy so that vm_map_copy_entry_link
5043 copy
->cpy_hdr
.nentries
= 0;
5044 copy
->cpy_hdr
.entries_pageable
= dst_map
->hdr
.entries_pageable
;
5045 vm_map_copy_first_entry(copy
) =
5046 vm_map_copy_last_entry(copy
) =
5047 vm_map_copy_to_entry(copy
);
5052 while (entry
!= vm_map_copy_to_entry(copy
)) {
5053 new = vm_map_copy_entry_create(copy
);
5054 vm_map_entry_copy_full(new, entry
);
5055 new->use_pmap
= FALSE
; /* clr address space specifics */
5056 vm_map_copy_entry_link(copy
,
5057 vm_map_copy_last_entry(copy
),
5059 next
= entry
->vme_next
;
5060 zfree(old_zone
, (vm_offset_t
) entry
);
5066 * Adjust the addresses in the copy chain, and
5067 * reset the region attributes.
5070 adjustment
= start
- vm_copy_start
;
5071 for (entry
= vm_map_copy_first_entry(copy
);
5072 entry
!= vm_map_copy_to_entry(copy
);
5073 entry
= entry
->vme_next
) {
5074 entry
->vme_start
+= adjustment
;
5075 entry
->vme_end
+= adjustment
;
5077 entry
->inheritance
= VM_INHERIT_DEFAULT
;
5078 entry
->protection
= VM_PROT_DEFAULT
;
5079 entry
->max_protection
= VM_PROT_ALL
;
5080 entry
->behavior
= VM_BEHAVIOR_DEFAULT
;
5083 * If the entry is now wired,
5084 * map the pages into the destination map.
5086 if (entry
->wired_count
!= 0) {
5087 register vm_offset_t va
;
5088 vm_object_offset_t offset
;
5089 register vm_object_t object
;
5091 object
= entry
->object
.vm_object
;
5092 offset
= entry
->offset
;
5093 va
= entry
->vme_start
;
5095 pmap_pageable(dst_map
->pmap
,
5100 while (va
< entry
->vme_end
) {
5101 register vm_page_t m
;
5104 * Look up the page in the object.
5105 * Assert that the page will be found in the
5108 * the object was newly created by
5109 * vm_object_copy_slowly, and has
5110 * copies of all of the pages from
5113 * the object was moved from the old
5114 * map entry; because the old map
5115 * entry was wired, all of the pages
5116 * were in the top-level object.
5117 * (XXX not true if we wire pages for
5120 vm_object_lock(object
);
5121 vm_object_paging_begin(object
);
5123 m
= vm_page_lookup(object
, offset
);
5124 if (m
== VM_PAGE_NULL
|| m
->wire_count
== 0 ||
5126 panic("vm_map_copyout: wiring 0x%x", m
);
5129 vm_object_unlock(object
);
5131 PMAP_ENTER(dst_map
->pmap
, va
, m
,
5132 entry
->protection
, TRUE
);
5134 vm_object_lock(object
);
5135 PAGE_WAKEUP_DONE(m
);
5136 /* the page is wired, so we don't have to activate */
5137 vm_object_paging_end(object
);
5138 vm_object_unlock(object
);
5140 offset
+= PAGE_SIZE_64
;
5144 else if (size
<= vm_map_aggressive_enter_max
) {
5146 register vm_offset_t va
;
5147 vm_object_offset_t offset
;
5148 register vm_object_t object
;
5151 object
= entry
->object
.vm_object
;
5152 if (object
!= VM_OBJECT_NULL
) {
5154 offset
= entry
->offset
;
5155 va
= entry
->vme_start
;
5156 while (va
< entry
->vme_end
) {
5157 register vm_page_t m
;
5160 * Look up the page in the object.
5161 * Assert that the page will be found
5162 * in the top object if at all...
5164 vm_object_lock(object
);
5165 vm_object_paging_begin(object
);
5167 if (((m
= vm_page_lookup(object
,
5170 !m
->busy
&& !m
->fictitious
&&
5171 !m
->absent
&& !m
->error
) {
5173 vm_object_unlock(object
);
5175 /* honor cow obligations */
5176 prot
= entry
->protection
;
5177 if (entry
->needs_copy
)
5178 prot
&= ~VM_PROT_WRITE
;
5180 PMAP_ENTER(dst_map
->pmap
, va
,
5183 vm_object_lock(object
);
5184 vm_page_lock_queues();
5185 if (!m
->active
&& !m
->inactive
)
5186 vm_page_activate(m
);
5187 vm_page_unlock_queues();
5188 PAGE_WAKEUP_DONE(m
);
5190 vm_object_paging_end(object
);
5191 vm_object_unlock(object
);
5193 offset
+= PAGE_SIZE_64
;
5201 * Correct the page alignment for the result
5204 *dst_addr
= start
+ (copy
->offset
- vm_copy_start
);
5207 * Update the hints and the map size
5210 SAVE_HINT(dst_map
, vm_map_copy_last_entry(copy
));
5212 dst_map
->size
+= size
;
5218 vm_map_copy_insert(dst_map
, last
, copy
);
5220 vm_map_unlock(dst_map
);
5223 * XXX If wiring_required, call vm_map_pageable
5226 return(KERN_SUCCESS
);
5229 boolean_t vm_map_aggressive_enter
; /* not used yet */
/*
 *	Routine:	vm_map_copyin
 *
 *	Description:
 *		Copy the specified region (src_addr, len) from the
 *		source address space (src_map), possibly removing
 *		the region from the source address space (src_destroy).
 *
 *	Returns:
 *		A vm_map_copy_t object (copy_result), suitable for
 *		insertion into another address space (using vm_map_copyout),
 *		copying over another address space region (using
 *		vm_map_copy_overwrite).  If the copy is unused, it
 *		should be destroyed (using vm_map_copy_discard).
 *
 *	In/out conditions:
 *		The source map should not be locked on entry.
 */

typedef struct submap_map {
	vm_map_t	parent_map;
	vm_offset_t	base_start;
	vm_offset_t	base_end;
	struct submap_map *next;
} submap_map_t;
kern_return_t
vm_map_copyin_common(
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	boolean_t	src_destroy,
	boolean_t	src_volatile,
	vm_map_copy_t	*copy_result,	/* OUT */
	boolean_t	use_maxprot)
{
5268 extern int msg_ool_size_small
;
5270 vm_map_entry_t tmp_entry
; /* Result of last map lookup --
5271 * in multi-level lookup, this
5272 * entry contains the actual
5276 vm_map_entry_t new_entry
= VM_MAP_ENTRY_NULL
; /* Map entry for copy */
5278 vm_offset_t src_start
; /* Start of current entry --
5279 * where copy is taking place now
5281 vm_offset_t src_end
; /* End of entire region to be
5283 vm_offset_t base_start
; /* submap fields to save offsets */
5284 /* in original map */
5285 vm_offset_t base_end
;
5286 vm_map_t base_map
=src_map
;
5287 vm_map_entry_t base_entry
;
5288 boolean_t map_share
=FALSE
;
5289 submap_map_t
*parent_maps
= NULL
;
5292 vm_map_copy_t copy
; /* Resulting copy */
5293 vm_offset_t copy_addr
;
	/*
	 * Check for copies of zero bytes.
	 */
	if (len == 0) {
		*copy_result = VM_MAP_COPY_NULL;
		return(KERN_SUCCESS);
	}

	/*
	 * If the copy is sufficiently small, use a kernel buffer instead
	 * of making a virtual copy.  The theory being that the cost of
	 * setting up VM (and taking C-O-W faults) dominates the copy costs
	 * for small regions.
	 */
	if ((len < msg_ool_size_small) && !use_maxprot)
		return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
						   src_destroy, copy_result);
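
	/*
	 * Larger requests build a VM_MAP_COPY_ENTRY_LIST copy: the region is
	 * page-rounded below (src_start = trunc_page(src_addr), src_end =
	 * round_page(src_addr + len)), while the unrounded offset is kept in
	 * the copy header so that vm_map_copyout can restore the caller's
	 * exact byte alignment.
	 */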
5315 * Compute start and end of region
5318 src_start
= trunc_page(src_addr
);
5319 src_end
= round_page(src_addr
+ len
);
5321 XPR(XPR_VM_MAP
, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t
)src_map
, src_addr
, len
, src_destroy
, 0);
5324 * Check that the end address doesn't overflow
5327 if (src_end
<= src_start
)
5328 if ((src_end
< src_start
) || (src_start
!= 0))
5329 return(KERN_INVALID_ADDRESS
);
5332 * Allocate a header element for the list.
5334 * Use the start and end in the header to
5335 * remember the endpoints prior to rounding.
5338 copy
= (vm_map_copy_t
) zalloc(vm_map_copy_zone
);
5339 vm_map_copy_first_entry(copy
) =
5340 vm_map_copy_last_entry(copy
) = vm_map_copy_to_entry(copy
);
5341 copy
->type
= VM_MAP_COPY_ENTRY_LIST
;
5342 copy
->cpy_hdr
.nentries
= 0;
5343 copy
->cpy_hdr
.entries_pageable
= TRUE
;
5345 copy
->offset
= src_addr
;
5348 new_entry
= vm_map_copy_entry_create(copy
);
#define	RETURN(x)						\
	MACRO_BEGIN						\
	vm_map_unlock(src_map);					\
	if (new_entry != VM_MAP_ENTRY_NULL)			\
		vm_map_copy_entry_dispose(copy,new_entry);	\
	vm_map_copy_discard(copy);				\
	{							\
		submap_map_t	*ptr;				\
								\
		for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
			parent_maps = parent_maps->next;	\
			kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
		}						\
	}							\
	MACRO_RETURN(x);					\
	MACRO_END
5368 * Find the beginning of the region.
5371 vm_map_lock(src_map
);
5373 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
))
5374 RETURN(KERN_INVALID_ADDRESS
);
5375 if(!tmp_entry
->is_sub_map
) {
5376 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5378 /* set for later submap fix-up */
5379 copy_addr
= src_start
;
5382 * Go through entries until we get to the end.
5387 vm_map_entry_t src_entry
= tmp_entry
; /* Top-level entry */
5388 vm_size_t src_size
; /* Size of source
5389 * map entry (in both
5394 vm_object_t src_object
; /* Object to copy */
5395 vm_object_offset_t src_offset
;
5397 boolean_t src_needs_copy
; /* Should source map
5399 * for copy-on-write?
5402 boolean_t new_entry_needs_copy
; /* Will new entry be COW? */
5404 boolean_t was_wired
; /* Was source wired? */
5405 vm_map_version_t version
; /* Version before locks
5406 * dropped to make copy
5408 kern_return_t result
; /* Return value from
5409 * copy_strategically.
5411 while(tmp_entry
->is_sub_map
) {
5412 vm_size_t submap_len
;
5415 ptr
= (submap_map_t
*)kalloc(sizeof(submap_map_t
));
5416 ptr
->next
= parent_maps
;
5418 ptr
->parent_map
= src_map
;
5419 ptr
->base_start
= src_start
;
5420 ptr
->base_end
= src_end
;
5421 submap_len
= tmp_entry
->vme_end
- src_start
;
5422 if(submap_len
> (src_end
-src_start
))
5423 submap_len
= src_end
-src_start
;
5424 ptr
->base_start
+= submap_len
;
5426 src_start
-= tmp_entry
->vme_start
;
5427 src_start
+= tmp_entry
->offset
;
5428 src_end
= src_start
+ submap_len
;
5429 src_map
= tmp_entry
->object
.sub_map
;
5430 vm_map_lock(src_map
);
5431 vm_map_unlock(ptr
->parent_map
);
5432 if (!vm_map_lookup_entry(
5433 src_map
, src_start
, &tmp_entry
))
5434 RETURN(KERN_INVALID_ADDRESS
);
5436 if(!tmp_entry
->is_sub_map
)
5437 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5438 src_entry
= tmp_entry
;
5440 if ((tmp_entry
->object
.vm_object
!= VM_OBJECT_NULL
) &&
5441 (tmp_entry
->object
.vm_object
->phys_contiguous
)) {
			/*
			 * This is not, and cannot be, supported for now;
			 * we need a description of the caching mode
			 * reflected in the object before we can support
			 * copyin, and then the support will be for
			 * direct copy.
			 */
			RETURN(KERN_PROTECTION_FAILURE);
		}
5450 * Create a new address map entry to hold the result.
5451 * Fill in the fields from the appropriate source entries.
5452 * We must unlock the source map to do this if we need
5453 * to allocate a map entry.
5455 if (new_entry
== VM_MAP_ENTRY_NULL
) {
5456 version
.main_timestamp
= src_map
->timestamp
;
5457 vm_map_unlock(src_map
);
5459 new_entry
= vm_map_copy_entry_create(copy
);
5461 vm_map_lock(src_map
);
5462 if ((version
.main_timestamp
+ 1) != src_map
->timestamp
) {
5463 if (!vm_map_lookup_entry(src_map
, src_start
,
5465 RETURN(KERN_INVALID_ADDRESS
);
5467 vm_map_clip_start(src_map
, tmp_entry
, src_start
);
5468 continue; /* restart w/ new tmp_entry */
5473 * Verify that the region can be read.
5475 if (((src_entry
->protection
& VM_PROT_READ
) == VM_PROT_NONE
&&
5477 (src_entry
->max_protection
& VM_PROT_READ
) == 0)
5478 RETURN(KERN_PROTECTION_FAILURE
);
5481 * Clip against the endpoints of the entire region.
5484 vm_map_clip_end(src_map
, src_entry
, src_end
);
5486 src_size
= src_entry
->vme_end
- src_start
;
5487 src_object
= src_entry
->object
.vm_object
;
5488 src_offset
= src_entry
->offset
;
5489 was_wired
= (src_entry
->wired_count
!= 0);
5491 vm_map_entry_copy(new_entry
, src_entry
);
5492 new_entry
->use_pmap
= FALSE
; /* clr address space specifics */
5495 * Attempt non-blocking copy-on-write optimizations.
5499 (src_object
== VM_OBJECT_NULL
||
5500 (src_object
->internal
&& !src_object
->true_share
5503 * If we are destroying the source, and the object
5504 * is internal, we can move the object reference
5505 * from the source to the copy. The copy is
5506 * copy-on-write only if the source is.
5507 * We make another reference to the object, because
5508 * destroying the source entry will deallocate it.
5510 vm_object_reference(src_object
);
5513 * Copy is always unwired. vm_map_copy_entry
5514 * set its wired count to zero.
5517 goto CopySuccessful
;
5522 XPR(XPR_VM_MAP
, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5523 src_object
, new_entry
, new_entry
->object
.vm_object
,
5526 vm_object_copy_quickly(
5527 &new_entry
->object
.vm_object
,
5531 &new_entry_needs_copy
)) {
5533 new_entry
->needs_copy
= new_entry_needs_copy
;
5536 * Handle copy-on-write obligations
5539 if (src_needs_copy
&& !tmp_entry
->needs_copy
) {
5540 if (tmp_entry
->is_shared
||
5541 tmp_entry
->object
.vm_object
->true_share
||
5543 vm_map_unlock(src_map
);
5544 new_entry
->object
.vm_object
=
5545 vm_object_copy_delayed(
5549 /* dec ref gained in copy_quickly */
5550 vm_object_lock(src_object
);
5551 src_object
->ref_count
--;
5552 assert(src_object
->ref_count
> 0);
5553 vm_object_res_deallocate(src_object
);
5554 vm_object_unlock(src_object
);
5555 vm_map_lock(src_map
);
5557 * it turns out that we have
5558 * finished our copy. No matter
5559 * what the state of the map
5560 * we will lock it again here
5561 * knowing that if there is
5562 * additional data to copy
5563 * it will be checked at
5564 * the top of the loop
5566 * Don't do timestamp check
5570 vm_object_pmap_protect(
5574 (src_entry
->is_shared
?
5577 src_entry
->vme_start
,
5578 src_entry
->protection
&
5581 tmp_entry
->needs_copy
= TRUE
;
5586 * The map has never been unlocked, so it's safe
5587 * to move to the next entry rather than doing
5591 goto CopySuccessful
;
5594 new_entry
->needs_copy
= FALSE
;
5597 * Take an object reference, so that we may
5598 * release the map lock(s).
5601 assert(src_object
!= VM_OBJECT_NULL
);
5602 vm_object_reference(src_object
);
5605 * Record the timestamp for later verification.
5609 version
.main_timestamp
= src_map
->timestamp
;
5610 vm_map_unlock(src_map
);
5617 vm_object_lock(src_object
);
5618 result
= vm_object_copy_slowly(
5623 &new_entry
->object
.vm_object
);
5624 new_entry
->offset
= 0;
5625 new_entry
->needs_copy
= FALSE
;
5627 result
= vm_object_copy_strategically(src_object
,
5630 &new_entry
->object
.vm_object
,
5632 &new_entry_needs_copy
);
5634 new_entry
->needs_copy
= new_entry_needs_copy
;
5638 if (result
!= KERN_SUCCESS
&&
5639 result
!= KERN_MEMORY_RESTART_COPY
) {
5640 vm_map_lock(src_map
);
5645 * Throw away the extra reference
5648 vm_object_deallocate(src_object
);
5651 * Verify that the map has not substantially
5652 * changed while the copy was being made.
5655 vm_map_lock(src_map
); /* Increments timestamp once! */
5657 if ((version
.main_timestamp
+ 1) == src_map
->timestamp
)
5658 goto VerificationSuccessful
;
5661 * Simple version comparison failed.
5663 * Retry the lookup and verify that the
5664 * same object/offset are still present.
5666 * [Note: a memory manager that colludes with
5667 * the calling task can detect that we have
5668 * cheated. While the map was unlocked, the
5669 * mapping could have been changed and restored.]
5672 if (!vm_map_lookup_entry(src_map
, src_start
, &tmp_entry
)) {
5673 RETURN(KERN_INVALID_ADDRESS
);
5676 src_entry
= tmp_entry
;
5677 vm_map_clip_start(src_map
, src_entry
, src_start
);
5679 if ((src_entry
->protection
& VM_PROT_READ
== VM_PROT_NONE
&&
5681 src_entry
->max_protection
& VM_PROT_READ
== 0)
5682 goto VerificationFailed
;
5684 if (src_entry
->vme_end
< new_entry
->vme_end
)
5685 src_size
= (new_entry
->vme_end
= src_entry
->vme_end
) - src_start
;
5687 if ((src_entry
->object
.vm_object
!= src_object
) ||
5688 (src_entry
->offset
!= src_offset
) ) {
5691 * Verification failed.
5693 * Start over with this top-level entry.
5696 VerificationFailed
: ;
5698 vm_object_deallocate(new_entry
->object
.vm_object
);
5699 tmp_entry
= src_entry
;
5704 * Verification succeeded.
5707 VerificationSuccessful
: ;
5709 if (result
== KERN_MEMORY_RESTART_COPY
)
5719 * Link in the new copy entry.
5722 vm_map_copy_entry_link(copy
, vm_map_copy_last_entry(copy
),
5726 * Determine whether the entire region
5729 src_start
= new_entry
->vme_end
;
5730 new_entry
= VM_MAP_ENTRY_NULL
;
5731 while ((src_start
>= src_end
) && (src_end
!= 0)) {
5732 if (src_map
!= base_map
) {
5736 assert(ptr
!= NULL
);
5737 parent_maps
= parent_maps
->next
;
5738 vm_map_lock(ptr
->parent_map
);
5739 vm_map_unlock(src_map
);
5740 src_map
= ptr
->parent_map
;
5741 src_start
= ptr
->base_start
;
5742 src_end
= ptr
->base_end
;
5743 if ((src_end
> src_start
) &&
5744 !vm_map_lookup_entry(
5745 src_map
, src_start
, &tmp_entry
))
5746 RETURN(KERN_INVALID_ADDRESS
);
5747 kfree((vm_offset_t
)ptr
, sizeof(submap_map_t
));
5748 if(parent_maps
== NULL
)
5750 src_entry
= tmp_entry
->vme_prev
;
5754 if ((src_start
>= src_end
) && (src_end
!= 0))
5758 * Verify that there are no gaps in the region
5761 tmp_entry
= src_entry
->vme_next
;
5762 if ((tmp_entry
->vme_start
!= src_start
) ||
5763 (tmp_entry
== vm_map_to_entry(src_map
)))
5764 RETURN(KERN_INVALID_ADDRESS
);
5768 * If the source should be destroyed, do it now, since the
5769 * copy was successful.
5772 (void) vm_map_delete(src_map
,
5773 trunc_page(src_addr
),
5775 (src_map
== kernel_map
) ?
5776 VM_MAP_REMOVE_KUNWIRE
:
5780 vm_map_unlock(src_map
);
5782 /* Fix-up start and end points in copy. This is necessary */
5783 /* when the various entries in the copy object were picked */
5784 /* up from different sub-maps */
5786 tmp_entry
= vm_map_copy_first_entry(copy
);
5787 while (tmp_entry
!= vm_map_copy_to_entry(copy
)) {
5788 tmp_entry
->vme_end
= copy_addr
+
5789 (tmp_entry
->vme_end
- tmp_entry
->vme_start
);
5790 tmp_entry
->vme_start
= copy_addr
;
5791 copy_addr
+= tmp_entry
->vme_end
- tmp_entry
->vme_start
;
5792 tmp_entry
= (struct vm_map_entry
*)tmp_entry
->vme_next
;
5795 *copy_result
= copy
;
5796 return(KERN_SUCCESS
);
}

/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,		/* offset of region in object */
	vm_object_size_t	size,		/* size of region in object */
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;			/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->cpy_index = 0;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
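
/*
 *	vm_map_fork_share:
 *
 *	Handle a VM_INHERIT_SHARE entry while forking a map: nested submaps
 *	have their pmap re-nested into the child, a missing object is
 *	allocated, and an object still using the symmetric copy strategy is
 *	shadowed when necessary; the entry is then cloned, both entries are
 *	marked is_shared, and the clone is linked at the tail of the new map.
 */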
5836 vm_map_entry_t old_entry
,
5840 vm_map_entry_t new_entry
;
5841 kern_return_t result
;
5844 * New sharing code. New map entry
5845 * references original object. Internal
5846 * objects use asynchronous copy algorithm for
5847 * future copies. First make sure we have
5848 * the right object. If we need a shadow,
5849 * or someone else already has one, then
5850 * make a new shadow and share it.
5853 object
= old_entry
->object
.vm_object
;
5854 if (old_entry
->is_sub_map
) {
5855 assert(old_entry
->wired_count
== 0);
5857 if(old_entry
->use_pmap
) {
5858 result
= pmap_nest(new_map
->pmap
,
5859 (old_entry
->object
.sub_map
)->pmap
,
5860 old_entry
->vme_start
,
5861 old_entry
->vme_end
- old_entry
->vme_start
);
5863 panic("vm_map_fork_share: pmap_nest failed!");
5866 } else if (object
== VM_OBJECT_NULL
) {
5867 object
= vm_object_allocate((vm_size_t
)(old_entry
->vme_end
-
5868 old_entry
->vme_start
));
5869 old_entry
->offset
= 0;
5870 old_entry
->object
.vm_object
= object
;
5871 assert(!old_entry
->needs_copy
);
5872 } else if (object
->copy_strategy
!=
5873 MEMORY_OBJECT_COPY_SYMMETRIC
) {
5876 * We are already using an asymmetric
5877 * copy, and therefore we already have
5881 assert(! old_entry
->needs_copy
);
5883 else if (old_entry
->needs_copy
|| /* case 1 */
5884 object
->shadowed
|| /* case 2 */
5885 (!object
->true_share
&& /* case 3 */
5886 !old_entry
->is_shared
&&
5888 (vm_size_t
)(old_entry
->vme_end
-
5889 old_entry
->vme_start
)))) {
		/*
		 * We need to create a shadow.  There are three cases here.
		 * In the first case, we need to complete a deferred
		 * symmetrical copy that we participated in.  In the second
		 * and third cases, we need to create the shadow so that
		 * changes that we make to the object do not interfere with
		 * any symmetrical copies which have occurred (case 2) or
		 * which might occur (case 3).
		 *
		 * The first case is when we had deferred shadow object
		 * creation via the entry->needs_copy mechanism.  This
		 * mechanism only works when only one entry points to the
		 * source object, and we are about to create a second entry
		 * pointing to the same object.  The problem is that there
		 * is no way of mapping from an object to the entries
		 * pointing to it.  (Deferred shadow creation works with one
		 * entry because it occurs at fault time, and we walk from
		 * the entry to the object when handling the fault.)
		 *
		 * The second case is when the object to be shared has
		 * already been copied with a symmetric copy, but we point
		 * directly to the object without needs_copy set in our
		 * entry.  (This can happen because different ranges of an
		 * object can be pointed to by different entries.  In
		 * particular, a single entry pointing to an object can be
		 * split by a call to vm_inherit, which, combined with
		 * task_create, can result in the different entries having
		 * different needs_copy values.)  The shadowed flag in the
		 * object allows us to detect this case.  The problem with
		 * this case is that if this object has or will have
		 * shadows, then we must not perform an asymmetric copy of
		 * this object, since such a copy allows the object to be
		 * changed, which will break the previous symmetrical copies
		 * (which rely upon the object not changing).  In a sense,
		 * the shadowed flag says "don't change this object".  We
		 * fix this by creating a shadow object for this object, and
		 * sharing that.  This works because we are free to change
		 * the shadow object (and thus to use an asymmetric copy
		 * strategy); this is also semantically correct, since this
		 * object is temporary, and therefore a copy of the object
		 * is as good as the object itself.  (This is not true for
		 * permanent objects, since the pager needs to see changes,
		 * which won't happen if the changes are made to a copy.)
		 *
		 * The third case is when the object to be shared has parts
		 * sticking outside of the entry we're working with, and
		 * thus may in the future be subject to a symmetrical copy.
		 * (This is a preemptive version of case 2.)
		 */
5968 assert(!(object
->shadowed
&& old_entry
->is_shared
));
5969 vm_object_shadow(&old_entry
->object
.vm_object
,
5971 (vm_size_t
) (old_entry
->vme_end
-
5972 old_entry
->vme_start
));
5975 * If we're making a shadow for other than
5976 * copy on write reasons, then we have
5977 * to remove write permission.
5980 /* CDY FIX this! page_protect! */
5981 if (!old_entry
->needs_copy
&&
5982 (old_entry
->protection
& VM_PROT_WRITE
)) {
5983 if(old_entry
->is_sub_map
&& old_entry
->use_pmap
) {
5984 pmap_protect(old_entry
->object
.sub_map
->pmap
,
5985 old_entry
->vme_start
,
5987 old_entry
->protection
& ~VM_PROT_WRITE
);
5989 pmap_protect(vm_map_pmap(old_map
),
5990 old_entry
->vme_start
,
5992 old_entry
->protection
& ~VM_PROT_WRITE
);
5996 old_entry
->needs_copy
= FALSE
;
5997 object
= old_entry
->object
.vm_object
;
6001 * If object was using a symmetric copy strategy,
6002 * change its copy strategy to the default
6003 * asymmetric copy strategy, which is copy_delay
6004 * in the non-norma case and copy_call in the
6005 * norma case. Bump the reference count for the
6009 if(old_entry
->is_sub_map
) {
6010 vm_map_lock(old_entry
->object
.sub_map
);
6011 vm_map_reference(old_entry
->object
.sub_map
);
6012 vm_map_unlock(old_entry
->object
.sub_map
);
6014 vm_object_lock(object
);
6015 object
->ref_count
++;
6016 vm_object_res_reference(object
);
6017 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
6018 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6020 vm_object_unlock(object
);
6024 * Clone the entry, using object ref from above.
6025 * Mark both entries as shared.
6028 new_entry
= vm_map_entry_create(new_map
);
6029 vm_map_entry_copy(new_entry
, old_entry
);
6030 old_entry
->is_shared
= TRUE
;
6031 new_entry
->is_shared
= TRUE
;
6034 * Insert the entry into the new map -- we
6035 * know we're inserting at the end of the new
6039 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
), new_entry
);
6042 * Update the physical map
6045 if (old_entry
->is_sub_map
) {
6046 /* Bill Angell pmap support goes here */
6048 pmap_copy(new_map
->pmap
, old_map
->pmap
, new_entry
->vme_start
,
6049 old_entry
->vme_end
- old_entry
->vme_start
,
6050 old_entry
->vme_start
);
6057 vm_map_entry_t
*old_entry_p
,
6060 vm_map_entry_t old_entry
= *old_entry_p
;
6061 vm_size_t entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6062 vm_offset_t start
= old_entry
->vme_start
;
6064 vm_map_entry_t last
= vm_map_last_entry(new_map
);
6066 vm_map_unlock(old_map
);
6068 * Use maxprot version of copyin because we
6069 * care about whether this memory can ever
6070 * be accessed, not just whether it's accessible
6073 if (vm_map_copyin_maxprot(old_map
, start
, entry_size
, FALSE
, ©
)
6076 * The map might have changed while it
6077 * was unlocked, check it again. Skip
6078 * any blank space or permanently
6079 * unreadable region.
6081 vm_map_lock(old_map
);
6082 if (!vm_map_lookup_entry(old_map
, start
, &last
) ||
6083 last
->max_protection
& VM_PROT_READ
==
6085 last
= last
->vme_next
;
6087 *old_entry_p
= last
;
6090 * XXX For some error returns, want to
6091 * XXX skip to the next element. Note
6092 * that INVALID_ADDRESS and
6093 * PROTECTION_FAILURE are handled above.
6100 * Insert the copy into the new map
6103 vm_map_copy_insert(new_map
, last
, copy
);
6106 * Pick up the traversal at the end of
6107 * the copied region.
6110 vm_map_lock(old_map
);
6111 start
+= entry_size
;
6112 if (! vm_map_lookup_entry(old_map
, start
, &last
)) {
6113 last
= last
->vme_next
;
6115 vm_map_clip_start(old_map
, last
, start
);
6117 *old_entry_p
= last
;
6125 * Create and return a new map based on the old
6126 * map, according to the inheritance values on the
6127 * regions in that map.
6129 * The source map must not be locked.
6135 pmap_t new_pmap
= pmap_create((vm_size_t
) 0);
6137 vm_map_entry_t old_entry
;
6138 vm_size_t new_size
= 0, entry_size
;
6139 vm_map_entry_t new_entry
;
6140 boolean_t src_needs_copy
;
6141 boolean_t new_entry_needs_copy
;
6143 vm_map_reference_swap(old_map
);
6144 vm_map_lock(old_map
);
6146 new_map
= vm_map_create(new_pmap
,
6147 old_map
->min_offset
,
6148 old_map
->max_offset
,
6149 old_map
->hdr
.entries_pageable
);
6152 old_entry
= vm_map_first_entry(old_map
);
6153 old_entry
!= vm_map_to_entry(old_map
);
6156 entry_size
= old_entry
->vme_end
- old_entry
->vme_start
;
6158 switch (old_entry
->inheritance
) {
6159 case VM_INHERIT_NONE
:
6162 case VM_INHERIT_SHARE
:
6163 vm_map_fork_share(old_map
, old_entry
, new_map
);
6164 new_size
+= entry_size
;
6167 case VM_INHERIT_COPY
:
6170 * Inline the copy_quickly case;
6171 * upon failure, fall back on call
6172 * to vm_map_fork_copy.
6175 if(old_entry
->is_sub_map
)
6177 if (old_entry
->wired_count
!= 0) {
6178 goto slow_vm_map_fork_copy
;
6181 new_entry
= vm_map_entry_create(new_map
);
6182 vm_map_entry_copy(new_entry
, old_entry
);
6183 /* clear address space specifics */
6184 new_entry
->use_pmap
= FALSE
;
6186 if (! vm_object_copy_quickly(
6187 &new_entry
->object
.vm_object
,
6189 (old_entry
->vme_end
-
6190 old_entry
->vme_start
),
6192 &new_entry_needs_copy
)) {
6193 vm_map_entry_dispose(new_map
, new_entry
);
6194 goto slow_vm_map_fork_copy
;
6198 * Handle copy-on-write obligations
6201 if (src_needs_copy
&& !old_entry
->needs_copy
) {
6202 vm_object_pmap_protect(
6203 old_entry
->object
.vm_object
,
6205 (old_entry
->vme_end
-
6206 old_entry
->vme_start
),
6207 ((old_entry
->is_shared
6208 || old_entry
->is_sub_map
)
6211 old_entry
->vme_start
,
6212 old_entry
->protection
& ~VM_PROT_WRITE
);
6214 old_entry
->needs_copy
= TRUE
;
6216 new_entry
->needs_copy
= new_entry_needs_copy
;
6219 * Insert the entry at the end
6223 vm_map_entry_link(new_map
, vm_map_last_entry(new_map
),
6225 new_size
+= entry_size
;
6228 slow_vm_map_fork_copy
:
6229 if (vm_map_fork_copy(old_map
, &old_entry
, new_map
)) {
6230 new_size
+= entry_size
;
6234 old_entry
= old_entry
->vme_next
;
	new_map->size = new_size;
	vm_map_unlock(old_map);
	vm_map_deallocate(old_map);

	return(new_map);
}
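
/*
 * vm_map_fork (above) handles each entry according to its inheritance
 * value: VM_INHERIT_NONE entries are skipped, VM_INHERIT_SHARE entries go
 * through vm_map_fork_share, and VM_INHERIT_COPY entries first try the
 * inline vm_object_copy_quickly path (setting up symmetric copy-on-write)
 * and fall back to vm_map_fork_copy when the entry is a submap, is wired,
 * or the quick copy fails.
 */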
/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
kern_return_t
6270 vm_map_lookup_locked(
6271 vm_map_t
*var_map
, /* IN/OUT */
6272 register vm_offset_t vaddr
,
6273 register vm_prot_t fault_type
,
6274 vm_map_version_t
*out_version
, /* OUT */
6275 vm_object_t
*object
, /* OUT */
6276 vm_object_offset_t
*offset
, /* OUT */
6277 vm_prot_t
*out_prot
, /* OUT */
6278 boolean_t
*wired
, /* OUT */
6279 int *behavior
, /* OUT */
6280 vm_object_offset_t
*lo_offset
, /* OUT */
6281 vm_object_offset_t
*hi_offset
, /* OUT */
6284 vm_map_entry_t entry
;
6285 register vm_map_t map
= *var_map
;
6286 vm_map_t old_map
= *var_map
;
6287 vm_map_t cow_sub_map_parent
= VM_MAP_NULL
;
6288 vm_offset_t cow_parent_vaddr
;
6289 vm_offset_t old_start
;
6290 vm_offset_t old_end
;
6291 register vm_prot_t prot
;
6297 * If the map has an interesting hint, try it before calling
6298 * full blown lookup routine.
6301 mutex_lock(&map
->s_lock
);
6303 mutex_unlock(&map
->s_lock
);
6305 if ((entry
== vm_map_to_entry(map
)) ||
6306 (vaddr
< entry
->vme_start
) || (vaddr
>= entry
->vme_end
)) {
6307 vm_map_entry_t tmp_entry
;
6310 * Entry was either not a valid hint, or the vaddr
6311 * was not contained in the entry, so do a full lookup.
6313 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
6314 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
))
6315 vm_map_unlock(cow_sub_map_parent
);
6316 if((*pmap_map
!= map
)
6317 && (*pmap_map
!= cow_sub_map_parent
))
6318 vm_map_unlock(*pmap_map
);
6319 return KERN_INVALID_ADDRESS
;
6324 if(map
== old_map
) {
6325 old_start
= entry
->vme_start
;
6326 old_end
= entry
->vme_end
;
6330 * Handle submaps. Drop lock on upper map, submap is
6335 if (entry
->is_sub_map
) {
6336 vm_offset_t local_vaddr
;
6337 vm_offset_t end_delta
;
6338 vm_offset_t start_delta
;
6339 vm_offset_t object_start_delta
;
6340 vm_map_entry_t submap_entry
;
6341 boolean_t mapped_needs_copy
=FALSE
;
6343 local_vaddr
= vaddr
;
6345 if ((!entry
->needs_copy
) && (entry
->use_pmap
)) {
6346 /* if pmap_map equals map we unlock below */
6347 if ((*pmap_map
!= map
) &&
6348 (*pmap_map
!= cow_sub_map_parent
))
6349 vm_map_unlock(*pmap_map
);
6350 *pmap_map
= entry
->object
.sub_map
;
6353 if(entry
->needs_copy
) {
6354 if (!mapped_needs_copy
) {
6355 if (vm_map_lock_read_to_write(map
)) {
6356 vm_map_lock_read(map
);
6357 if(*pmap_map
== entry
->object
.sub_map
)
6361 vm_map_lock_read(entry
->object
.sub_map
);
6362 cow_sub_map_parent
= map
;
6363 /* reset base to map before cow object */
6364 /* this is the map which will accept */
6365 /* the new cow object */
6366 old_start
= entry
->vme_start
;
6367 old_end
= entry
->vme_end
;
6368 cow_parent_vaddr
= vaddr
;
6369 mapped_needs_copy
= TRUE
;
6371 vm_map_lock_read(entry
->object
.sub_map
);
6372 if((cow_sub_map_parent
!= map
) &&
6377 vm_map_lock_read(entry
->object
.sub_map
);
6378 /* leave map locked if it is a target */
6379 /* cow sub_map above otherwise, just */
6380 /* follow the maps down to the object */
6381 /* here we unlock knowing we are not */
6382 /* revisiting the map. */
6383 if((*pmap_map
!= map
) && (map
!= cow_sub_map_parent
))
6384 vm_map_unlock_read(map
);
6387 *var_map
= map
= entry
->object
.sub_map
;
6389 /* calculate the offset in the submap for vaddr */
6390 local_vaddr
= (local_vaddr
- entry
->vme_start
) + entry
->offset
;
6393 if(!vm_map_lookup_entry(map
, local_vaddr
, &submap_entry
)) {
6394 if((cow_sub_map_parent
) && (cow_sub_map_parent
!= map
)){
6395 vm_map_unlock(cow_sub_map_parent
);
6397 if((*pmap_map
!= map
)
6398 && (*pmap_map
!= cow_sub_map_parent
)) {
6399 vm_map_unlock(*pmap_map
);
6402 return KERN_INVALID_ADDRESS
;
6404 /* find the attenuated shadow of the underlying object */
6405 /* on our target map */
6407 /* in english the submap object may extend beyond the */
6408 /* region mapped by the entry or, may only fill a portion */
6409 /* of it. For our purposes, we only care if the object */
6410 /* doesn't fill. In this case the area which will */
6411 /* ultimately be clipped in the top map will only need */
6412 /* to be as big as the portion of the underlying entry */
6413 /* which is mapped */
6414 start_delta
= submap_entry
->vme_start
> entry
->offset
?
6415 submap_entry
->vme_start
- entry
->offset
: 0;
6418 (entry
->offset
+ start_delta
+ (old_end
- old_start
)) <=
6419 submap_entry
->vme_end
?
6420 0 : (entry
->offset
+
6421 (old_end
- old_start
))
6422 - submap_entry
->vme_end
;
6424 old_start
+= start_delta
;
6425 old_end
-= end_delta
;
6427 if(submap_entry
->is_sub_map
) {
6428 entry
= submap_entry
;
6429 vaddr
= local_vaddr
;
6430 goto submap_recurse
;
6433 if(((fault_type
& VM_PROT_WRITE
) && cow_sub_map_parent
)) {
6435 vm_object_t copy_object
;
6436 vm_offset_t local_start
;
6437 vm_offset_t local_end
;
6438 boolean_t copied_slowly
= FALSE
;
6440 if (vm_map_lock_read_to_write(map
)) {
6441 vm_map_lock_read(map
);
6442 old_start
-= start_delta
;
6443 old_end
+= end_delta
;
6448 if (submap_entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6449 submap_entry
->object
.vm_object
=
6452 (submap_entry
->vme_end
6453 - submap_entry
->vme_start
));
6454 submap_entry
->offset
= 0;
6456 local_start
= local_vaddr
-
6457 (cow_parent_vaddr
- old_start
);
6458 local_end
= local_vaddr
+
6459 (old_end
- cow_parent_vaddr
);
6460 vm_map_clip_start(map
, submap_entry
, local_start
);
6461 vm_map_clip_end(map
, submap_entry
, local_end
);
6463 /* This is the COW case, lets connect */
6464 /* an entry in our space to the underlying */
6465 /* object in the submap, bypassing the */
6469 if(submap_entry
->wired_count
!= 0) {
6471 submap_entry
->object
.vm_object
);
6472 vm_object_copy_slowly(
6473 submap_entry
->object
.vm_object
,
6474 submap_entry
->offset
,
6475 submap_entry
->vme_end
-
6476 submap_entry
->vme_start
,
6479 copied_slowly
= TRUE
;
6482 /* set up shadow object */
6483 copy_object
= submap_entry
->object
.vm_object
;
6484 vm_object_reference(copy_object
);
6485 submap_entry
->object
.vm_object
->shadowed
= TRUE
;
6486 submap_entry
->needs_copy
= TRUE
;
6487 vm_object_pmap_protect(
6488 submap_entry
->object
.vm_object
,
6489 submap_entry
->offset
,
6490 submap_entry
->vme_end
-
6491 submap_entry
->vme_start
,
6492 submap_entry
->is_shared
?
6493 PMAP_NULL
: map
->pmap
,
6494 submap_entry
->vme_start
,
6495 submap_entry
->protection
&
			/*
			 * This works differently from the normal submap
			 * case.  We go back to the parent of the cow map
			 * and clip out the target portion of the sub_map,
			 * substituting the new copy object.
			 */
6508 local_start
= old_start
;
6509 local_end
= old_end
;
6510 map
= cow_sub_map_parent
;
6511 *var_map
= cow_sub_map_parent
;
6512 vaddr
= cow_parent_vaddr
;
6513 cow_sub_map_parent
= NULL
;
6515 if(!vm_map_lookup_entry(map
,
6517 vm_object_deallocate(
6519 vm_map_lock_write_to_read(map
);
6520 return KERN_INVALID_ADDRESS
;
6523 /* clip out the portion of space */
6524 /* mapped by the sub map which */
6525 /* corresponds to the underlying */
6527 vm_map_clip_start(map
, entry
, local_start
);
6528 vm_map_clip_end(map
, entry
, local_end
);
6531 /* substitute copy object for */
6532 /* shared map entry */
6533 vm_map_deallocate(entry
->object
.sub_map
);
6534 entry
->is_sub_map
= FALSE
;
6535 entry
->object
.vm_object
= copy_object
;
6537 entry
->protection
|= VM_PROT_WRITE
;
6538 entry
->max_protection
|= VM_PROT_WRITE
;
6541 entry
->needs_copy
= FALSE
;
6542 entry
->is_shared
= FALSE
;
6544 entry
->offset
= submap_entry
->offset
;
6545 entry
->needs_copy
= TRUE
;
6546 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6547 entry
->inheritance
= VM_INHERIT_COPY
;
6549 entry
->is_shared
= TRUE
;
6551 if(entry
->inheritance
== VM_INHERIT_SHARE
)
6552 entry
->inheritance
= VM_INHERIT_COPY
;
6554 vm_map_lock_write_to_read(map
);
6556 if((cow_sub_map_parent
)
6557 && (cow_sub_map_parent
!= *pmap_map
)
6558 && (cow_sub_map_parent
!= map
)) {
6559 vm_map_unlock(cow_sub_map_parent
);
6561 entry
= submap_entry
;
6562 vaddr
= local_vaddr
;
6567 * Check whether this task is allowed to have
6571 prot
= entry
->protection
;
6572 if ((fault_type
& (prot
)) != fault_type
) {
6573 if (*pmap_map
!= map
) {
6574 vm_map_unlock(*pmap_map
);
6577 return KERN_PROTECTION_FAILURE
;
6581 * If this page is not pageable, we have to get
6582 * it for all possible accesses.
6585 if (*wired
= (entry
->wired_count
!= 0))
6586 prot
= fault_type
= entry
->protection
;
6589 * If the entry was copy-on-write, we either ...
6592 if (entry
->needs_copy
) {
6594 * If we want to write the page, we may as well
6595 * handle that now since we've got the map locked.
6597 * If we don't need to write the page, we just
6598 * demote the permissions allowed.
6601 if (fault_type
& VM_PROT_WRITE
|| *wired
) {
6603 * Make a new object, and place it in the
6604 * object chain. Note that no new references
6605 * have appeared -- one just moved from the
6606 * map to the new object.
6609 if (vm_map_lock_read_to_write(map
)) {
6610 vm_map_lock_read(map
);
6613 vm_object_shadow(&entry
->object
.vm_object
,
6615 (vm_size_t
) (entry
->vme_end
-
6618 entry
->object
.vm_object
->shadowed
= TRUE
;
6619 entry
->needs_copy
= FALSE
;
6620 vm_map_lock_write_to_read(map
);
6624 * We're attempting to read a copy-on-write
6625 * page -- don't allow writes.
6628 prot
&= (~VM_PROT_WRITE
);
6633 * Create an object if necessary.
6635 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
6637 if (vm_map_lock_read_to_write(map
)) {
6638 vm_map_lock_read(map
);
6642 entry
->object
.vm_object
= vm_object_allocate(
6643 (vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
6645 vm_map_lock_write_to_read(map
);
6649 * Return the object/offset from this entry. If the entry
6650 * was copy-on-write or empty, it has been fixed up. Also
6651 * return the protection.
6654 *offset
= (vaddr
- entry
->vme_start
) + entry
->offset
;
6655 *object
= entry
->object
.vm_object
;
6657 *behavior
= entry
->behavior
;
6658 *lo_offset
= entry
->offset
;
6659 *hi_offset
= (entry
->vme_end
- entry
->vme_start
) + entry
->offset
;
6662 * Lock the object to prevent it from disappearing
6665 vm_object_lock(*object
);
6668 * Save the version number
6671 out_version
->main_timestamp
= map
->timestamp
;
	return KERN_SUCCESS;
}
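
/*
 * The object/offset/protection returned by vm_map_lookup_locked is only
 * good for as long as the map does not change.  A caller that must drop
 * the map lock (the page-fault path, for instance) records the returned
 * version and revalidates it later, roughly:
 *
 *	if (!vm_map_verify(map, &version)) {
 *		... redo the lookup ...
 *	}
 *	... use object/offset ...
 *	vm_map_verify_done(map, &version);
 *
 * which drops the read lock.  (Illustrative sketch only; see vm_map_verify
 * below.)
 */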
/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */
/*
 *	vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	    in until the vm merge from the IK is completed, and
 *	    vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */
kern_return_t
vm_region(
	vm_map_t	map,
6727 vm_offset_t
*address
, /* IN/OUT */
6728 vm_size_t
*size
, /* OUT */
6729 vm_region_flavor_t flavor
, /* IN */
6730 vm_region_info_t info
, /* OUT */
6731 mach_msg_type_number_t
*count
, /* IN/OUT */
6732 ipc_port_t
*object_name
) /* OUT */
6734 vm_map_entry_t tmp_entry
;
6736 vm_map_entry_t entry
;
6739 vm_region_basic_info_t basic
;
6740 vm_region_extended_info_t extended
;
6741 vm_region_top_info_t top
;
6743 if (map
== VM_MAP_NULL
)
6744 return(KERN_INVALID_ARGUMENT
);
6748 case VM_REGION_BASIC_INFO
:
6750 if (*count
< VM_REGION_BASIC_INFO_COUNT
)
6751 return(KERN_INVALID_ARGUMENT
);
6753 basic
= (vm_region_basic_info_t
) info
;
6754 *count
= VM_REGION_BASIC_INFO_COUNT
;
6756 vm_map_lock_read(map
);
6759 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6760 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6761 vm_map_unlock_read(map
);
6762 return(KERN_INVALID_ADDRESS
);
6768 start
= entry
->vme_start
;
6770 basic
->offset
= entry
->offset
;
6771 basic
->protection
= entry
->protection
;
6772 basic
->inheritance
= entry
->inheritance
;
6773 basic
->max_protection
= entry
->max_protection
;
6774 basic
->behavior
= entry
->behavior
;
6775 basic
->user_wired_count
= entry
->user_wired_count
;
6776 basic
->reserved
= entry
->is_sub_map
;
6778 *size
= (entry
->vme_end
- start
);
6780 if (object_name
) *object_name
= IP_NULL
;
6781 if (entry
->is_sub_map
) {
6782 basic
->shared
= FALSE
;
6784 basic
->shared
= entry
->is_shared
;
6787 vm_map_unlock_read(map
);
6788 return(KERN_SUCCESS
);
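
	/*
	 * A minimal user-level sketch of the VM_REGION_BASIC_INFO flavor
	 * handled above (illustrative only; the user-side stub lives in the
	 * generated mach interfaces, not in this file):
	 *
	 *	vm_region_basic_info_data_t info;
	 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT;
	 *	vm_offset_t addr = 0;
	 *	vm_size_t size;
	 *	mach_port_t object_name;
	 *
	 *	kr = vm_region(mach_task_self(), &addr, &size,
	 *		       VM_REGION_BASIC_INFO,
	 *		       (vm_region_info_t)&info, &count, &object_name);
	 *
	 * On success addr and size describe the first region at or above the
	 * requested address, exactly as computed above.
	 */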
6790 case VM_REGION_EXTENDED_INFO
:
6793 if (*count
< VM_REGION_EXTENDED_INFO_COUNT
)
6794 return(KERN_INVALID_ARGUMENT
);
6796 extended
= (vm_region_extended_info_t
) info
;
6797 *count
= VM_REGION_EXTENDED_INFO_COUNT
;
6799 vm_map_lock_read(map
);
6802 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6803 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6804 vm_map_unlock_read(map
);
6805 return(KERN_INVALID_ADDRESS
);
6810 start
= entry
->vme_start
;
6812 extended
->protection
= entry
->protection
;
6813 extended
->user_tag
= entry
->alias
;
6814 extended
->pages_resident
= 0;
6815 extended
->pages_swapped_out
= 0;
6816 extended
->pages_shared_now_private
= 0;
6817 extended
->pages_dirtied
= 0;
6818 extended
->external_pager
= 0;
6819 extended
->shadow_depth
= 0;
6821 vm_region_walk(entry
, extended
, entry
->offset
, entry
->vme_end
- start
, map
, start
);
6823 if (extended
->external_pager
&& extended
->ref_count
== 2 && extended
->share_mode
== SM_SHARED
)
6824 extended
->share_mode
= SM_PRIVATE
;
6827 *object_name
= IP_NULL
;
6829 *size
= (entry
->vme_end
- start
);
6831 vm_map_unlock_read(map
);
6832 return(KERN_SUCCESS
);
6834 case VM_REGION_TOP_INFO
:
6837 if (*count
< VM_REGION_TOP_INFO_COUNT
)
6838 return(KERN_INVALID_ARGUMENT
);
6840 top
= (vm_region_top_info_t
) info
;
6841 *count
= VM_REGION_TOP_INFO_COUNT
;
6843 vm_map_lock_read(map
);
6846 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6847 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6848 vm_map_unlock_read(map
);
6849 return(KERN_INVALID_ADDRESS
);
6855 start
= entry
->vme_start
;
6857 top
->private_pages_resident
= 0;
6858 top
->shared_pages_resident
= 0;
6860 vm_region_top_walk(entry
, top
);
6863 *object_name
= IP_NULL
;
6865 *size
= (entry
->vme_end
- start
);
6867 vm_map_unlock_read(map
);
6868 return(KERN_SUCCESS
);
6871 return(KERN_INVALID_ARGUMENT
);
6876 * vm_region_recurse: A form of vm_region which follows the
6877 * submaps in a target map
6884 vm_offset_t
*address
, /* IN/OUT */
6885 vm_size_t
*size
, /* OUT */
6886 natural_t
*nesting_depth
, /* IN/OUT */
6887 vm_region_recurse_info_t info
, /* IN/OUT */
6888 mach_msg_type_number_t
*count
) /* IN/OUT */
6890 vm_map_entry_t tmp_entry
;
6892 vm_map_entry_t entry
;
6896 unsigned int recurse_count
;
6899 vm_map_entry_t base_entry
;
6900 vm_offset_t base_next
;
6901 vm_offset_t base_addr
;
6902 vm_offset_t baddr_start_delta
;
6903 vm_region_submap_info_t submap_info
;
6904 vm_region_extended_info_data_t extended
;
6906 if (map
== VM_MAP_NULL
)
6907 return(KERN_INVALID_ARGUMENT
);
6909 submap_info
= (vm_region_submap_info_t
) info
;
6910 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
6912 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
6913 return(KERN_INVALID_ARGUMENT
);
6917 recurse_count
= *nesting_depth
;
6919 LOOKUP_NEXT_BASE_ENTRY
:
6920 vm_map_lock_read(map
);
6921 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6922 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
6923 vm_map_unlock_read(map
);
6924 return(KERN_INVALID_ADDRESS
);
6929 *size
= entry
->vme_end
- entry
->vme_start
;
6930 start
= entry
->vme_start
;
6932 baddr_start_delta
= *address
- start
;
6933 base_next
= entry
->vme_end
;
6936 while(entry
->is_sub_map
&& recurse_count
) {
6938 vm_map_lock_read(entry
->object
.sub_map
);
6941 if(entry
== base_entry
) {
6942 start
= entry
->offset
;
6943 start
+= *address
- entry
->vme_start
;
6946 submap
= entry
->object
.sub_map
;
6947 vm_map_unlock_read(map
);
6950 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
6951 if ((entry
= tmp_entry
->vme_next
)
6952 == vm_map_to_entry(map
)) {
6953 vm_map_unlock_read(map
);
6958 goto LOOKUP_NEXT_BASE_ENTRY
;
6964 if(start
<= entry
->vme_start
) {
6965 vm_offset_t old_start
= start
;
6966 if(baddr_start_delta
) {
6967 base_addr
+= (baddr_start_delta
);
6968 *size
-= baddr_start_delta
;
6969 baddr_start_delta
= 0;
6972 (base_addr
+= (entry
->vme_start
- start
))) {
6973 vm_map_unlock_read(map
);
6978 goto LOOKUP_NEXT_BASE_ENTRY
;
6980 *size
-= entry
->vme_start
- start
;
6981 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
6982 *size
= entry
->vme_end
- entry
->vme_start
;
6986 if(baddr_start_delta
) {
6987 if((start
- entry
->vme_start
)
6988 < baddr_start_delta
) {
6989 base_addr
+= start
- entry
->vme_start
;
6990 *size
-= start
- entry
->vme_start
;
6992 base_addr
+= baddr_start_delta
;
6993 *size
+= baddr_start_delta
;
6995 baddr_start_delta
= 0;
6997 base_addr
+= entry
->vme_start
;
6998 if(base_addr
>= base_next
) {
6999 vm_map_unlock_read(map
);
7004 goto LOOKUP_NEXT_BASE_ENTRY
;
7006 if (*size
> (entry
->vme_end
- start
))
7007 *size
= entry
->vme_end
- start
;
7009 start
= entry
->vme_start
- start
;
7012 start
+= entry
->offset
;
7015 *nesting_depth
-= recurse_count
;
7016 if(entry
!= base_entry
) {
7017 start
= entry
->vme_start
+ (start
- entry
->offset
);
7021 submap_info
->user_tag
= entry
->alias
;
7022 submap_info
->offset
= entry
->offset
;
7023 submap_info
->protection
= entry
->protection
;
7024 submap_info
->inheritance
= entry
->inheritance
;
7025 submap_info
->max_protection
= entry
->max_protection
;
7026 submap_info
->behavior
= entry
->behavior
;
7027 submap_info
->user_wired_count
= entry
->user_wired_count
;
7028 submap_info
->is_submap
= entry
->is_sub_map
;
7029 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7030 *address
= base_addr
;
7033 extended
.pages_resident
= 0;
7034 extended
.pages_swapped_out
= 0;
7035 extended
.pages_shared_now_private
= 0;
7036 extended
.pages_dirtied
= 0;
7037 extended
.external_pager
= 0;
7038 extended
.shadow_depth
= 0;
7040 if(!entry
->is_sub_map
) {
7041 vm_region_walk(entry
, &extended
, entry
->offset
,
7042 entry
->vme_end
- start
, map
, start
);
7043 submap_info
->share_mode
= extended
.share_mode
;
7044 if (extended
.external_pager
&& extended
.ref_count
== 2
7045 && extended
.share_mode
== SM_SHARED
)
7046 submap_info
->share_mode
= SM_PRIVATE
;
7047 submap_info
->ref_count
= extended
.ref_count
;
7050 submap_info
->share_mode
= SM_TRUESHARED
;
7052 submap_info
->share_mode
= SM_PRIVATE
;
7053 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7056 submap_info
->pages_resident
= extended
.pages_resident
;
7057 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7058 submap_info
->pages_shared_now_private
=
7059 extended
.pages_shared_now_private
;
7060 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7061 submap_info
->external_pager
= extended
.external_pager
;
7062 submap_info
->shadow_depth
= extended
.shadow_depth
;
7064 vm_map_unlock_read(map
);
7065 return(KERN_SUCCESS
);
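
/*
 * Note on the share_mode reported above: vm_region_walk classifies the
 * backing object, and a nominally SM_SHARED object whose only two
 * references are the map and its external pager is downgraded to
 * SM_PRIVATE; a submap entry that is not descended into instead reports
 * SM_TRUESHARED or SM_PRIVATE together with the submap's own reference
 * count.
 */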
7069 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7070 * Goes away after regular vm_region_recurse function migrates to
7072 * vm_region_recurse: A form of vm_region which follows the
7073 * submaps in a target map
7078 vm_region_recurse_64(
7080 vm_offset_t
*address
, /* IN/OUT */
7081 vm_size_t
*size
, /* OUT */
7082 natural_t
*nesting_depth
, /* IN/OUT */
7083 vm_region_recurse_info_t info
, /* IN/OUT */
7084 mach_msg_type_number_t
*count
) /* IN/OUT */
7086 vm_map_entry_t tmp_entry
;
7088 vm_map_entry_t entry
;
7092 unsigned int recurse_count
;
7095 vm_map_entry_t base_entry
;
7096 vm_offset_t base_next
;
7097 vm_offset_t base_addr
;
7098 vm_offset_t baddr_start_delta
;
7099 vm_region_submap_info_64_t submap_info
;
7100 vm_region_extended_info_data_t extended
;
7102 if (map
== VM_MAP_NULL
)
7103 return(KERN_INVALID_ARGUMENT
);
7105 submap_info
= (vm_region_submap_info_64_t
) info
;
7106 *count
= VM_REGION_SUBMAP_INFO_COUNT
;
7108 if (*count
< VM_REGION_SUBMAP_INFO_COUNT
)
7109 return(KERN_INVALID_ARGUMENT
);
7113 recurse_count
= *nesting_depth
;
7115 LOOKUP_NEXT_BASE_ENTRY
:
7116 vm_map_lock_read(map
);
7117 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7118 if ((entry
= tmp_entry
->vme_next
) == vm_map_to_entry(map
)) {
7119 vm_map_unlock_read(map
);
7120 return(KERN_INVALID_ADDRESS
);
7125 *size
= entry
->vme_end
- entry
->vme_start
;
7126 start
= entry
->vme_start
;
7128 baddr_start_delta
= *address
- start
;
7129 base_next
= entry
->vme_end
;
7132 while(entry
->is_sub_map
&& recurse_count
) {
7134 vm_map_lock_read(entry
->object
.sub_map
);
7137 if(entry
== base_entry
) {
7138 start
= entry
->offset
;
7139 start
+= *address
- entry
->vme_start
;
7142 submap
= entry
->object
.sub_map
;
7143 vm_map_unlock_read(map
);
7146 if (!vm_map_lookup_entry(map
, start
, &tmp_entry
)) {
7147 if ((entry
= tmp_entry
->vme_next
)
7148 == vm_map_to_entry(map
)) {
7149 vm_map_unlock_read(map
);
7154 goto LOOKUP_NEXT_BASE_ENTRY
;
7160 if(start
<= entry
->vme_start
) {
7161 vm_offset_t old_start
= start
;
7162 if(baddr_start_delta
) {
7163 base_addr
+= (baddr_start_delta
);
7164 *size
-= baddr_start_delta
;
7165 baddr_start_delta
= 0;
7168 (base_addr
+= (entry
->vme_start
- start
))) {
7169 vm_map_unlock_read(map
);
7174 goto LOOKUP_NEXT_BASE_ENTRY
;
7176 *size
-= entry
->vme_start
- start
;
7177 if (*size
> (entry
->vme_end
- entry
->vme_start
)) {
7178 *size
= entry
->vme_end
- entry
->vme_start
;
7182 if(baddr_start_delta
) {
7183 if((start
- entry
->vme_start
)
7184 < baddr_start_delta
) {
7185 base_addr
+= start
- entry
->vme_start
;
7186 *size
-= start
- entry
->vme_start
;
7188 base_addr
+= baddr_start_delta
;
7189 *size
+= baddr_start_delta
;
7191 baddr_start_delta
= 0;
7193 base_addr
+= entry
->vme_start
;
7194 if(base_addr
>= base_next
) {
7195 vm_map_unlock_read(map
);
7200 goto LOOKUP_NEXT_BASE_ENTRY
;
7202 if (*size
> (entry
->vme_end
- start
))
7203 *size
= entry
->vme_end
- start
;
7205 start
= entry
->vme_start
- start
;
7208 start
+= entry
->offset
;
7211 *nesting_depth
-= recurse_count
;
7212 if(entry
!= base_entry
) {
7213 start
= entry
->vme_start
+ (start
- entry
->offset
);
7217 submap_info
->user_tag
= entry
->alias
;
7218 submap_info
->offset
= entry
->offset
;
7219 submap_info
->protection
= entry
->protection
;
7220 submap_info
->inheritance
= entry
->inheritance
;
7221 submap_info
->max_protection
= entry
->max_protection
;
7222 submap_info
->behavior
= entry
->behavior
;
7223 submap_info
->user_wired_count
= entry
->user_wired_count
;
7224 submap_info
->is_submap
= entry
->is_sub_map
;
7225 submap_info
->object_id
= (vm_offset_t
)entry
->object
.vm_object
;
7226 *address
= base_addr
;
7229 extended
.pages_resident
= 0;
7230 extended
.pages_swapped_out
= 0;
7231 extended
.pages_shared_now_private
= 0;
7232 extended
.pages_dirtied
= 0;
7233 extended
.external_pager
= 0;
7234 extended
.shadow_depth
= 0;
7236 if(!entry
->is_sub_map
) {
7237 vm_region_walk(entry
, &extended
, entry
->offset
,
7238 entry
->vme_end
- start
, map
, start
);
7239 submap_info
->share_mode
= extended
.share_mode
;
7240 if (extended
.external_pager
&& extended
.ref_count
== 2
7241 && extended
.share_mode
== SM_SHARED
)
7242 submap_info
->share_mode
= SM_PRIVATE
;
7243 submap_info
->ref_count
= extended
.ref_count
;
7246 submap_info
->share_mode
= SM_TRUESHARED
;
7248 submap_info
->share_mode
= SM_PRIVATE
;
7249 submap_info
->ref_count
= entry
->object
.sub_map
->ref_count
;
7252 submap_info
->pages_resident
= extended
.pages_resident
;
7253 submap_info
->pages_swapped_out
= extended
.pages_swapped_out
;
7254 submap_info
->pages_shared_now_private
=
7255 extended
.pages_shared_now_private
;
7256 submap_info
->pages_dirtied
= extended
.pages_dirtied
;
7257 submap_info
->external_pager
= extended
.external_pager
;
7258 submap_info
->shadow_depth
= extended
.shadow_depth
;
7260 vm_map_unlock_read(map
);
7261 return(KERN_SUCCESS
);
/*
 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 * Goes away after regular vm_region function migrates to
 * 64 bits
 */

kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	ipc_port_t		*object_name)		/* OUT */
{
	vm_map_entry_t			tmp_entry;
	register vm_map_entry_t		entry;
	register vm_offset_t		start;
	vm_region_basic_info_64_t	basic;
	vm_region_extended_info_t	extended;
	vm_region_top_info_t		top;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case VM_REGION_BASIC_INFO:
	{
	    if (*count < VM_REGION_BASIC_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    basic = (vm_region_basic_info_64_t) info;
	    *count = VM_REGION_BASIC_INFO_COUNT;

	    vm_map_lock_read(map);

	    start = *address;
	    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	    } else {
		entry = tmp_entry;
	    }

	    start = entry->vme_start;

	    basic->offset = entry->offset;
	    basic->protection = entry->protection;
	    basic->inheritance = entry->inheritance;
	    basic->max_protection = entry->max_protection;
	    basic->behavior = entry->behavior;
	    basic->user_wired_count = entry->user_wired_count;
	    basic->reserved = entry->is_sub_map;
	    *address = start;
	    *size = (entry->vme_end - start);

	    if (object_name) *object_name = IP_NULL;
	    if (entry->is_sub_map) {
		basic->shared = FALSE;
	    } else {
		basic->shared = entry->is_shared;
	    }

	    vm_map_unlock_read(map);
	    return(KERN_SUCCESS);
	}
	case VM_REGION_EXTENDED_INFO:
	{
	    if (*count < VM_REGION_EXTENDED_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    extended = (vm_region_extended_info_t) info;
	    *count = VM_REGION_EXTENDED_INFO_COUNT;

	    vm_map_lock_read(map);

	    start = *address;
	    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	    } else {
		entry = tmp_entry;
	    }
	    start = entry->vme_start;

	    extended->protection = entry->protection;
	    extended->user_tag = entry->alias;
	    extended->pages_resident = 0;
	    extended->pages_swapped_out = 0;
	    extended->pages_shared_now_private = 0;
	    extended->pages_dirtied = 0;
	    extended->external_pager = 0;
	    extended->shadow_depth = 0;

	    vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

	    if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
		extended->share_mode = SM_PRIVATE;

	    if (object_name)
		*object_name = IP_NULL;
	    *address = start;
	    *size = (entry->vme_end - start);

	    vm_map_unlock_read(map);
	    return(KERN_SUCCESS);
	}
	case VM_REGION_TOP_INFO:
	{
	    if (*count < VM_REGION_TOP_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    top = (vm_region_top_info_t) info;
	    *count = VM_REGION_TOP_INFO_COUNT;

	    vm_map_lock_read(map);

	    start = *address;
	    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	    } else {
		entry = tmp_entry;
	    }

	    start = entry->vme_start;

	    top->private_pages_resident = 0;
	    top->shared_pages_resident = 0;

	    vm_region_top_walk(entry, top);

	    if (object_name)
		*object_name = IP_NULL;
	    *address = start;
	    *size = (entry->vme_end - start);

	    vm_map_unlock_read(map);
	    return(KERN_SUCCESS);
	}
	default:
	    return(KERN_INVALID_ARGUMENT);
	}
}
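
/*
 * Illustrative sketch, not part of the original source: one way a kernel
 * caller might use vm_region_64() above to fetch VM_REGION_BASIC_INFO for
 * the region containing a given address.  The function name
 * example_region_basic_info is hypothetical; it only uses names that appear
 * in this file.
 */
#if 0	/* example only */
static kern_return_t
example_region_basic_info(
	vm_map_t	map,
	vm_offset_t	addr)
{
	int			info[VM_REGION_BASIC_INFO_COUNT];
	vm_offset_t		address = addr;	/* rounded to the entry start on return */
	vm_size_t		size;
	mach_msg_type_number_t	count = VM_REGION_BASIC_INFO_COUNT;
	ipc_port_t		object_name;
	kern_return_t		kr;

	kr = vm_region_64(map, &address, &size, VM_REGION_BASIC_INFO,
			  (vm_region_info_t) info, &count, &object_name);
	if (kr == KERN_SUCCESS) {
		/* interpret the raw buffer through the flavor's structure */
		vm_region_basic_info_64_t basic = (vm_region_basic_info_64_t) info;

		if ((basic->protection & VM_PROT_WRITE) == 0)
			kr = KERN_SUCCESS;	/* region is read-only, for instance */
	}
	return kr;
}
#endif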
void
vm_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
{
	register struct vm_object *obj, *tmp_obj;
	register int		ref_count;

	if (entry->object.vm_object == 0) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}
	if (entry->is_sub_map)
		vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
	else {
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;

		if (obj->shadow) {
			if (ref_count == 1)
				top->private_pages_resident = obj->resident_page_count;
			else
				top->shared_pages_resident = obj->resident_page_count;
			top->ref_count = ref_count;
			top->share_mode = SM_COW;

			while (tmp_obj = obj->shadow) {
				vm_object_lock(tmp_obj);
				vm_object_unlock(obj);
				obj = tmp_obj;

				if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
					ref_count--;

				top->shared_pages_resident += obj->resident_page_count;
				top->ref_count += ref_count - 1;
			}
		} else {
			if (entry->needs_copy) {
				top->share_mode = SM_COW;
				top->shared_pages_resident = obj->resident_page_count;
			} else {
				if (ref_count == 1 ||
				   (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
					top->share_mode = SM_PRIVATE;
					top->private_pages_resident = obj->resident_page_count;
				} else {
					top->share_mode = SM_SHARED;
					top->shared_pages_resident = obj->resident_page_count;
				}
			}
			top->ref_count = ref_count;
		}
		top->obj_id = (int)obj;

		vm_object_unlock(obj);
	}
}
void
vm_region_walk(
	vm_map_entry_t			entry,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	vm_offset_t			range,
	vm_map_t			map,
	vm_offset_t			va)
{
	register struct vm_object *obj, *tmp_obj;
	register vm_offset_t	last_offset;
	register int		i;
	register int		ref_count;
	void vm_region_look_for_page();

	if ((entry->object.vm_object == 0) ||
		(entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}
	if (entry->is_sub_map)
		vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
			       range, map, va);
	else {
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;

		for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
			vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);

		if (extended->shadow_depth || entry->needs_copy)
			extended->share_mode = SM_COW;
		else {
			if (ref_count == 1)
				extended->share_mode = SM_PRIVATE;
			else {
				if (obj->true_share)
					extended->share_mode = SM_TRUESHARED;
				else
					extended->share_mode = SM_SHARED;
			}
		}
		extended->ref_count = ref_count - extended->shadow_depth;

		for (i = 0; i < extended->shadow_depth; i++) {
			if ((tmp_obj = obj->shadow) == 0)
				break;
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);

			if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
				ref_count--;

			extended->ref_count += ref_count;
			obj = tmp_obj;
		}
		vm_object_unlock(obj);

		if (extended->share_mode == SM_SHARED) {
			register vm_map_entry_t	cur;
			register vm_map_entry_t	last;
			int			my_refs;

			obj = entry->object.vm_object;
			last = vm_map_to_entry(map);
			my_refs = 0;

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
				ref_count--;

			for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
				my_refs += vm_region_count_obj_refs(cur, obj);

			if (my_refs == ref_count)
				extended->share_mode = SM_PRIVATE_ALIASED;
			else if (my_refs > 1)
				extended->share_mode = SM_SHARED_ALIASED;
		}
	}
}
/* object is locked on entry and locked on return */

void
vm_region_look_for_page(
	vm_object_t			object,
	vm_region_extended_info_t	extended,
	vm_object_offset_t		offset,
	int				max_refcnt,
	int				depth,
	vm_map_t			map,
	vm_offset_t			va)
{
	register vm_page_t	p;
	register vm_object_t	shadow;
	register int		ref_count;
	vm_object_t		caller_object;

	shadow = object->shadow;
	caller_object = object;

	while (TRUE) {

		if ( !(object->pager_trusted) && !(object->internal))
			extended->external_pager = 1;

		if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			if (shadow && (max_refcnt == 1))
				extended->pages_shared_now_private++;

			if (p->dirty || pmap_is_modified(p->phys_addr))
				extended->pages_dirtied++;
			extended->pages_resident++;

			if(object != caller_object)
				vm_object_unlock(object);

			return;
		}
		if (object->existence_map) {
			if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

				extended->pages_swapped_out++;

				if(object != caller_object)
					vm_object_unlock(object);

				return;
			}
		}
		if (shadow) {
			vm_object_lock(shadow);

			if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
				ref_count--;

			if (++depth > extended->shadow_depth)
				extended->shadow_depth = depth;

			if (ref_count > max_refcnt)
				max_refcnt = ref_count;

			if(object != caller_object)
				vm_object_unlock(object);

			object = shadow;
			shadow = object->shadow;
			offset = offset + object->shadow_offset;
			continue;
		}
		if(object != caller_object)
			vm_object_unlock(object);

		return;
	}
}
int
vm_region_count_obj_refs(
	vm_map_entry_t	entry,
	vm_object_t	object)
{
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

	if (entry->is_sub_map)
		ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			if (tmp_obj = chk_obj->shadow)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}
/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(
	vm_map_t	map,
	vm_offset_t	start)
{
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;
	vm_map_entry_t	next_entry;

	vm_map_lock(map);
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&

		(prev_entry->vme_end == this_entry->vme_start) &&

		(prev_entry->is_shared == FALSE) &&
		(prev_entry->is_sub_map == FALSE) &&

		(this_entry->is_shared == FALSE) &&
		(this_entry->is_sub_map == FALSE) &&

		(prev_entry->inheritance == this_entry->inheritance) &&
		(prev_entry->protection == this_entry->protection) &&
		(prev_entry->max_protection == this_entry->max_protection) &&
		(prev_entry->behavior == this_entry->behavior) &&
		(prev_entry->wired_count == this_entry->wired_count) &&
		(prev_entry->user_wired_count == this_entry->user_wired_count)&&
		(prev_entry->in_transition == FALSE) &&
		(this_entry->in_transition == FALSE) &&

		(prev_entry->needs_copy == this_entry->needs_copy) &&

		(prev_entry->object.vm_object == this_entry->object.vm_object)&&
		((prev_entry->offset +
		  (prev_entry->vme_end - prev_entry->vme_start))
		     == this_entry->offset)
	) {
		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->vme_end = this_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
		counter(c_vm_map_simplified_lower++);
	}
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&

		(next_entry->vme_start == this_entry->vme_end) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->is_shared == FALSE) &&
		(next_entry->is_sub_map == FALSE) &&

		(next_entry->inheritance == this_entry->inheritance) &&
		(next_entry->protection == this_entry->protection) &&
		(next_entry->max_protection == this_entry->max_protection) &&
		(next_entry->behavior == this_entry->behavior) &&
		(next_entry->wired_count == this_entry->wired_count) &&
		(next_entry->user_wired_count == this_entry->user_wired_count)&&
		(this_entry->in_transition == FALSE) &&
		(next_entry->in_transition == FALSE) &&

		(next_entry->needs_copy == this_entry->needs_copy) &&

		(next_entry->object.vm_object == this_entry->object.vm_object)&&
		((this_entry->offset +
		  (this_entry->vme_end - this_entry->vme_start))
		     == next_entry->offset)
	) {
		vm_map_entry_unlink(map, next_entry);
		this_entry->vme_end = next_entry->vme_end;
		UPDATE_FIRST_FREE(map, map->first_free);
		vm_object_deallocate(next_entry->object.vm_object);
		vm_map_entry_dispose(map, next_entry);
		counter(c_vm_map_simplified_upper++);
	}
	counter(c_vm_map_simplify_called++);
	vm_map_unlock(map);
}
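
/*
 * Illustrative sketch, not part of the original source: a caller that has
 * just grown a kernel allocation might give the map a chance to coalesce
 * the adjacent entries.  example_coalesce_after_alloc is hypothetical.
 */
#if 0	/* example only */
static void
example_coalesce_after_alloc(
	vm_map_t	map,
	vm_offset_t	start)
{
	/*
	 * vm_map_simplify() takes the map lock itself, so the map must be
	 * unlocked here; it merges the entry containing "start" with its
	 * neighbors when objects, offsets and attributes line up.
	 */
	vm_map_simplify(map, start);
}
#endif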
/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cachability etc. for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here,
 *		everything else in the pmap module. If any non-volatile
 *		information must be kept, the pmap module should handle
 *		it itself. [This assumes that attributes do not
 *		need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
	vm_map_t			map,
	vm_offset_t			address,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t*	value)		/* IN/OUT */
{
	kern_return_t	ret;

	if (address < vm_map_min(map) ||
	    (address + size) > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	vm_map_lock(map);

	ret = pmap_attribute(map->pmap, address, size, attribute, value);

	vm_map_unlock(map);

	return ret;
}
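
/*
 * Illustrative sketch, not part of the original source: flushing the data
 * cache for a mapped range by way of vm_map_machine_attribute() above.
 * example_flush_range is hypothetical; MATTR_CACHE and MATTR_VAL_CACHE_FLUSH
 * come from <mach/vm_attributes.h>.
 */
#if 0	/* example only */
static kern_return_t
example_flush_range(
	vm_map_t	map,
	vm_offset_t	address,
	vm_size_t	size)
{
	vm_machine_attribute_val_t	value = MATTR_VAL_CACHE_FLUSH;

	/* bounds are checked and the map locked inside the routine */
	return vm_map_machine_attribute(map, address, size,
					MATTR_CACHE, &value);
}
#endif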
/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_behavior_t	new_behavior)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	XPR(XPR_VM_MAP,
		"vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
		(integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	/*
	 *	The entire address range must be valid for the map.
	 *	Note that vm_map_range_check() does a
	 *	vm_map_lookup_entry() internally and returns the
	 *	entry containing the start of the address range if
	 *	the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
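
/*
 * Illustrative sketch, not part of the original source: advising the VM
 * system that a range will be touched sequentially, so pageins can be
 * clustered accordingly.  example_advise_sequential is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_advise_sequential(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	len)
{
	/* clips entries at [start, start+len) and tags them VM_BEHAVIOR_SEQUENTIAL */
	return vm_map_behavior_set(map, start, start + len,
				   VM_BEHAVIOR_SEQUENTIAL);
}
#endif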
#include <mach_kdb.h>
#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
		struct vm_map_links	*links);

extern void vm_map_header_print(
		struct vm_map_header	*header);

extern void vm_map_entry_print(
		vm_map_entry_t		entry);

extern void vm_follow_entry(
		vm_map_entry_t		entry);

extern void vm_follow_map(
		vm_map_t		map);

/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
	struct vm_map_links	*links)
{
	iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
		links->prev,
		links->next,
		links->start,
		links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
	struct vm_map_header	*header)
{
	vm_map_links_print(&header->links);
	iprintf("nentries=0x%x, %sentries_pageable\n",
		header->nentries,
		(header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
	vm_map_entry_t entry)
{
	extern int db_indent;
	int shadows;

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n",shadows);

	db_indent -= 2;
}

/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
	register vm_map_entry_t	entry)
{
	extern int db_indent;
	static char *inheritance_name[4] = { "share", "copy", "none", "?"};
	static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry 0x%x:\n", entry);

	db_indent += 2;

	vm_map_links_print(&entry->links);

	iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
		entry->vme_start,
		entry->vme_end,
		entry->protection,
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->wired_count,
		entry->user_wired_count);
	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap=0x%x, offset=0x%x\n",
			entry->object.sub_map,
			entry->offset);
	} else {
		iprintf("object=0x%x, offset=0x%x, ",
			entry->object.vm_object,
			entry->offset);
		printf("%sis_shared, %sneeds_copy\n",
			(entry->is_shared ? "" : "!"),
			(entry->needs_copy ? "" : "!"));
	}

	db_indent -= 2;
}

/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
	vm_map_t map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);
	}

	db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
	register vm_map_t	map)
{
	register vm_map_entry_t	entry;
	extern int db_indent;
	char *swstate;

	iprintf("task map 0x%x:\n", map);

	db_indent += 2;

	vm_map_header_print(&map->hdr);

	iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
		map->pmap,
		map->size,
		map->ref_count,
		map->hint,
		map->first_free);

	iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),
		map->timestamp);

#if	TASK_SWAPPER
	switch (map->sw_state) {
	case MAP_SW_IN:
		swstate = "SW_IN";
		break;
	case MAP_SW_OUT:
		swstate = "SW_OUT";
		break;
	default:
		swstate = "????";
		break;
	}
	iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);
	}

	db_indent -= 2;
}

/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */
void
vm_map_copy_print(
	vm_map_copy_t	copy)
{
	extern int db_indent;
	vm_map_entry_t entry;

	printf("copy object 0x%x\n", copy);

	db_indent += 2;

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");
		break;

	case VM_MAP_COPY_OBJECT:
		printf("[object]");
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");
		break;

	default:
		printf("[bad type]");
		break;
	}
	printf(", offset=0x%x", copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);
		}
		break;

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
		break;
	}

	db_indent -= 2;
}

/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_size_t
db_vm_map_total_size(
	vm_map_t	map)
{
	vm_map_entry_t	entry;
	vm_size_t	total;

	total = 0;
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}

	return total;
}

#endif	/* MACH_KDB */
/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
	vm_map_t		map,
	vm_map_entry_t		insp_entry,
	vm_offset_t		start,
	vm_offset_t		end,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
{
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);
	return new_entry;
}
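
/*
 * Illustrative sketch, not part of the original source: inserting a fresh
 * entry covering [start, end) backed by "object" into a map.  The helper
 * name example_insert_entry is hypothetical; the map lock is taken here
 * because vm_map_entry_insert() expects a locked map, and the sketch assumes
 * the range is currently unallocated.
 */
#if 0	/* example only */
static void
example_insert_entry(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_object_t	object)
{
	vm_map_entry_t	insp_entry;

	vm_map_lock(map);
	/*
	 * On a miss, vm_map_lookup_entry() leaves insp_entry pointing at the
	 * entry preceding "start", which is the insertion point we want.
	 */
	if (vm_map_lookup_entry(map, start, &insp_entry)) {
		vm_map_unlock(map);
		return;			/* range already in use */
	}
	(void) vm_map_entry_insert(map, insp_entry, start, end,
				   object, (vm_object_offset_t) 0,
				   FALSE,		/* needs_copy */
				   FALSE,		/* is_shared */
				   FALSE,		/* in_transition */
				   VM_PROT_DEFAULT, VM_PROT_ALL,
				   VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT,
				   0);			/* wired_count */
	vm_map_unlock(map);
}
#endif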
/*
 *	Routine:	vm_remap_extract
 *
 *	Description:	This routine returns a vm_entry list from a map.
 */
kern_return_t
vm_remap_extract(
	vm_map_t		map,
	vm_offset_t		addr,
	vm_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	boolean_t		pageable)
{
	kern_return_t		result;
	vm_size_t		mapped_size;
	vm_size_t		tmp_size;
	vm_map_entry_t		src_entry;	/* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_offset_t		map_address;
	vm_offset_t		src_start;	/* start of entry to map */
	vm_offset_t		src_end;	/* end of region to be mapped */
	vm_object_t		object;
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == round_page(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/*
	 *	Compute start and end of region.
	 */
	src_start = trunc_page(addr);
	src_end = round_page(src_start + size);

	/*
	 *	Initialize map_header.
	 */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	map_address = 0;
	mapped_size = 0;
	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, need to loop on them.
	 */
	vm_map_lock(map);
	while (mapped_size != size) {
		vm_size_t	entry_size;

		/*
		 *	Find the beginning of the region.
		 */
		if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if(src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_size_t)(src_entry->vme_end -
					 src_entry->vme_start);

		if(src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
		} else {
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 *	the right object.
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,
						 &src_entry->offset,
						 entry_size);

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					pmap_protect(vm_map_pmap(map),
						     src_entry->vme_start,
						     src_entry->vme_end,
						     src_entry->protection &
						     ~VM_PROT_WRITE);
				}

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;
			}

			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
			    MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			}
			vm_object_unlock(object);
		}

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE; /* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/*
		 *	The new region has to be copied now if required.
		 */
	RestartCopy:
		if (!copy) {
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
		} else if (src_entry->wired_count == 0 &&
			 vm_object_copy_quickly(&new_entry->object.vm_object,
						new_entry->offset,
						(new_entry->vme_end -
						 new_entry->vme_start),
						&src_needs_copy,
						&new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/*
			 *	Handle copy_on_write semantics.
			 */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
						       offset,
						       entry_size,
						       (src_entry->is_shared ?
							PMAP_NULL : map->pmap),
						       src_entry->vme_start,
						       src_entry->protection &
						       ~VM_PROT_WRITE);

				src_entry->needs_copy = TRUE;
			}
			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

		} else {
			new_entry->is_shared = FALSE;

			/*
			 *	The map can be safely unlocked since we
			 *	already hold a reference on the object.
			 *
			 *	Record the timestamp of the map for later
			 *	verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);

			/*
			 *	Perform the copy.
			 */
			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
						object,
						offset,
						entry_size,
						THREAD_UNINT,
						&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
			} else {
				result = vm_object_copy_strategically(
						object,
						offset,
						entry_size,
						&new_entry->object.vm_object,
						&new_entry->offset,
						&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;
			}

			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);
				break;
			}

			/*
			 *	Verify that the map has not substantially
			 *	changed while the copy was being made.
			 */

			vm_map_lock(map);	/* Increments timestamp once! */
			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 *	Simple version comparison failed.
				 *
				 *	Retry the lookup and verify that the
				 *	same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
						     object.vm_object);
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;
				continue;
			}

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);
				goto RestartCopy;
			}
		}

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	} /* end while */

	vm_map_unlock(map);
	if (result != KERN_SUCCESS) {
		/*
		 *	Free all allocated elements.
		 */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
		}
	}
	return result;
}
/*
 *	Routine:	vm_map_remap
 *
 *			Map portion of a task's address space.
 *			Mapped region must not overlap more than
 *			one vm memory object. Protections and
 *			inheritance attributes remain the same
 *			as in the original task and are out parameters.
 *			Source and Target task can be identical
 *			Other attributes are identical as for vm_map()
 */
kern_return_t
vm_map_remap(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t		result;
	vm_map_entry_t		entry;
	vm_map_entry_t		insp_entry;
	vm_map_entry_t		new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
			break;
		/*FALL THRU*/
	default:
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);

	result = vm_remap_extract(src_map, memory_address,
				  size, copy, &map_header,
				  cur_protection,
				  max_protection,
				  inheritance,
				  target_map->hdr.entries_pageable);

	if (result != KERN_SUCCESS) {
		return result;
	}

	/*
	 * Allocate/check a range of free virtual address
	 * space for the target
	 */
	*address = trunc_page(*address);
	vm_map_lock(target_map);
	result = vm_remap_range_allocate(target_map, address, size,
					 mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			insp_entry = entry;
		} else {
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
			} else {
				vm_map_deallocate(entry->object.sub_map);
			}
			_vm_map_entry_dispose(&map_header, entry);
		}
	}

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	}
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
	return result;
}
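
/*
 * Illustrative sketch, not part of the original source: mirroring a
 * page-aligned range of src_map into target_map with copy-on-write
 * semantics, letting the kernel pick the target address.
 * example_mirror_range is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_mirror_range(
	vm_map_t	target_map,
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	size,
	vm_offset_t	*out_addr)	/* OUT */
{
	vm_prot_t	cur_prot;
	vm_prot_t	max_prot;

	*out_addr = 0;			/* start the "anywhere" search at the bottom of the map */
	return vm_map_remap(target_map, out_addr, size,
			    (vm_offset_t) 0,	/* mask: no alignment constraint */
			    TRUE,		/* anywhere */
			    src_map, src_addr,
			    TRUE,		/* copy (COW) */
			    &cur_prot, &max_prot,
			    VM_INHERIT_COPY);
}
#endif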
/*
 *	Routine:	vm_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		returns the address and the map entry just before the allocated
 *		range
 *
 *	Map must be locked.
 */

kern_return_t
vm_remap_range_allocate(
	vm_map_t	map,
	vm_offset_t	*address,	/* IN/OUT */
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	vm_map_entry_t	*map_entry)	/* OUT */
{
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
	register vm_offset_t	end;
	kern_return_t		result = KERN_SUCCESS;

 StartAgain: ;

	start = *address;

	if (anywhere)
	{
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			return(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				return(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block((void (*)(void))0);
						vm_map_lock(map);
						goto StartAgain;
					}
				}

				return(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t	temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		if ((start & mask) != 0)
			return(KERN_NO_SPACE);

		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			return(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			return(KERN_NO_SPACE);
	}
	*map_entry = entry;
	return(KERN_SUCCESS);
}
/*
 *	vm_map_switch:
 *
 *	Set the address map for the current thr_act to the specified map
 */
vm_map_t
vm_map_switch(
	vm_map_t	map)
{
	int		mycpu;
	thread_act_t	thr_act = current_act();
	vm_map_t	oldmap = thr_act->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thr_act, map, mycpu);

	mp_enable_preemption();
	return(oldmap);
}
/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map. The space must already exist in the
 *		destination map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault. i.e. kernel mode user
 *		threads.
 *
 */
kern_return_t
vm_map_write_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map. The space must already exist in the
 *		kernel map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault. i.e. kernel mode user
 *		threads.
 *
 */
kern_return_t
vm_map_read_user(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	size)
{
	thread_act_t	thr_act = current_act();
	kern_return_t	kr = KERN_SUCCESS;

	if(thr_act->map == map) {
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin((char *)src_addr, (char *)dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
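
/*
 * Illustrative sketch, not part of the original source: moving a small
 * kernel buffer into a (possibly non-current) user map and reading it back
 * with the two routines above.  example_roundtrip is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_roundtrip(
	vm_map_t	user_map,
	vm_offset_t	user_addr)
{
	char		out_buf[64];
	char		in_buf[64];
	kern_return_t	kr;

	/* kernel -> user: src is the kernel buffer, dst is the user address */
	kr = vm_map_write_user(user_map, (vm_offset_t) out_buf,
			       user_addr, sizeof (out_buf));
	if (kr != KERN_SUCCESS)
		return kr;

	/* user -> kernel: src is the user address, dst is the kernel buffer */
	return vm_map_read_user(user_map, user_addr,
				(vm_offset_t) in_buf, sizeof (in_buf));
}
#endif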
/* Takes existing source and destination sub-maps and clones the contents of */
/* the source map */

kern_return_t
vm_region_clone(
	ipc_port_t	src_region,
	ipc_port_t	dst_region)
{
	vm_named_entry_t	src_object;
	vm_named_entry_t	dst_object;
	vm_map_t		src_map;
	vm_map_t		dst_map;
	vm_offset_t		addr;
	vm_offset_t		max_off;
	vm_map_entry_t		entry;
	vm_map_entry_t		new_entry;
	vm_map_entry_t		insert_point;

	src_object = (vm_named_entry_t)src_region->ip_kobject;
	dst_object = (vm_named_entry_t)dst_region->ip_kobject;
	if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
		return KERN_INVALID_ARGUMENT;
	}
	src_map = (vm_map_t)src_object->backing.map;
	dst_map = (vm_map_t)dst_object->backing.map;
	/* destination map is assumed to be unavailable to any other */
	/* activity. i.e. it is new */
	vm_map_lock(src_map);
	if((src_map->min_offset != dst_map->min_offset)
			|| (src_map->max_offset != dst_map->max_offset)) {
		vm_map_unlock(src_map);
		return KERN_INVALID_ARGUMENT;
	}
	addr = src_map->min_offset;
	vm_map_lookup_entry(dst_map, addr, &entry);
	if(entry == vm_map_to_entry(dst_map)) {
		entry = entry->vme_next;
	}
	if(entry == vm_map_to_entry(dst_map)) {
		max_off = src_map->max_offset;
	} else {
		max_off = entry->vme_start;
	}
	vm_map_lookup_entry(src_map, addr, &entry);
	if(entry == vm_map_to_entry(src_map)) {
		entry = entry->vme_next;
	}
	vm_map_lookup_entry(dst_map, addr, &insert_point);
	while((entry != vm_map_to_entry(src_map)) &&
				(entry->vme_end <= max_off)) {
		addr = entry->vme_start;
		new_entry = vm_map_entry_create(dst_map);
		vm_map_entry_copy(new_entry, entry);
		vm_map_entry_link(dst_map, insert_point, new_entry);
		insert_point = new_entry;
		if (entry->object.vm_object != VM_OBJECT_NULL) {
			if (new_entry->is_sub_map) {
				vm_map_reference(new_entry->object.sub_map);
			} else {
				vm_object_reference(
					new_entry->object.vm_object);
			}
		}
		dst_map->size += new_entry->vme_end - new_entry->vme_start;
		entry = entry->vme_next;
	}
	vm_map_unlock(src_map);
	return KERN_SUCCESS;
}
/*
 * Export routines to other components for the things we access locally through
 * macros.
 */
#undef current_map
vm_map_t
current_map(void)
{
	return (current_map_fast());
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	vm_map_lock(map);

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
	{
		vm_map_unlock(map);
		return (FALSE);
	}

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		vm_map_unlock(map);
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {
			vm_map_unlock(map);
			return(FALSE);
		}

		if (start < entry->vme_start) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/* go to next entry */

		start = entry->vme_end;
		entry = entry->vme_next;