/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_pageable_map;
/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 *	kernel_memory_allocate:
 *
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 *			  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *						if set and a lomemory pool is available
 *						grab pages from it... this also implies
 *						KMA_NOPAGEWAIT
 */
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	if (flags & KMA_LOMEM) {
		if ( !(flags & KMA_NOPAGEWAIT) ) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_alloclo(object, offset + i);
			else
				mem = vm_page_alloc(object, offset + i);

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size,
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return kr;
	}
	/* now that the page is wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
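
/*
 * Usage sketch (illustrative, not from the original source): allocating
 * wired memory backed by kernel_object without sleeping for pages.  The
 * wrapper name is hypothetical; the flag combination follows the rules
 * documented above (KMA_LOMEM additionally requires KMA_NOPAGEWAIT).
 */
static kern_return_t
example_alloc_nowait(vm_offset_t *addrp, vm_size_t size)
{
	/*
	 * KMA_KOBJECT: enter the pages in kernel_object.
	 * KMA_NOPAGEWAIT: fail with KERN_RESOURCE_SHORTAGE instead of
	 * blocking in VM_PAGE_WAIT() when no pages are available.
	 */
	return kernel_memory_allocate(kernel_map, addrp, size,
				      (vm_offset_t) 0,
				      KMA_KOBJECT | KMA_NOPAGEWAIT);
}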
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */
kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}
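
/*
 * Usage sketch (illustrative, not part of the original file): the common
 * pattern is to pair kmem_alloc() with kmem_free() on the same map and
 * size.  The helper name and the temporary-buffer use case are
 * hypothetical.
 */
static kern_return_t
example_with_temp_buffer(vm_size_t size)
{
	vm_offset_t	buf;
	kern_return_t	kr;

	kr = kmem_alloc(kernel_map, &buf, size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* memory is wired but NOT zero-filled; clear it if that matters */
	bzero((void *) buf, size);

	/* ... use buf ... */

	kmem_free(kernel_map, buf, size);
	return KERN_SUCCESS;
}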
/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t		map,
	vm_offset_t		oldaddr,
	vm_size_t		oldsize,
	vm_offset_t		*newaddrp,
	vm_size_t		newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
			 VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */
kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}
/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */
kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
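
/*
 * Usage sketch (illustrative, not part of the original file): because the
 * alignment mask passed above is (size - 1), a power-of-2 size yields a
 * start address aligned to that size.  The 32KB request and the assert
 * below are hypothetical.
 */
static kern_return_t
example_aligned_alloc(vm_offset_t *addrp)
{
	kern_return_t	kr;

	kr = kmem_alloc_aligned(kernel_map, addrp, 32 * 1024);	/* power of 2 */
	if (kr != KERN_SUCCESS)
		return kr;

	assert((*addrp & (32 * 1024 - 1)) == 0);	/* 32KB aligned */
	return KERN_SUCCESS;
}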
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */
kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
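
/*
 * Usage sketch (illustrative, not part of the original file): unlike the
 * wired variants above, the region returned here is not wired, so its
 * pages may be evicted.  The helper name is hypothetical, and the choice
 * of kernel_pageable_map (the module variable declared at the top of this
 * file) as the target map is an assumption.
 */
static kern_return_t
example_pageable_alloc(vm_offset_t *addrp, vm_size_t size)
{
	kern_return_t	kr;

	kr = kmem_alloc_pageable(kernel_pageable_map, addrp, size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* released the same way as the other kmem_alloc variants */
	kmem_free(kernel_pageable_map, *addrp, size);
	return KERN_SUCCESS;
}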
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */
void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
/*
 *	Allocate new pages in an object.
 */
kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t		alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page, waiting for one if necessary.
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
					& VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);		/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;

	return (KERN_SUCCESS);
}
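
/*
 * Usage sketch (illustrative, not part of the original file): carving a
 * wired (non-pageable) submap out of kernel_map, the way subsystem maps
 * are typically set up at boot.  The map variable, size, and helper name
 * are hypothetical.
 */
static vm_map_t example_submap;

static void
example_create_submap(void)
{
	vm_offset_t	base = 0;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &base, 16 * 1024 * 1024,
			   FALSE,			/* not pageable */
			   VM_FLAGS_ANYWHERE,		/* place it anywhere */
			   &example_submap);
	if (kr != KERN_SUCCESS)
		panic("example_create_submap");
	/* subsequent kmem_alloc() calls may now target example_submap */
}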
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
			    &map_addr,
			    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
			    (vm_map_offset_t) 0,
			    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0, FALSE,
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						+ vm_page_active_count
						+ vm_page_inactive_count));
}
/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
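
/*
 * Usage sketch (illustrative, not part of the original file): copying a
 * word out of and back into a task's address map.  The helper name and
 * the bit-set operation are hypothetical; note that copyoutmap() above
 * only supports the current map and kernel map/submaps.
 */
static kern_return_t
example_patch_user_word(vm_map_t user_map, vm_map_offset_t uaddr)
{
	uint32_t	word;
	kern_return_t	kr;

	kr = copyinmap(user_map, uaddr, &word, sizeof(word));
	if (kr != KERN_SUCCESS)
		return kr;

	word |= 0x1;	/* modify the value in kernel space */

	return copyoutmap(user_map, &word, uaddr, sizeof(word));
}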
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
				(off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
				(off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);