/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License.  The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_pageable_map;
/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 *	kernel_memory_allocate:
 *
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map	: map to allocate into
 *	addrp	: pointer to start address of new memory
 *	size	: size of memory requested
 *	flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 *		  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *					if set and a lomemory pool is available
 *					grab pages from it... this also implies
 */
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0)
		return KERN_INVALID_ARGUMENT;

	if (flags & KMA_LOMEM) {
		if ( !(flags & KMA_NOPAGEWAIT) ) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_alloclo(object, offset + i);
			else
				mem = vm_page_alloc(object, offset + i);

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size,
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return kr;
	}
	/* now that the pages are wired, we no longer have to fear coalescing */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
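/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * module): one way a kernel client might call kernel_memory_allocate()
 * for a wired buffer backed by kernel_object that fails immediately
 * instead of waiting for free pages.  Only the call itself reflects the
 * interface documented above; kernel_map comes from vm_kern.h.
 */
static kern_return_t
example_wired_alloc(vm_size_t size, vm_offset_t *addrp)
{
	/*
	 * KMA_KOBJECT: back the range with kernel_object.
	 * KMA_NOPAGEWAIT: return KERN_RESOURCE_SHORTAGE rather than
	 * blocking in VM_PAGE_WAIT() when no pages are available.
	 */
	return kernel_memory_allocate(kernel_map, addrp, size, 0,
				      KMA_KOBJECT | KMA_NOPAGEWAIT);
}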
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}
/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);

	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
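/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * grow a kmem_alloc'd region with kmem_realloc() and, on success, release
 * the old mapping with kmem_free() as the comment above prescribes.
 * Relies on the kmem_free() prototype from vm_kern.h.
 */
static kern_return_t
example_grow_region(vm_map_t map, vm_offset_t *addr, vm_size_t oldsize,
		    vm_size_t newsize)
{
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(map, *addr, oldsize, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;

	/* The old region still maps the same pages; drop it now. */
	kmem_free(map, *addr, oldsize);
	*addr = newaddr;
	return KERN_SUCCESS;
}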
/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}
/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
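/*
 * Illustrative sketch (hypothetical helper): kmem_alloc_aligned() panics
 * unless the size is a power of two, so a caller would round the request
 * up first.  Assumes only the interfaces shown above.
 */
static kern_return_t
example_aligned_alloc(vm_map_t map, vm_offset_t *addrp, vm_size_t size)
{
	vm_size_t	pow2 = PAGE_SIZE;

	/* round the request up to the next power of two */
	while (pow2 < size)
		pow2 <<= 1;

	return kmem_alloc_aligned(map, addrp, pow2);
}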
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
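/*
 * Illustrative sketch (hypothetical helper): pageable kernel memory comes
 * from vm_map_enter() with a null object, so it is neither wired nor
 * physically backed until it is touched.  Using kernel_pageable_map here
 * is an assumption for the example; it must have been set up elsewhere.
 */
static kern_return_t
example_pageable_alloc(vm_size_t size, vm_offset_t *addrp)
{
	return kmem_alloc_pageable(kernel_pageable_map, addrp, size);
}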
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
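/*
 * Illustrative sketch (hypothetical helper): the usual lifetime of a wired
 * allocation is kmem_alloc() paired with kmem_free() of the same map,
 * address, and size.
 */
static void
example_alloc_then_free(vm_map_t map, vm_size_t size)
{
	vm_offset_t	addr;

	if (kmem_alloc(map, &addr, size) != KERN_SUCCESS)
		return;

	/* ... use the wired buffer at addr ... */

	kmem_free(map, addr, size);
}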
/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t	alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t		map_start;
	vm_map_offset_t		map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
				& VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}
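/*
 * Illustrative sketch (hypothetical helper): carving a pageable submap out
 * of kernel_map and letting the parent choose the address via
 * VM_FLAGS_ANYWHERE.  kernel_map comes from vm_kern.h.
 */
static kern_return_t
example_make_submap(vm_size_t size, vm_offset_t *addr, vm_map_t *submap)
{
	return kmem_suballoc(kernel_map, addr, size,
			     TRUE,		/* pageable */
			     VM_FLAGS_ANYWHERE,
			     submap);
}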
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
			    &map_addr,
			    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
			    (vm_map_offset_t) 0,
			    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0, FALSE,
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
					+ vm_page_active_count
					+ vm_page_inactive_count));
}
/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
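/*
 * Illustrative sketch (hypothetical helper): copy a word from an address
 * in an arbitrary task's map.  copyinmap() picks memcpy, copyin, or a map
 * switch depending on which map is supplied.
 */
static kern_return_t
example_read_remote_word(vm_map_t task_map, vm_map_offset_t uaddr, int *out)
{
	return copyinmap(task_map, uaddr, out, sizeof (*out));
}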
/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
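/*
 * Illustrative sketch (hypothetical helper): the mirror operation.  Note
 * that unlike copyinmap(), copyoutmap() refuses maps other than the kernel
 * map or the current map rather than switching to them.
 */
static kern_return_t
example_write_remote_word(vm_map_t task_map, vm_map_address_t uaddr, int value)
{
	return copyoutmap(task_map, &value, uaddr, sizeof (value));
}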
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
						(off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
						(off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);