/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>

#include <libkern/OSDebug.h>
#include <sys/kdebug.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

extern boolean_t vm_kernel_ready;
/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);
kern_return_t
kmem_alloc_contig(
	vm_map_t		map,
	vm_offset_t		*addrp,
	vm_size_t		size,
	vm_offset_t		mask,
	ppnum_t			max_pnum,
	ppnum_t			pnum_mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);
	return KERN_SUCCESS;
}
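
/*
 * Usage sketch (illustrative only): a caller that needs wired, physically
 * contiguous memory below 4GB for a 32-bit DMA engine might use
 * kmem_alloc_contig() roughly as follows.  The size and the page-number
 * bound are made-up values for the example.
 *
 *	vm_offset_t	addr;
 *	kern_return_t	kr;
 *
 *	kr = kmem_alloc_contig(kernel_map, &addr, 8 * PAGE_SIZE,
 *			       0,			// no extra alignment
 *			       atop(0xFFFFFFFFULL),	// highest acceptable page
 *			       0,			// no page-number mask
 *			       KMA_LOMEM);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use the buffer ...
 *		kmem_free(kernel_map, addr, 8 * PAGE_SIZE);
 *	}
 */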
/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map		: map to allocate into
 * addrp	: pointer to start address of new memory
 * size		: size of memory requested
 * flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 *		  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *					if set and a lomemory pool is available
 *					grab pages from it... this also implies
 *					KMA_NOPAGEWAIT
 */
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	pg_offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr, fill_start;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, fill_size;
	kern_return_t		kr;
	vm_page_t		mem;
	vm_page_t		guard_page_list = NULL;
	vm_page_t		wired_page_list = NULL;
	int			guard_page_count = 0;
	int			wired_page_count = 0;
	int			i;
	int			vm_alloc_flags;

	if (! vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;
	vm_alloc_flags = 0;

	/*
	 * limit the size of a single extent of wired memory
	 * to try to limit the damage to the system if
	 * too many pages get wired down
	 */
	if (map_size > (1 << 30)) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */

	fill_start = 0;
	fill_size = map_size;
	if (flags & KMA_GUARD_FIRST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;
		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	if (flags & KMA_GUARD_LAST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
		fill_size -= PAGE_SIZE_64;
		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	wired_page_count = (int) (fill_size / PAGE_SIZE_64);
	assert(wired_page_count * PAGE_SIZE_64 == fill_size);
	for (i = 0; i < guard_page_count; i++) {
		for (;;) {
			mem = vm_page_grab_guard();

			if (mem != VM_PAGE_NULL)
				break;
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_page_more_fictitious();
		}
		mem->pageq.next = (queue_entry_t)guard_page_list;
		guard_page_list = mem;
	}

	for (i = 0; i < wired_page_count; i++) {
		uint64_t	unavailable;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_grablo();
			else
				mem = vm_page_grab();

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;

			if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			VM_PAGE_WAIT();
		}
		mem->pageq.next = (queue_entry_t)wired_page_list;
		wired_page_list = mem;
	}
	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr,
			       fill_size, map_mask,
			       vm_alloc_flags, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		goto out;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr : 0;

	entry->wired_count++;

	if (flags & KMA_PERMANENT)
		entry->permanent = TRUE;

	if (object != kernel_object)
		vm_object_reference(object);

	vm_object_lock(object);
	vm_map_unlock(map);

	pg_offset = 0;

	if (fill_start) {
		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		pg_offset += PAGE_SIZE_64;
	}
	for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
		if (wired_page_list == NULL)
			panic("kernel_memory_allocate: wired_page_list == NULL");

		mem = wired_page_list;
		wired_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;
		mem->wire_count++;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		mem->pmapped = TRUE;
		mem->wpmapped = TRUE;

		PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
			   VM_PROT_READ | VM_PROT_WRITE, 0, TRUE);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

			pmap_set_noencrypt(mem->phys_page);
		}
	}
	if ((fill_start + fill_size) < map_size) {
		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
	}
	if (guard_page_list || wired_page_list)
		panic("kernel_memory_allocate: non empty list\n");

	vm_page_lockspin_queues();
	vm_page_wire_count += wired_page_count;
	vm_page_unlock_queues();

	vm_object_unlock(object);

	/*
	 * now that the pages are wired, we no longer have to fear coalesce
	 */
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);
	else
		vm_object_deallocate(object);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;

out:
	if (guard_page_list)
		vm_page_free_list(guard_page_list, FALSE);

	if (wired_page_list)
		vm_page_free_list(wired_page_list, FALSE);

	return kr;
}
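
/*
 * Usage sketch (illustrative only): allocating a wired, stack-like region
 * in the kernel object with a guard page at each end.  Note that the
 * requested size includes the guard pages; the usable range starts one
 * page in.  "payload_size" is a placeholder.
 *
 *	vm_offset_t	base;
 *	kern_return_t	kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &base,
 *				    payload_size + 2 * PAGE_SIZE, 0,
 *				    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST);
 *	if (kr == KERN_SUCCESS) {
 *		// usable bytes: [base + PAGE_SIZE, base + PAGE_SIZE + payload_size)
 *	}
 */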
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
	TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
	return kr;
}
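
/*
 * Usage sketch (illustrative only): the common kmem_alloc()/kmem_free()
 * pairing.  The size is a placeholder.
 *
 *	vm_offset_t	buf;
 *
 *	if (kmem_alloc(kernel_map, &buf, PAGE_SIZE) == KERN_SUCCESS) {
 *		bzero((void *)buf, PAGE_SIZE);	// memory is not zero-filled
 *		// ... use buf ...
 *		kmem_free(kernel_map, buf, PAGE_SIZE);
 *	}
 */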
/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t		map,
	vm_offset_t		oldaddr,
	vm_size_t		oldsize,
	vm_offset_t		*newaddrp,
	vm_size_t		newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;
	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->vo_size != oldmapsize)
		panic("kmem_realloc");
	object->vo_size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));
	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);
	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
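
/*
 * Usage sketch (illustrative only): growing a region that was obtained
 * with kmem_alloc().  Per the comment above, the old region remains mapped
 * on success and must still be freed explicitly.  Sizes are placeholders.
 *
 *	vm_offset_t	old_buf, new_buf;
 *	vm_size_t	old_size = 4 * PAGE_SIZE;
 *	vm_size_t	new_size = 8 * PAGE_SIZE;
 *
 *	if (kmem_alloc(kernel_map, &old_buf, old_size) != KERN_SUCCESS)
 *		return;
 *	if (kmem_realloc(kernel_map, old_buf, old_size,
 *			 &new_buf, new_size) == KERN_SUCCESS) {
 *		kmem_free(kernel_map, old_buf, old_size);	// old mapping still exists
 *		// ... use new_buf ...
 *	}
 */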
/*
 *	kmem_alloc_kobject:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_kobject(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}
/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_kobject, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
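
/*
 * Usage sketch (illustrative only): because the mask handed to
 * kernel_memory_allocate() is size - 1, a power-of-2 size is expected to
 * come back size-aligned.  The 64KB size is a placeholder.
 *
 *	vm_offset_t	table;
 *
 *	if (kmem_alloc_aligned(kernel_map, &table, 64 * 1024) == KERN_SUCCESS)
 *		assert((table & (64 * 1024 - 1)) == 0);	// 64KB-aligned
 */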
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);

	TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);

	if (size == 0) {
#if MACH_ASSERT
		printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n", map, (uint64_t)addr);
#endif
		return;
	}

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t		alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lockspin_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */

		mem->pmapped = TRUE;
		mem->wpmapped = TRUE;

		PMAP_ENTER(kernel_pmap, map_start, mem, protection, 0, TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return kr;
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return kr;
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}
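
/*
 * Usage sketch (illustrative only): carving a pageable submap out of the
 * kernel map for a subsystem's private use.  The submap size and variable
 * names are placeholders.
 *
 *	vm_map_t	my_submap;
 *	vm_offset_t	my_base = 0;
 *	vm_offset_t	obj_addr;
 *	kern_return_t	kr;
 *
 *	kr = kmem_suballoc(kernel_map, &my_base, 32 * 1024 * 1024,
 *			   TRUE,		// pageable
 *			   VM_FLAGS_ANYWHERE,
 *			   &my_submap);
 *	if (kr == KERN_SUCCESS) {
 *		// later allocations can target my_submap instead of kernel_map
 *		kmem_alloc_pageable(my_submap, &obj_addr, PAGE_SIZE);
 *	}
 */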
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
				   map_end, FALSE);
	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		vm_map_offset_t map_addr;
		kern_return_t kr;

		map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
		kr = vm_map_enter(kernel_map,
				  &map_addr,
				  (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
				  (vm_map_offset_t) 0,
				  VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
				  VM_OBJECT_NULL,
				  (vm_object_offset_t) 0, FALSE,
				  VM_PROT_NONE, VM_PROT_NONE,
				  VM_INHERIT_DEFAULT);

		if (kr != KERN_SUCCESS) {
			panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
			      (uint64_t) start, (uint64_t) end,
			      (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			      (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
			      kr);
		}
	}
	/*
	 * Set the default global user wire limit which limits the amount of
	 * memory that can be locked via mlock().  We set this to the total
	 * amount of memory that is potentially usable by a user app (max_mem)
	 * minus a certain amount.  This can be overridden via a sysctl.
	 */
	vm_global_no_user_wire_amount = MIN(max_mem*20/100,
					    VM_NOT_USER_WIREABLE);
	vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;

	/* the default per user limit is the same as the global limit */
	vm_user_wire_limit = vm_global_user_wire_limit;
}
/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
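
/*
 * Usage sketch (illustrative only): copying a structure out of a task's
 * map that may not be the current map.  "task_map", "user_ptr", and
 * "struct request" are placeholders.
 *
 *	struct request	req;
 *	kern_return_t	kr;
 *
 *	kr = copyinmap(task_map, user_ptr, &req, sizeof (req));
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 */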
/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	kr = KERN_SUCCESS;
	vm_map_lock(map);
	base_map = map;
	base_offset = off;
	original_offset = off;
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->vo_shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}
		if (len <= ((entry->vme_end - entry->vme_start) -
			    (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);