/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <libkern/OSDebug.h>
#include <sys/kdebug.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_pageable_map;

extern boolean_t vm_kernel_ready;
/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
		return KERN_INVALID_ARGUMENT;

	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	map_mask = (vm_map_offset_t)mask;

	/* Check for zero allocation size (either directly or via overflow) */
	if (map_size == 0)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}
	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
			vm_map_trunc_page(map_addr,
					  VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(map_addr + map_size,
					  VM_MAP_PAGE_MASK(map)),
		vm_object_deallocate(object);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		vm_page_insert(m, object, offset + i);

	vm_object_unlock(object);

	kr = vm_map_wire(map,
			 vm_map_trunc_page(map_addr,
					   VM_MAP_PAGE_MASK(map)),
			 vm_map_round_page(map_addr + map_size,
					   VM_MAP_PAGE_MASK(map)),

	if (kr != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
			vm_map_trunc_page(map_addr,
					  VM_MAP_PAGE_MASK(map)),
			vm_map_round_page(map_addr + map_size,
					  VM_MAP_PAGE_MASK(map)),
		vm_object_deallocate(object);

	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);
/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *
 *	KMA_HERE		*addrp is base address, else "anywhere"
 *	KMA_NOPAGEWAIT		don't wait for pages if unavailable
 *	KMA_KOBJECT		use kernel_object
 *	KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *				if set and a lomemory pool is available
 *				grab pages from it... this also implies
 */
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,

	vm_object_offset_t	offset;
	vm_object_offset_t	pg_offset;
	vm_map_entry_t		entry = NULL;
	vm_map_offset_t		map_addr, fill_start;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, fill_size;
	kern_return_t		kr, pe_result;
	vm_page_t		guard_page_list = NULL;
	vm_page_t		wired_page_list = NULL;
	int			guard_page_count = 0;
	int			wired_page_count = 0;
	if (! vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	map_mask = (vm_map_offset_t) mask;

	/* Check for zero allocation size (either directly or via overflow) */
	if (map_size == 0)
		return KERN_INVALID_ARGUMENT;

	/*
	 * limit the size of a single extent of wired memory
	 * to try and limit the damage to the system if
	 * too many pages get wired down
	 * limit raised to 2GB with 128GB max physical limit
	 */
	if (map_size > (1ULL << 31)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	/*
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */
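
	/*
	 * Worked example (illustrative, not from the original source; it assumes
	 * fill_start starts at 0): with both KMA_GUARD_FIRST and KMA_GUARD_LAST
	 * set and a map_size of four pages, the adjustments below leave
	 * fill_start == PAGE_SIZE and fill_size == 2 * PAGE_SIZE, i.e. two wired
	 * data pages bracketed by one guard page at each end of the range.
	 */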
	fill_size = map_size;

	if (flags & KMA_GUARD_FIRST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;
		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			return KERN_INVALID_ARGUMENT;

	if (flags & KMA_GUARD_LAST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
		fill_size -= PAGE_SIZE_64;
		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			return KERN_INVALID_ARGUMENT;

	wired_page_count = (int) (fill_size / PAGE_SIZE_64);
	assert(wired_page_count * PAGE_SIZE_64 == fill_size);
	for (i = 0; i < guard_page_count; i++) {
		mem = vm_page_grab_guard();

		if (mem != VM_PAGE_NULL)

		if (flags & KMA_NOPAGEWAIT) {
			kr = KERN_RESOURCE_SHORTAGE;

		vm_page_more_fictitious();

		mem->pageq.next = (queue_entry_t)guard_page_list;
		guard_page_list = mem;

	if (! (flags & KMA_VAONLY)) {
	for (i = 0; i < wired_page_count; i++) {
		uint64_t	unavailable;

		if (flags & KMA_LOMEM)
			mem = vm_page_grablo();
		else
			mem = vm_page_grab();

		if (mem != VM_PAGE_NULL)

		if (flags & KMA_NOPAGEWAIT) {
			kr = KERN_RESOURCE_SHORTAGE;

		if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
			kr = KERN_RESOURCE_SHORTAGE;

		unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;

		if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
			kr = KERN_RESOURCE_SHORTAGE;

		mem->pageq.next = (queue_entry_t)wired_page_list;
		wired_page_list = mem;
	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else if ((flags & KMA_COMPRESSOR) != 0) {
		object = compressor_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}
	kr = vm_map_find_space(map, &map_addr,
			       vm_alloc_flags, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object || object == compressor_object) ?

	if (object != compressor_object)
		entry->wired_count++;

	if (flags & KMA_PERMANENT)
		entry->permanent = TRUE;

	if (object != kernel_object && object != compressor_object)
		vm_object_reference(object);

	vm_object_lock(object);

		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		pg_offset += PAGE_SIZE_64;

	kma_prot = VM_PROT_READ | VM_PROT_WRITE;

	if (flags & KMA_VAONLY) {
		pg_offset = fill_start + fill_size;
	} else {
	for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
		if (wired_page_list == NULL)
			panic("kernel_memory_allocate: wired_page_list == NULL");

		mem = wired_page_list;
		wired_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->wpmapped = TRUE;

		PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
				   kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
				   PMAP_OPTIONS_NOWAIT, pe_result);

		if (pe_result == KERN_RESOURCE_SHORTAGE) {
			vm_object_unlock(object);

			PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
				   kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);

			vm_object_lock(object);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

			pmap_set_noencrypt(mem->phys_page);

	if ((fill_start + fill_size) < map_size) {
		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

	if (guard_page_list || wired_page_list)
		panic("kernel_memory_allocate: non empty list\n");

	if (! (flags & KMA_VAONLY)) {
	vm_page_lockspin_queues();
	vm_page_wire_count += wired_page_count;
	vm_page_unlock_queues();

	vm_object_unlock(object);
	/*
	 * now that the pages are wired, we no longer have to fear coalescing
	 */
	if (object == kernel_object || object == compressor_object)
		vm_map_simplify(map, map_addr);
	else
		vm_object_deallocate(object);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);

	vm_page_free_list(guard_page_list, FALSE);

	vm_page_free_list(wired_page_list, FALSE);
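
/*
 * Illustrative usage sketch (not part of the original file).  It shows the
 * common way this entry point is driven: a wired, kernel_object-backed
 * allocation with no alignment mask, mirroring what kmem_alloc_kobject()
 * below does.  The wrapper name and its caller are assumptions made for
 * the example.
 */
static kern_return_t
example_alloc_wired_kobject(vm_size_t size, vm_offset_t *addrp)
{
	kern_return_t kr;

	/* mask == 0: no alignment constraint; KMA_KOBJECT: back with kernel_object */
	kr = kernel_memory_allocate(kernel_map, addrp, size, 0, KMA_KOBJECT);
	if (kr != KERN_SUCCESS)
		return kr;

	/* pair with kmem_free(kernel_map, *addrp, size) when the memory is done */
	return KERN_SUCCESS;
}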
kernel_memory_populate(
	vm_object_offset_t	offset, pg_offset;
	kern_return_t		kr, pe_result;
	vm_page_t		page_list = NULL;

	page_count = (int) (size / PAGE_SIZE_64);

	assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));

	if (flags & KMA_COMPRESSOR) {

		for (i = 0; i < page_count; i++) {
			mem = vm_page_grab();

			if (mem != VM_PAGE_NULL)

			mem->pageq.next = (queue_entry_t) page_list;

		object = compressor_object;

		vm_object_lock(object);

		     pg_offset += PAGE_SIZE_64) {

			page_list = (vm_page_t) mem->pageq.next;
			mem->pageq.next = NULL;

			vm_page_insert(mem, object, offset + pg_offset);

			PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
					   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
					   0, TRUE, PMAP_OPTIONS_NOWAIT, pe_result);

			if (pe_result == KERN_RESOURCE_SHORTAGE) {

				vm_object_unlock(object);

				PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
					   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);

				vm_object_lock(object);

			mem->wpmapped = TRUE;
			mem->compressor = TRUE;

		vm_object_unlock(object);

	for (i = 0; i < page_count; i++) {

		if (flags & KMA_LOMEM)
			mem = vm_page_grablo();
		else
			mem = vm_page_grab();

		if (mem != VM_PAGE_NULL)

		if (flags & KMA_NOPAGEWAIT) {
			kr = KERN_RESOURCE_SHORTAGE;

		if ((flags & KMA_LOMEM) &&
		    (vm_lopage_needed == TRUE)) {
			kr = KERN_RESOURCE_SHORTAGE;

		mem->pageq.next = (queue_entry_t) page_list;

	if (flags & KMA_KOBJECT) {

		object = kernel_object;

		vm_object_lock(object);
	} else {
		/*
		 * If it's not the kernel object, we need to:
		 *	take reference on object;
		 */
		panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
		      map, (uint64_t) addr, (uint64_t) size, flags);
	}

	     pg_offset += PAGE_SIZE_64) {

		if (page_list == NULL)
			panic("kernel_memory_populate: page_list == NULL");

		page_list = (vm_page_t) mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->wpmapped = TRUE;

		PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
				   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
				   ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
				   PMAP_OPTIONS_NOWAIT, pe_result);

		if (pe_result == KERN_RESOURCE_SHORTAGE) {

			vm_object_unlock(object);

			PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
				   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
				   ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);

			vm_object_lock(object);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
			pmap_set_noencrypt(mem->phys_page);

	vm_page_lock_queues();
	vm_page_wire_count += page_count;
	vm_page_unlock_queues();

	vm_object_unlock(object);

	vm_page_free_list(page_list, FALSE);
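
/*
 * Illustrative sketch (not from the original file): reserving kernel VA only
 * and filling it with wired pages later, which is the pattern the
 * KMA_VAONLY / kernel_memory_populate() pairing above supports.  The wrapper
 * name and the one-page populate window are assumptions for the example.
 */
static kern_return_t
example_reserve_then_populate(vm_size_t size, vm_offset_t *addrp)
{
	kern_return_t kr;

	/* reserve virtual space in the kernel object, no physical pages yet */
	kr = kernel_memory_allocate(kernel_map, addrp, size, 0,
				    KMA_KOBJECT | KMA_VAONLY);
	if (kr != KERN_SUCCESS)
		return kr;

	/* back just the first page of the range with a wired page */
	kr = kernel_memory_populate(kernel_map, *addrp, PAGE_SIZE, KMA_KOBJECT);
	if (kr != KERN_SUCCESS)
		return kr;

	/* later: kernel_memory_depopulate(kernel_map, *addrp, PAGE_SIZE, KMA_KOBJECT); */
	return KERN_SUCCESS;
}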
kernel_memory_depopulate(
	vm_object_offset_t	offset, pg_offset;
	vm_page_t		local_freeq = NULL;

	assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));

	if (flags & KMA_COMPRESSOR) {

		object = compressor_object;

		vm_object_lock(object);
	} else if (flags & KMA_KOBJECT) {

		object = kernel_object;

		vm_object_lock(object);
	} else {
		/*
		 * If it's not the kernel object, we need to:
		 */
		panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
		      map, (uint64_t) addr, (uint64_t) size, flags);
	}
	pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);

	     pg_offset += PAGE_SIZE_64) {

		mem = vm_page_lookup(object, offset + pg_offset);

		pmap_disconnect(mem->phys_page);

		vm_page_remove(mem, TRUE);

		assert(mem->pageq.next == NULL &&
		       mem->pageq.prev == NULL);
		mem->pageq.next = (queue_entry_t)local_freeq;

	vm_object_unlock(object);

	vm_page_free_list(local_freeq, TRUE);
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */
	kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
	TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
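
/*
 * Illustrative sketch (not from the original file): the usual
 * kmem_alloc()/kmem_free() pairing for a wired kernel buffer, with both
 * calls driving the same map.  The wrapper name and buffer size are
 * assumptions for the example.
 */
static kern_return_t
example_kmem_alloc_free(vm_map_t map)
{
	vm_offset_t	addr;
	vm_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	kr = kmem_alloc(map, &addr, size);	/* wired, not zero-filled */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the buffer at addr ... */

	kmem_free(map, addr, size);		/* unwire and release the range */
	return KERN_SUCCESS;
}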
/*
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
	vm_offset_t		*newaddrp,

	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;

	oldmapmin = vm_map_trunc_page(oldaddr,
				      VM_MAP_PAGE_MASK(map));
	oldmapmax = vm_map_round_page(oldaddr + oldsize,
				      VM_MAP_PAGE_MASK(map));
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize,
				       VM_MAP_PAGE_MASK(map));

	/*
	 *	Find the VM object backing the old region.
	 */
	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */
	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one */
	/* attempt is made to realloc a kmem_alloc'd area */
	vm_object_lock(object);

	if (object->vo_size != oldmapsize)
		panic("kmem_realloc");
	object->vo_size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize-oldmapsize));

	/*
	 *	Find space for the new region.
	 */
	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {

		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);

	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);

	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {

		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);

	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
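
/*
 * Illustrative sketch (not from the original file): growing a kmem_alloc'd
 * region with kmem_realloc().  Per the comment above, the old region stays
 * mapped and must still be released with kmem_free().  The wrapper name and
 * sizes are assumptions for the example.
 */
static kern_return_t
example_kmem_grow(vm_map_t map, vm_offset_t old_addr, vm_size_t old_size,
		  vm_offset_t *new_addr)
{
	vm_size_t	new_size = old_size + PAGE_SIZE;
	kern_return_t	kr;

	kr = kmem_realloc(map, old_addr, old_size, new_addr, new_size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* the old region is now mapped twice; drop the original mapping */
	kmem_free(map, old_addr, old_size);
	return KERN_SUCCESS;
}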
/*
 *	kmem_alloc_kobject:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
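
/*
 * Illustrative sketch (not from the original file): kmem_alloc_kobject()
 * takes the same arguments as kmem_alloc() but backs the range with
 * kernel_object, so the result must not be passed to kmem_realloc().
 * The wrapper name is an assumption for the example.
 */
static kern_return_t
example_kobject_alloc(vm_size_t size, vm_offset_t *addrp)
{
	/* wired allocation backed by kernel_object */
	return kmem_alloc_kobject(kernel_map, addrp, size);
}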
/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_kobject, except that the memory is aligned.
 *	The size should be a power-of-2.
 */
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
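
/*
 * Illustrative note (not from the original file): because size is a power of
 * two, size - 1 is the alignment mask passed to kernel_memory_allocate().
 * For example, a 16 KB request (size == 0x4000) yields mask == 0x3fff, so the
 * returned address is 16 KB aligned.  A sketch of a caller, with the wrapper
 * name assumed for the example:
 */
static kern_return_t
example_aligned_alloc(vm_offset_t *addrp)
{
	/* 16 KB, a power of two, so it doubles as the alignment */
	return kmem_alloc_aligned(kernel_map, addrp, 16 * 1024);
}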
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */
	vm_map_offset_t map_addr;
	vm_map_size_t	map_size;

	map_addr = (vm_map_min(map)) + PAGE_SIZE;
	map_addr = vm_map_min(map);
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
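
/*
 * Illustrative sketch (not from the original file): pageable kernel memory
 * comes from kmem_alloc_pageable(); the backing pages can be paged out, so
 * this is only for data that may safely take a fault.  The wrapper name and
 * the use of kernel_pageable_map (exported near the top of this file) are
 * assumptions for the example.
 */
static kern_return_t
example_pageable_alloc(vm_size_t size, vm_offset_t *addrp)
{
	return kmem_alloc_pageable(kernel_pageable_map, addrp, size);
}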
/*
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */
	assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);

	TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);

		printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n", map, (uint64_t)addr);

	kr = vm_map_remove(map,
			   vm_map_trunc_page(addr,
					     VM_MAP_PAGE_MASK(map)),
			   vm_map_round_page(addr + size,
					     VM_MAP_PAGE_MASK(map)),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
/*
 *	Allocate new pages in an object.
 */
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)

	vm_object_size_t		alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			vm_object_lock(object);

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;

	vm_object_unlock(object);
	return KERN_SUCCESS;
/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)

	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start,
				      VM_MAP_PAGE_MASK(kernel_map));
	map_end = vm_map_round_page(end,
				    VM_MAP_PAGE_MASK(kernel_map));

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lockspin_queues();
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		mem->pmapped = TRUE;
		mem->wpmapped = TRUE;

		PMAP_ENTER(kernel_pmap, map_start, mem, protection, VM_PROT_NONE, 0, TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
/*
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;

	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(parent));

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = ((flags & VM_FLAGS_ANYWHERE)
		    ? vm_map_min(parent)
		    : vm_map_trunc_page(*addr,
					VM_MAP_PAGE_MASK(parent)));

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */
	/* inherit the parent map's page size */
	vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);

	*addr = CAST_DOWN(vm_offset_t, map_addr);

	return (KERN_SUCCESS);
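
/*
 * Illustrative sketch (not from the original file): carving a submap out of
 * kernel_map with kmem_suballoc().  The exact parameter list is abridged
 * above; this sketch assumes the conventional
 * (parent, &addr, size, pageable, flags, &new_map) ordering, and the submap
 * size and wrapper name are assumptions for the example.
 */
static kern_return_t
example_make_submap(vm_map_t *new_map)
{
	vm_offset_t	base = 0;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &base, 32 * 1024 * 1024,	/* 32 MB of VA */
			   FALSE,			/* wired, not pageable */
			   VM_FLAGS_ANYWHERE,		/* parent picks the range */
			   new_map);
	return kr;
}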
/*
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;

	map_start = vm_map_trunc_page(start,
				      VM_MAP_PAGE_MASK(kernel_map));
	map_end = vm_map_round_page(end,
				    VM_MAP_PAGE_MASK(kernel_map));

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
		kr = vm_map_enter(kernel_map,
				  (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
				  (vm_map_offset_t) 0,
				  VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
				  (vm_object_offset_t) 0, FALSE,
				  VM_PROT_NONE, VM_PROT_NONE,
				  VM_INHERIT_DEFAULT);

		if (kr != KERN_SUCCESS) {
			panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
			      (uint64_t) start, (uint64_t) end,
			      (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			      (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
	/*
	 * Set the default global user wire limit which limits the amount of
	 * memory that can be locked via mlock().  We set this to the total
	 * amount of memory that is potentially usable by a user app (max_mem)
	 * minus a certain amount.  This can be overridden via a sysctl.
	 */
	vm_global_no_user_wire_amount = MIN(max_mem*20/100,
					    VM_NOT_USER_WIREABLE);
	vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;

	/* the default per user limit is the same as the global limit */
	vm_user_wire_limit = vm_global_user_wire_limit;
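
/*
 * Worked example (illustrative, not from the original source): on a machine
 * with max_mem of 16 GB, 20% is about 3.2 GB, so if VM_NOT_USER_WIREABLE is
 * larger than that, the MIN() above leaves vm_global_no_user_wire_amount at
 * roughly 3.2 GB and vm_global_user_wire_limit at roughly 12.8 GB; the
 * per-user limit then defaults to the same 12.8 GB.
 */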
/*
 *	Routine:	copyinmap
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
	vm_map_offset_t		fromaddr,

	kern_return_t	kr = KERN_SUCCESS;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
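
/*
 * Illustrative sketch (not from the original file): copying a few bytes out
 * of an arbitrary task's map with copyinmap(), which falls back to a map
 * switch when the source map is neither the kernel map nor the current map.
 * The wrapper name and fixed length are assumptions for the example.
 */
static kern_return_t
example_peek_user_word(vm_map_t task_map, vm_map_offset_t uaddr, uint32_t *out)
{
	/* returns KERN_INVALID_ADDRESS if the source range is unmapped */
	return copyinmap(task_map, uaddr, out, sizeof (*out));
}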
/*
 *	Routine:	copyoutmap
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
	vm_map_address_t	toaddr,

	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
	vm_map_offset_t		off,
	memory_object_t		pager,
	vm_object_offset_t	file_off)

	vm_map_entry_t		entry;
	vm_object_offset_t	obj_off;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	vm_map_size_t		local_len;

	original_offset = off;

	while(vm_map_lookup_entry(map, off, &entry)) {

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while(obj->shadow) {
			obj_off += obj->vo_shadow_offset;
		}
		if((obj->pager_created) && (obj->pager == pager)) {
			if(((obj->paging_offset) + obj_off) == file_off) {
				if(off != base_offset) {
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the range covered by the target map
					 */
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the range covered by the target map
					 */
					return KERN_FAILURE;
				}
			}
		} else if(kr != KERN_SUCCESS) {
			return KERN_FAILURE;
		}

		if(len <= ((entry->vme_end - entry->vme_start) -
			   (off - entry->vme_start))) {
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);

		if(map != base_map) {
			vm_map_lock(base_map);