/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_compressor.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <kern/ledger.h>
#include <kern/bits.h>
#include <libkern/OSDebug.h>
#include <libkern/crypto/sha2.h>
#include <libkern/section_keywords.h>
#include <sys/kdebug.h>

#include <san/kasan.h>
/*
 *	Variables exported by this module.
 */

SECURITY_READ_ONLY_LATE(vm_map_t) kernel_map;
vm_map_t        kernel_pageable_map;

extern boolean_t vm_kernel_ready;
/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size);
kern_return_t
kmem_alloc_contig(
	vm_map_t                map,
	vm_offset_t             *addrp,
	vm_size_t               size,
	vm_offset_t             mask,
	ppnum_t                 max_pnum,
	ppnum_t                 pnum_mask,
	int                     flags,
	vm_tag_t                tag)
{
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_map_offset_t         map_addr;
	vm_map_offset_t         map_mask;
	vm_map_size_t           map_size, i;
	vm_map_entry_t          entry;
	vm_page_t               m, pages;
	kern_return_t           kr;

	assert(VM_KERN_MEMORY_NONE != tag);

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) {
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(map));
	map_mask = (vm_map_offset_t)mask;

	/* Check for zero allocation size (either directly or via overflow) */
	if (map_size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0,
	    VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	if (object == kernel_object) {
		offset = map_addr;
	} else {
		offset = 0;
	}
	VME_OBJECT_SET(entry, object);
	VME_OFFSET_SET(entry, offset);

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map,
		    vm_map_trunc_page(map_addr,
		    VM_MAP_PAGE_MASK(map)),
		    vm_map_round_page(map_addr + map_size,
		    VM_MAP_PAGE_MASK(map)),
		    VM_MAP_REMOVE_NO_FLAGS);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->vmp_busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	kr = vm_map_wire_kernel(map,
	    vm_map_trunc_page(map_addr,
	    VM_MAP_PAGE_MASK(map)),
	    vm_map_round_page(map_addr + map_size,
	    VM_MAP_PAGE_MASK(map)),
	    VM_PROT_DEFAULT, tag,
	    FALSE);

	if (kr != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map,
		    vm_map_trunc_page(map_addr,
		    VM_MAP_PAGE_MASK(map)),
		    vm_map_round_page(map_addr + map_size,
		    VM_MAP_PAGE_MASK(map)),
		    VM_MAP_REMOVE_NO_FLAGS);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object) {
		vm_map_simplify(map, map_addr);
		vm_tag_update_size(tag, map_size);
	}
	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);

	return KERN_SUCCESS;
}
/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 *			  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *						if set and a lomemory pool is available
 *						grab pages from it... this also implies
 *						4K alignment of returned memory
 */
kern_return_t
kernel_memory_allocate(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_offset_t     mask,
	int             flags,
	vm_tag_t        tag)
{
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_object_offset_t      pg_offset;
	vm_map_entry_t          entry = NULL;
	vm_map_offset_t         map_addr, fill_start;
	vm_map_offset_t         map_mask;
	vm_map_size_t           map_size, fill_size;
	kern_return_t           kr, pe_result;
	vm_page_t               mem;
	vm_page_t               guard_page_list = NULL;
	vm_page_t               wired_page_list = NULL;
	int                     guard_page_count = 0;
	int                     wired_page_count = 0;
	int                     page_grab_count = 0;
	int                     i;
	int                     vm_alloc_flags;
	vm_map_kernel_flags_t   vmk_flags;
	vm_prot_t               kma_prot;
#if DEVELOPMENT || DEBUG
	task_t                  task = current_task();
#endif /* DEVELOPMENT || DEBUG */

	if (!vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(map));
	map_mask = (vm_map_offset_t) mask;

	vm_alloc_flags = 0; //VM_MAKE_TAG(tag);
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	/* Check for zero allocation size (either directly or via overflow) */
	if (map_size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * limit the size of a single extent of wired memory
	 * to try and limit the damage to the system if
	 * too many pages get wired down
	 * limit raised to 2GB with 128GB max physical limit,
	 * but scaled by installed memory above this
	 */
	if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
	    map_size > MAX(1ULL << 31, sane_size / 64)) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Guard pages:
	 *
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */

	fill_start = 0;
	fill_size = map_size;

	if (flags & KMA_GUARD_FIRST) {
		vmk_flags.vmkf_guard_before = TRUE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;
		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	if (flags & KMA_GUARD_LAST) {
		vmk_flags.vmkf_guard_after = TRUE;
		fill_size -= PAGE_SIZE_64;
		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	wired_page_count = (int) (fill_size / PAGE_SIZE_64);
	assert(wired_page_count * PAGE_SIZE_64 == fill_size);

#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
#endif
	for (i = 0; i < guard_page_count; i++) {
		for (;;) {
			mem = vm_page_grab_guard();

			if (mem != VM_PAGE_NULL) {
				break;
			}
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_page_more_fictitious();
		}
		mem->vmp_snext = guard_page_list;
		guard_page_list = mem;
	}

	if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
		for (i = 0; i < wired_page_count; i++) {
			for (;;) {
				if (flags & KMA_LOMEM) {
					mem = vm_page_grablo();
				} else {
					mem = vm_page_grab();
				}

				if (mem != VM_PAGE_NULL) {
					break;
				}

				if (flags & KMA_NOPAGEWAIT) {
					kr = KERN_RESOURCE_SHORTAGE;
					goto out;
				}
				if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
					kr = KERN_RESOURCE_SHORTAGE;
					goto out;
				}

				/* VM privileged threads should have waited in vm_page_grab() and not get here. */
				assert(!(current_thread()->options & TH_OPT_VMPRIV));

				uint64_t unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
				if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
					kr = KERN_RESOURCE_SHORTAGE;
					goto out;
				}
				VM_PAGE_WAIT();
			}
			page_grab_count++;
			if (KMA_ZERO & flags) {
				vm_page_zero_fill(mem);
			}
			mem->vmp_snext = wired_page_list;
			wired_page_list = mem;
		}
	}
	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else if ((flags & KMA_COMPRESSOR) != 0) {
		object = compressor_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	if (flags & KMA_ATOMIC) {
		vmk_flags.vmkf_atomic_entry = TRUE;
	}

	kr = vm_map_find_space(map, &map_addr,
	    fill_size, map_mask,
	    vm_alloc_flags, vmk_flags, tag, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		goto out;
	}

	if (object == kernel_object || object == compressor_object) {
		offset = map_addr;
	} else {
		offset = 0;
	}
	VME_OBJECT_SET(entry, object);
	VME_OFFSET_SET(entry, offset);

	if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE))) {
		entry->wired_count++;
	}

	if (flags & KMA_PERMANENT) {
		entry->permanent = TRUE;
	}

	if (object != kernel_object && object != compressor_object) {
		vm_object_reference(object);
	}

	vm_object_lock(object);
	vm_map_unlock(map);

	pg_offset = 0;
	if (fill_start) {
		if (guard_page_list == NULL) {
			panic("kernel_memory_allocate: guard_page_list == NULL");
		}

		mem = guard_page_list;
		guard_page_list = mem->vmp_snext;
		mem->vmp_snext = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->vmp_busy = FALSE;
		pg_offset += PAGE_SIZE_64;
	}

	kma_prot = VM_PROT_READ | VM_PROT_WRITE;
#if KASAN
	if (!(flags & KMA_VAONLY)) {
		/* for VAONLY mappings we notify in populate only */
		kasan_notify_address(map_addr, size);
	}
#endif
	if (flags & (KMA_VAONLY | KMA_PAGEABLE)) {
		pg_offset = fill_start + fill_size;
	} else {
		for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
			if (wired_page_list == NULL) {
				panic("kernel_memory_allocate: wired_page_list == NULL");
			}

			mem = wired_page_list;
			wired_page_list = mem->vmp_snext;
			mem->vmp_snext = NULL;

			assert(mem->vmp_wire_count == 0);
			assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);

			mem->vmp_q_state = VM_PAGE_IS_WIRED;
			mem->vmp_wire_count++;
			if (__improbable(mem->vmp_wire_count == 0)) {
				panic("kernel_memory_allocate(%p): wire_count overflow",
				    mem);
			}

			vm_page_insert_wired(mem, object, offset + pg_offset, tag);

			mem->vmp_busy = FALSE;
			mem->vmp_pmapped = TRUE;
			mem->vmp_wpmapped = TRUE;

			PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
			    kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
			    PMAP_OPTIONS_NOWAIT, pe_result);

			if (pe_result == KERN_RESOURCE_SHORTAGE) {
				vm_object_unlock(object);

				PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
				    kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
				    pe_result);

				vm_object_lock(object);
			}

			assert(pe_result == KERN_SUCCESS);

			if (flags & KMA_NOENCRYPT) {
				bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

				pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
			}
		}
		if (kernel_object == object) {
			vm_tag_update_size(tag, fill_size);
		}
	}
	if ((fill_start + fill_size) < map_size) {
		if (guard_page_list == NULL) {
			panic("kernel_memory_allocate: guard_page_list == NULL");
		}

		mem = guard_page_list;
		guard_page_list = mem->vmp_snext;
		mem->vmp_snext = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->vmp_busy = FALSE;
	}
	if (guard_page_list || wired_page_list) {
		panic("kernel_memory_allocate: non empty list\n");
	}

	if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
		vm_page_lockspin_queues();
		vm_page_wire_count += wired_page_count;
		vm_page_unlock_queues();
	}

	vm_object_unlock(object);

	/*
	 * now that the pages are wired, we no longer have to fear coalesce
	 */
	if (object == kernel_object || object == compressor_object) {
		vm_map_simplify(map, map_addr);
	} else {
		vm_object_deallocate(object);
	}

#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
	if (task != NULL) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
	}
#endif

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;

out:
	if (guard_page_list) {
		vm_page_free_list(guard_page_list, FALSE);
	}

	if (wired_page_list) {
		vm_page_free_list(wired_page_list, FALSE);
	}

#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
	if (task != NULL && kr == KERN_SUCCESS) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
	}
#endif

	return kr;
}
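
/*
 * Illustrative usage sketch (not part of the original file): one way a
 * kernel client might call kernel_memory_allocate() for a wired,
 * zero-filled region bracketed by guard pages, releasing it with
 * kmem_free().  The size and the VM_KERN_MEMORY_DIAG tag are hypothetical
 * choices for the example; note that the requested size must include the
 * guard pages themselves.
 */
#if 0 /* example only */
static kern_return_t
example_guarded_alloc(vm_offset_t *out_addr)
{
	/* 4 data pages plus one guard page at each end */
	vm_size_t       size = (4 + 2) * PAGE_SIZE;
	kern_return_t   kr;

	kr = kernel_memory_allocate(kernel_map, out_addr, size, 0,
	    KMA_KOBJECT | KMA_ZERO | KMA_GUARD_FIRST | KMA_GUARD_LAST,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* usable data starts one page in: [*out_addr + PAGE_SIZE, *out_addr + 5 * PAGE_SIZE) */
	kmem_free(kernel_map, *out_addr, size);
	return KERN_SUCCESS;
}
#endif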
kern_return_t
kernel_memory_populate(
	vm_map_t        map,
	vm_offset_t     addr,
	vm_size_t       size,
	int             flags,
	vm_tag_t        tag)
{
	vm_object_t             object;
	vm_object_offset_t      offset, pg_offset;
	kern_return_t           kr, pe_result;
	vm_page_t               mem;
	vm_page_t               page_list = NULL;
	int                     page_count = 0;
	int                     page_grab_count = 0;
	int                     i;

#if DEBUG || DEVELOPMENT
	task_t                  task = current_task();
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
#endif

	page_count = (int) (size / PAGE_SIZE_64);

	assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT));

	if (flags & KMA_COMPRESSOR) {
		pg_offset = page_count * PAGE_SIZE_64;

		do {
			for (;;) {
				mem = vm_page_grab();

				if (mem != VM_PAGE_NULL) {
					break;
				}

				VM_PAGE_WAIT();
			}
			page_grab_count++;
			if (KMA_ZERO & flags) {
				vm_page_zero_fill(mem);
			}
			mem->vmp_snext = page_list;
			page_list = mem;

			pg_offset -= PAGE_SIZE_64;

			kr = pmap_enter_options(kernel_pmap,
			    addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
			    PMAP_OPTIONS_INTERNAL, NULL);
			assert(kr == KERN_SUCCESS);
		} while (pg_offset);

		offset = addr;
		object = compressor_object;

		vm_object_lock(object);
		for (pg_offset = 0;
		    pg_offset < size;
		    pg_offset += PAGE_SIZE_64) {
			mem = page_list;
			page_list = mem->vmp_snext;
			mem->vmp_snext = NULL;

			vm_page_insert(mem, object, offset + pg_offset);
			assert(mem->vmp_busy);

			mem->vmp_busy = FALSE;
			mem->vmp_pmapped = TRUE;
			mem->vmp_wpmapped = TRUE;
			mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
		}
		vm_object_unlock(object);

#if KASAN
		if (map == compressor_map) {
			kasan_notify_address_nopoison(addr, size);
		} else {
			kasan_notify_address(addr, size);
		}
#endif

#if DEBUG || DEVELOPMENT
		VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
		if (task != NULL) {
			ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
		}
#endif
		return KERN_SUCCESS;
	}

	for (i = 0; i < page_count; i++) {
		for (;;) {
			if (flags & KMA_LOMEM) {
				mem = vm_page_grablo();
			} else {
				mem = vm_page_grab();
			}

			if (mem != VM_PAGE_NULL) {
				break;
			}

			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			if ((flags & KMA_LOMEM) &&
			    (vm_lopage_needed == TRUE)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			VM_PAGE_WAIT();
		}
		page_grab_count++;
		if (KMA_ZERO & flags) {
			vm_page_zero_fill(mem);
		}
		mem->vmp_snext = page_list;
		page_list = mem;
	}
	if (flags & KMA_KOBJECT) {
		offset = addr;
		object = kernel_object;

		vm_object_lock(object);
	} else {
		/*
		 * If it's not the kernel object, we need to:
		 *      lock map;
		 *      lookup entry;
		 *      lock object;
		 *      take reference on object;
		 *      unlock map;
		 */
		panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
		    "!KMA_KOBJECT",
		    map, (uint64_t) addr, (uint64_t) size, flags);
	}

	for (pg_offset = 0;
	    pg_offset < size;
	    pg_offset += PAGE_SIZE_64) {
		if (page_list == NULL) {
			panic("kernel_memory_populate: page_list == NULL");
		}

		mem = page_list;
		page_list = mem->vmp_snext;
		mem->vmp_snext = NULL;

		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
		mem->vmp_q_state = VM_PAGE_IS_WIRED;
		mem->vmp_wire_count++;
		if (__improbable(mem->vmp_wire_count == 0)) {
			panic("kernel_memory_populate(%p): wire_count overflow", mem);
		}

		vm_page_insert_wired(mem, object, offset + pg_offset, tag);

		mem->vmp_busy = FALSE;
		mem->vmp_pmapped = TRUE;
		mem->vmp_wpmapped = TRUE;

		PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
		    ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
		    PMAP_OPTIONS_NOWAIT, pe_result);

		if (pe_result == KERN_RESOURCE_SHORTAGE) {
			vm_object_unlock(object);

			PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
			    ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
			    pe_result);

			vm_object_lock(object);
		}

		assert(pe_result == KERN_SUCCESS);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
			pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count += page_count;
	vm_page_unlock_queues();

#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
	if (task != NULL) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
	}
#endif

	if (kernel_object == object) {
		vm_tag_update_size(tag, size);
	}

	vm_object_unlock(object);

#if KASAN
	if (map == compressor_map) {
		kasan_notify_address_nopoison(addr, size);
	} else {
		kasan_notify_address(addr, size);
	}
#endif
	return KERN_SUCCESS;

out:
	if (page_list) {
		vm_page_free_list(page_list, FALSE);
	}

#if DEBUG || DEVELOPMENT
	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
	if (task != NULL && kr == KERN_SUCCESS) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
	}
#endif

	return kr;
}
void
kernel_memory_depopulate(
	vm_map_t        map,
	vm_offset_t     addr,
	vm_size_t       size,
	int             flags)
{
	vm_object_t             object;
	vm_object_offset_t      offset, pg_offset;
	vm_page_t               mem;
	vm_page_t               local_freeq = NULL;

	assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT));

	if (flags & KMA_COMPRESSOR) {
		offset = addr;
		object = compressor_object;

		vm_object_lock(object);
	} else if (flags & KMA_KOBJECT) {
		offset = addr;
		object = kernel_object;
		vm_object_lock(object);
	} else {
		offset = 0;
		object = NULL;
		/*
		 * If it's not the kernel object, we need to:
		 *      lock map;
		 *      lookup entry;
		 *      lock object;
		 *      unlock map;
		 */
		panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
		    "!KMA_KOBJECT",
		    map, (uint64_t) addr, (uint64_t) size, flags);
	}
	pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);

	for (pg_offset = 0;
	    pg_offset < size;
	    pg_offset += PAGE_SIZE_64) {
		mem = vm_page_lookup(object, offset + pg_offset);

		assert(mem);

		if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
		}

		mem->vmp_busy = TRUE;

		assert(mem->vmp_tabled);
		vm_page_remove(mem, TRUE);
		assert(mem->vmp_busy);

		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
		assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
		    (mem->vmp_q_state == VM_PAGE_NOT_ON_Q));

		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
		mem->vmp_snext = local_freeq;
		local_freeq = mem;
	}
	vm_object_unlock(object);

	if (local_freeq) {
		vm_page_free_list(local_freeq, TRUE);
	}
}
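
/*
 * Illustrative usage sketch (not part of the original file): pairing a
 * KMA_VAONLY reservation with kernel_memory_populate()/depopulate() to
 * commit and release physical pages on demand.  The sizes and the
 * VM_KERN_MEMORY_DIAG tag are hypothetical choices for the example.
 */
#if 0 /* example only */
static kern_return_t
example_populate_on_demand(void)
{
	vm_offset_t     addr;
	vm_size_t       size = 8 * PAGE_SIZE;
	kern_return_t   kr;

	/* reserve virtual space only; no physical pages yet */
	kr = kernel_memory_allocate(kernel_map, &addr, size, 0,
	    KMA_KOBJECT | KMA_VAONLY, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* commit the first two pages */
	kr = kernel_memory_populate(kernel_map, addr, 2 * PAGE_SIZE,
	    KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		kmem_free(kernel_map, addr, size);
		return kr;
	}

	/* ... use [addr, addr + 2 * PAGE_SIZE) ... */

	kernel_memory_depopulate(kernel_map, addr, 2 * PAGE_SIZE, KMA_KOBJECT);
	kmem_free(kernel_map, addr, size);
	return KERN_SUCCESS;
}
#endif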
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc_external(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size)
{
	return kmem_alloc(map, addrp, size, vm_tag_bt());
}

kern_return_t
kmem_alloc(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_tag_t        tag)
{
	return kmem_alloc_flags(map, addrp, size, tag, 0);
}

kern_return_t
kmem_alloc_flags(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_tag_t        tag,
	int             flags)
{
	kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
	TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
	return kr;
}
/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t        map,
	vm_offset_t     oldaddr,
	vm_size_t       oldsize,
	vm_offset_t     *newaddrp,
	vm_size_t       newsize,
	vm_tag_t        tag)
{
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_map_offset_t         oldmapmin;
	vm_map_offset_t         oldmapmax;
	vm_map_offset_t         newmapaddr;
	vm_map_size_t           oldmapsize;
	vm_map_size_t           newmapsize;
	vm_map_entry_t          oldentry;
	vm_map_entry_t          newentry;
	vm_page_t               mem;
	kern_return_t           kr;

	oldmapmin = vm_map_trunc_page(oldaddr,
	    VM_MAP_PAGE_MASK(map));
	oldmapmax = vm_map_round_page(oldaddr + oldsize,
	    VM_MAP_PAGE_MASK(map));
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize,
	    VM_MAP_PAGE_MASK(map));
	if (newmapsize < newsize) {
		/* overflow */
		*newaddrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry)) {
		panic("kmem_realloc");
	}
	object = VME_OBJECT(oldentry);

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->vo_size != oldmapsize) {
		panic("kmem_realloc");
	}
	object->vo_size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
	    vm_object_round_page(newmapsize - oldmapsize));
	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
	    (vm_map_offset_t) 0, 0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    tag,
	    &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	VME_OBJECT_SET(newentry, object);
	VME_OFFSET_SET(newentry, 0);
	assert(newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);
	kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize,
	    VM_PROT_DEFAULT, tag, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, VM_MAP_REMOVE_NO_FLAGS);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (kernel_object == object) {
		vm_tag_update_size(tag, newmapsize);
	}

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
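
/*
 * Illustrative usage sketch (not part of the original file): because
 * kmem_realloc() leaves the old region mapped and unchanged, a caller
 * grows a region by asking for a second mapping of the same pages and
 * then freeing the old range itself.  Arguments are hypothetical.
 */
#if 0 /* example only */
static kern_return_t
example_grow_region(vm_offset_t *addr, vm_size_t oldsize, vm_size_t newsize)
{
	vm_offset_t     newaddr;
	kern_return_t   kr;

	kr = kmem_realloc(kernel_map, *addr, oldsize, &newaddr, newsize,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* the original pages are now mapped at both *addr and newaddr */
	kmem_free(kernel_map, *addr, oldsize);
	*addr = newaddr;
	return KERN_SUCCESS;
}
#endif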
/*
 *	kmem_alloc_kobject:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_kobject_external(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size)
{
	return kmem_alloc_kobject(map, addrp, size, vm_tag_bt());
}

kern_return_t
kmem_alloc_kobject(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_tag_t        tag)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_kobject, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_tag_t        tag)
{
	if ((size & (size - 1)) != 0) {
		panic("kmem_alloc_aligned: size not aligned");
	}
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
}
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable_external(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size)
{
	return kmem_alloc_pageable(map, addrp, size, vm_tag_bt());
}

kern_return_t
kmem_alloc_pageable(
	vm_map_t        map,
	vm_offset_t     *addrp,
	vm_size_t       size,
	vm_tag_t        tag)
{
	vm_map_offset_t map_addr;
	vm_map_size_t   map_size;
	kern_return_t   kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + PAGE_SIZE;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(map));
	if (map_size < size) {
		/* overflow */
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	kr = vm_map_enter(map, &map_addr, map_size,
	    (vm_map_offset_t) 0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    tag,
	    VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if KASAN
	kasan_notify_address(map_addr, map_size);
#endif
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t        map,
	vm_offset_t     addr,
	vm_size_t       size)
{
	kern_return_t kr;

	assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);

	TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);

	if (size == 0) {
#if MACH_ASSERT
		printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n", map, (uint64_t)addr);
#endif
		return;
	}

	kr = vm_map_remove(map,
	    vm_map_trunc_page(addr,
	    VM_MAP_PAGE_MASK(map)),
	    vm_map_round_page(addr + size,
	    VM_MAP_PAGE_MASK(map)),
	    VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS) {
		panic("kmem_free");
	}
}
/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size)
{
	vm_object_size_t        alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		vm_page_t       mem;

		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		    (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->vmp_busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t        parent,
	vm_offset_t     *addr,
	vm_size_t       size,
	boolean_t       pageable,
	int             flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t        tag,
	vm_map_t        *new_map)
{
	vm_map_t        map;
	vm_map_offset_t map_addr;
	vm_map_size_t   map_size;
	kern_return_t   kr;

	map_size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(parent));
	if (map_size < size) {
		/* overflow */
		*addr = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = ((flags & VM_FLAGS_ANYWHERE)
	    ? vm_map_min(parent)
	    : vm_map_trunc_page(*addr,
	    VM_MAP_PAGE_MASK(parent)));

	kr = vm_map_enter(parent, &map_addr, map_size,
	    (vm_map_offset_t) 0, flags, vmk_flags, tag,
	    vm_submap_object, (vm_object_offset_t) 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return kr;
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL) {
		panic("kmem_suballoc: vm_map_create failed");   /* "can't happen" */
	}
	/* inherit the parent map's page size */
	vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size,
		    VM_MAP_REMOVE_NO_FLAGS);
		vm_map_deallocate(map); /* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return kr;
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return KERN_SUCCESS;
}
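
/*
 * Illustrative usage sketch (not part of the original file): carving a
 * pageable submap out of kernel_map with kmem_suballoc(), typically done
 * once at subsystem init.  The size and VM_KERN_MEMORY_DIAG tag are
 * hypothetical choices for the example.
 */
#if 0 /* example only */
static vm_map_t
example_make_submap(vm_size_t size)
{
	vm_offset_t     addr = 0;
	vm_map_t        submap = VM_MAP_NULL;
	kern_return_t   kr;

	kr = kmem_suballoc(kernel_map, &addr, size,
	    TRUE,                       /* pageable */
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_DIAG,
	    &submap);
	return (kr == KERN_SUCCESS) ? submap : VM_MAP_NULL;
}
#endif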
/*
 * The default percentage of memory that can be mlocked is scaled based on the total
 * amount of memory in the system. These percentages are calculated
 * offline and stored in this table. We index this table by
 * log2(max_mem) - VM_USER_WIREABLE_MIN_CONFIG. We clamp this index in the range
 * [0, sizeof(wire_limit_percents) / sizeof(vm_map_size_t))
 *
 * Note that these values were picked for mac.
 * If we ever have very large memory config arm devices, we may want to revisit
 * since the kernel overhead is smaller there due to the larger page size.
 */

/* Start scaling iff we're managing > 2^32 = 4GB of RAM. */
#define VM_USER_WIREABLE_MIN_CONFIG 32
static vm_map_size_t wire_limit_percents[] =
{ 70, 73, 76, 79, 82, 85, 88, 91, 94, 97};

/*
 * Sets the default global user wire limit which limits the amount of
 * memory that can be locked via mlock() based on the above algorithm.
 * This can be overridden via a sysctl.
 */
static void
kmem_set_user_wire_limits(void)
{
	uint64_t available_mem_log;
	uint64_t max_wire_percent;
	size_t wire_limit_percents_length = sizeof(wire_limit_percents) /
	    sizeof(vm_map_size_t);
	vm_map_size_t limit;

	available_mem_log = bit_floor(max_mem);

	if (available_mem_log < VM_USER_WIREABLE_MIN_CONFIG) {
		available_mem_log = 0;
	} else {
		available_mem_log -= VM_USER_WIREABLE_MIN_CONFIG;
	}
	if (available_mem_log >= wire_limit_percents_length) {
		available_mem_log = wire_limit_percents_length - 1;
	}
	max_wire_percent = wire_limit_percents[available_mem_log];

	limit = max_mem * max_wire_percent / 100;
	/* Cap the number of non lockable bytes at VM_NOT_USER_WIREABLE_MAX */
	if (max_mem - limit > VM_NOT_USER_WIREABLE_MAX) {
		limit = max_mem - VM_NOT_USER_WIREABLE_MAX;
	}

	vm_global_user_wire_limit = limit;
	/* the default per task limit is the same as the global limit */
	vm_per_task_user_wire_limit = limit;
}
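
/*
 * Worked example (not part of the original file) of the index computation
 * above for a hypothetical 16GB configuration: log2(16GB) is 34, so
 * 34 - VM_USER_WIREABLE_MIN_CONFIG leaves index 2 and the wire limit would
 * be wire_limit_percents[2] == 76% of max_mem, before the
 * VM_NOT_USER_WIREABLE_MAX cap is applied.
 */
#if 0 /* example only */
static uint64_t
example_wire_percent_for(uint64_t mem_bytes)
{
	uint64_t        idx = bit_floor(mem_bytes);
	size_t          len = sizeof(wire_limit_percents) / sizeof(vm_map_size_t);

	if (idx < VM_USER_WIREABLE_MIN_CONFIG) {
		idx = 0;
	} else {
		idx -= VM_USER_WIREABLE_MIN_CONFIG;
	}
	if (idx >= len) {
		idx = len - 1;
	}
	return wire_limit_percents[idx];
}
#endif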
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t     start,
	vm_offset_t     end)
{
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;
	vm_map_kernel_flags_t vmk_flags;

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;
	vmk_flags.vmkf_no_pmap_check = TRUE;

	map_start = vm_map_trunc_page(start,
	    VM_MAP_PAGE_MASK(kernel_map));
	map_end = vm_map_round_page(end,
	    VM_MAP_PAGE_MASK(kernel_map));
#if     defined(__arm__) || defined(__arm64__)
	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS, FALSE);
	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	{
		unsigned int    region_select = 0;
		vm_map_offset_t region_start;
		vm_map_size_t   region_size;
		vm_map_offset_t map_addr;
		kern_return_t   kr;

		while (pmap_virtual_region(region_select, &region_start, &region_size)) {
			map_addr = region_start;
			kr = vm_map_enter(kernel_map, &map_addr,
			    vm_map_round_page(region_size,
			    VM_MAP_PAGE_MASK(kernel_map)),
			    (vm_map_offset_t) 0,
			    VM_FLAGS_FIXED,
			    vmk_flags,
			    VM_KERN_MEMORY_NONE,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
			    VM_INHERIT_DEFAULT);

			if (kr != KERN_SUCCESS) {
				panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
				    (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
				    (uint64_t) region_size, kr);
			}

			region_select++;
		}
	}
#else
	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
	    map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		vm_map_offset_t map_addr;
		kern_return_t   kr;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_no_pmap_check = TRUE;

		map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
		kr = vm_map_enter(kernel_map,
		    &map_addr,
		    (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
		    (vm_map_offset_t) 0,
		    VM_FLAGS_FIXED,
		    vmk_flags,
		    VM_KERN_MEMORY_NONE,
		    VM_OBJECT_NULL,
		    (vm_object_offset_t) 0, FALSE,
		    VM_PROT_NONE, VM_PROT_NONE,
		    VM_INHERIT_DEFAULT);

		if (kr != KERN_SUCCESS) {
			panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
			    (uint64_t) start, (uint64_t) end,
			    (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			    (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
			    kr);
		}
	}
#endif

	kmem_set_user_wire_limits();
}
/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t                map,
	vm_map_offset_t         fromaddr,
	void                    *todata,
	vm_size_t               length)
{
	kern_return_t   kr = KERN_SUCCESS;
	vm_map_t        oldmap;

	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	} else if (current_map() == map) {
		if (copyin(fromaddr, todata, length) != 0) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t                map,
	void                    *fromdata,
	vm_map_address_t        toaddr,
	vm_size_t               length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map) {
		return KERN_NOT_SUPPORTED;
	}

	if (copyout(fromdata, toaddr, length) != 0) {
		return KERN_INVALID_ADDRESS;
	}

	return KERN_SUCCESS;
}
/*
 *
 *	The following two functions are to be used when exposing kernel
 *	addresses to userspace via any of the various debug or info
 *	facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
 *	and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
 *	are exported to KEXTs.
 *
 *	NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
 */

static void
vm_kernel_addrhash_internal(
	vm_offset_t addr,
	vm_offset_t *hash_addr,
	uint64_t salt)
{
	assert(salt != 0);

	if (addr == 0) {
		*hash_addr = 0;
		return;
	}

	if (VM_KERNEL_IS_SLID(addr)) {
		*hash_addr = VM_KERNEL_UNSLIDE(addr);
		return;
	}

	vm_offset_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(vm_offset_t)];
	SHA256_CTX sha_ctx;

	SHA256_Init(&sha_ctx);
	SHA256_Update(&sha_ctx, &salt, sizeof(salt));
	SHA256_Update(&sha_ctx, &addr, sizeof(addr));
	SHA256_Final(sha_digest, &sha_ctx);

	*hash_addr = sha_digest[0];
}
void
vm_kernel_addrhash_external(
	vm_offset_t addr,
	vm_offset_t *hash_addr)
{
	return vm_kernel_addrhash_internal(addr, hash_addr, vm_kernel_addrhash_salt_ext);
}

vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
	vm_offset_t hash_addr;
	vm_kernel_addrhash_internal(addr, &hash_addr, vm_kernel_addrhash_salt);
	return hash_addr;
}
void
vm_kernel_addrhide(
	vm_offset_t addr,
	vm_offset_t *hide_addr)
{
	*hide_addr = VM_KERNEL_ADDRHIDE(addr);
}
/*
 *	vm_kernel_addrperm_external:
 *	vm_kernel_unslide_or_perm_external:
 *
 *	Use these macros when exposing an address to userspace that could come from
 *	either kernel text/data *or* the heap.
 */
void
vm_kernel_addrperm_external(
	vm_offset_t addr,
	vm_offset_t *perm_addr)
{
	if (VM_KERNEL_IS_SLID(addr)) {
		*perm_addr = VM_KERNEL_UNSLIDE(addr);
	} else if (VM_KERNEL_ADDRESS(addr)) {
		*perm_addr = addr + vm_kernel_addrperm_ext;
	} else {
		*perm_addr = addr;
	}
}
void
vm_kernel_unslide_or_perm_external(
	vm_offset_t addr,
	vm_offset_t *up_addr)
{
	vm_kernel_addrperm_external(addr, up_addr);
}