1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_kern.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Kernel memory management.
64 */
65
66 #include <mach/kern_return.h>
67 #include <mach/vm_param.h>
68 #include <kern/assert.h>
69 #include <kern/thread.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_pageout.h>
75 #include <kern/misc_protos.h>
76 #include <vm/cpm.h>
77
78 #include <string.h>
79
80 #include <libkern/OSDebug.h>
81 #include <sys/kdebug.h>
82
83 /*
84 * Variables exported by this module.
85 */
86
87 vm_map_t kernel_map;
88 vm_map_t kernel_pageable_map;
89
90 extern boolean_t vm_kernel_ready;
91
92 /*
93 * Forward declarations for internal functions.
94 */
95 extern kern_return_t kmem_alloc_pages(
96 register vm_object_t object,
97 register vm_object_offset_t offset,
98 register vm_object_size_t size);
99
100 kern_return_t
101 kmem_alloc_contig(
102 vm_map_t map,
103 vm_offset_t *addrp,
104 vm_size_t size,
105 vm_offset_t mask,
106 ppnum_t max_pnum,
107 ppnum_t pnum_mask,
108 int flags,
109 vm_tag_t tag)
110 {
111 vm_object_t object;
112 vm_object_offset_t offset;
113 vm_map_offset_t map_addr;
114 vm_map_offset_t map_mask;
115 vm_map_size_t map_size, i;
116 vm_map_entry_t entry;
117 vm_page_t m, pages;
118 kern_return_t kr;
119
120 if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
121 return KERN_INVALID_ARGUMENT;
122
123 map_size = vm_map_round_page(size,
124 VM_MAP_PAGE_MASK(map));
125 map_mask = (vm_map_offset_t)mask;
126
127 /* Check for zero allocation size (either directly or via overflow) */
128 if (map_size == 0) {
129 *addrp = 0;
130 return KERN_INVALID_ARGUMENT;
131 }
132
133 /*
134 * Allocate a new object (if necessary) and the reference we
135 * will be donating to the map entry. We must do this before
136 * locking the map, or risk deadlock with the default pager.
137 */
138 if ((flags & KMA_KOBJECT) != 0) {
139 object = kernel_object;
140 vm_object_reference(object);
141 } else {
142 object = vm_object_allocate(map_size);
143 }
144
145 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
146 if (KERN_SUCCESS != kr) {
147 vm_object_deallocate(object);
148 return kr;
149 }
150
151 if (object == kernel_object) {
152 offset = map_addr;
153 } else {
154 offset = 0;
155 }
156 VME_OBJECT_SET(entry, object);
157 VME_OFFSET_SET(entry, offset);
158 VME_ALIAS_SET(entry, tag);
159
160 /* Take an extra object ref in case the map entry gets deleted */
161 vm_object_reference(object);
162 vm_map_unlock(map);
163
164 kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
165
166 if (kr != KERN_SUCCESS) {
167 vm_map_remove(map,
168 vm_map_trunc_page(map_addr,
169 VM_MAP_PAGE_MASK(map)),
170 vm_map_round_page(map_addr + map_size,
171 VM_MAP_PAGE_MASK(map)),
172 0);
173 vm_object_deallocate(object);
174 *addrp = 0;
175 return kr;
176 }
177
178 vm_object_lock(object);
179 for (i = 0; i < map_size; i += PAGE_SIZE) {
180 m = pages;
181 pages = NEXT_PAGE(m);
182 *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
183 m->busy = FALSE;
184 vm_page_insert(m, object, offset + i);
185 }
186 vm_object_unlock(object);
187
188 kr = vm_map_wire(map,
189 vm_map_trunc_page(map_addr,
190 VM_MAP_PAGE_MASK(map)),
191 vm_map_round_page(map_addr + map_size,
192 VM_MAP_PAGE_MASK(map)),
193 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag),
194 FALSE);
195
196 if (kr != KERN_SUCCESS) {
197 if (object == kernel_object) {
198 vm_object_lock(object);
199 vm_object_page_remove(object, offset, offset + map_size);
200 vm_object_unlock(object);
201 }
202 vm_map_remove(map,
203 vm_map_trunc_page(map_addr,
204 VM_MAP_PAGE_MASK(map)),
205 vm_map_round_page(map_addr + map_size,
206 VM_MAP_PAGE_MASK(map)),
207 0);
208 vm_object_deallocate(object);
209 return kr;
210 }
211 vm_object_deallocate(object);
212
213 if (object == kernel_object)
214 vm_map_simplify(map, map_addr);
215
216 *addrp = (vm_offset_t) map_addr;
217 assert((vm_map_offset_t) *addrp == map_addr);
218 return KERN_SUCCESS;
219 }
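/*
 * Illustrative usage sketch for kmem_alloc_contig() (the example_* helper
 * and its parameters are hypothetical): request a physically contiguous,
 * wired buffer for a device that can only address the low 4GB.  The
 * assumption here is that max_pnum bounds the acceptable physical page
 * numbers and that a pnum_mask of 0 imposes no extra physical alignment.
 */
static kern_return_t
example_contig_dma_buffer(vm_offset_t *addrp, vm_size_t size)
{
	ppnum_t max_pnum = (ppnum_t) atop(0xFFFFFFFFULL);	/* pages below 4GB */

	return kmem_alloc_contig(kernel_map, addrp, size,
				 0,			/* no extra VA alignment */
				 max_pnum, 0,		/* physical constraints */
				 KMA_NOPAGEWAIT, vm_tag_bt());
	/* on success, release later with kmem_free(kernel_map, *addrp, size) */
}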
220
221 /*
222 * Master entry point for allocating kernel memory.
223 * NOTE: this routine is _never_ interrupt safe.
224 *
225 * map : map to allocate into
226 * addrp : pointer to start address of new memory
227 * size : size of memory requested
228 * flags : options
229 * KMA_HERE *addrp is base address, else "anywhere"
230 * KMA_NOPAGEWAIT don't wait for pages if unavailable
231 * KMA_KOBJECT use kernel_object
232 * KMA_LOMEM support for 32-bit devices in a 64-bit world;
233 * if set and a low-memory pool is available,
234 * grab pages from it (this also implies
235 * KMA_NOPAGEWAIT)
236 */
237
238 kern_return_t
239 kernel_memory_allocate(
240 register vm_map_t map,
241 register vm_offset_t *addrp,
242 register vm_size_t size,
243 register vm_offset_t mask,
244 int flags,
245 vm_tag_t tag)
246 {
247 vm_object_t object;
248 vm_object_offset_t offset;
249 vm_object_offset_t pg_offset;
250 vm_map_entry_t entry = NULL;
251 vm_map_offset_t map_addr, fill_start;
252 vm_map_offset_t map_mask;
253 vm_map_size_t map_size, fill_size;
254 kern_return_t kr, pe_result;
255 vm_page_t mem;
256 vm_page_t guard_page_list = NULL;
257 vm_page_t wired_page_list = NULL;
258 int guard_page_count = 0;
259 int wired_page_count = 0;
260 int i;
261 int vm_alloc_flags;
262 vm_prot_t kma_prot;
263
264 if (! vm_kernel_ready) {
265 panic("kernel_memory_allocate: VM is not ready");
266 }
267
268 map_size = vm_map_round_page(size,
269 VM_MAP_PAGE_MASK(map));
270 map_mask = (vm_map_offset_t) mask;
271
272 vm_alloc_flags = VM_MAKE_TAG(tag);
273
274 /* Check for zero allocation size (either directly or via overflow) */
275 if (map_size == 0) {
276 *addrp = 0;
277 return KERN_INVALID_ARGUMENT;
278 }
279
280 /*
281 * Limit the size of a single extent of wired memory
282 * to try to limit the damage to the system if
283 * too many pages get wired down.
284 * Limit raised to 2GB with a 128GB max physical limit.
285 */
286 if ( !(flags & KMA_VAONLY) && map_size > (1ULL << 31)) {
287 return KERN_RESOURCE_SHORTAGE;
288 }
289
290 /*
291 * Guard pages:
292 *
293 * Guard pages are implemented as fictitious pages. By placing guard pages
294 * on either end of a stack, they can help detect cases where a thread walks
295 * off either end of its stack. They are allocated and set up here and attempts
296 * to access those pages are trapped in vm_fault_page().
297 *
298 * The map_size we were passed may include extra space for
299 * guard pages. If those were requested, then back it out of fill_size
300 * since vm_map_find_space() takes just the actual size not including
301 * guard pages. Similarly, fill_start indicates where the actual pages
302 * will begin in the range.
303 */
304
305 fill_start = 0;
306 fill_size = map_size;
307
308 if (flags & KMA_GUARD_FIRST) {
309 vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
310 fill_start += PAGE_SIZE_64;
311 fill_size -= PAGE_SIZE_64;
312 if (map_size < fill_start + fill_size) {
313 /* no space for a guard page */
314 *addrp = 0;
315 return KERN_INVALID_ARGUMENT;
316 }
317 guard_page_count++;
318 }
319 if (flags & KMA_GUARD_LAST) {
320 vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
321 fill_size -= PAGE_SIZE_64;
322 if (map_size <= fill_start + fill_size) {
323 /* no space for a guard page */
324 *addrp = 0;
325 return KERN_INVALID_ARGUMENT;
326 }
327 guard_page_count++;
328 }
329 wired_page_count = (int) (fill_size / PAGE_SIZE_64);
330 assert(wired_page_count * PAGE_SIZE_64 == fill_size);
331
332 for (i = 0; i < guard_page_count; i++) {
333 for (;;) {
334 mem = vm_page_grab_guard();
335
336 if (mem != VM_PAGE_NULL)
337 break;
338 if (flags & KMA_NOPAGEWAIT) {
339 kr = KERN_RESOURCE_SHORTAGE;
340 goto out;
341 }
342 vm_page_more_fictitious();
343 }
344 mem->pageq.next = (queue_entry_t)guard_page_list;
345 guard_page_list = mem;
346 }
347
348 if (! (flags & KMA_VAONLY)) {
349 for (i = 0; i < wired_page_count; i++) {
350 uint64_t unavailable;
351
352 for (;;) {
353 if (flags & KMA_LOMEM)
354 mem = vm_page_grablo();
355 else
356 mem = vm_page_grab();
357
358 if (mem != VM_PAGE_NULL)
359 break;
360
361 if (flags & KMA_NOPAGEWAIT) {
362 kr = KERN_RESOURCE_SHORTAGE;
363 goto out;
364 }
365 if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
366 kr = KERN_RESOURCE_SHORTAGE;
367 goto out;
368 }
369 unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
370
371 if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
372 kr = KERN_RESOURCE_SHORTAGE;
373 goto out;
374 }
375 VM_PAGE_WAIT();
376 }
377 mem->pageq.next = (queue_entry_t)wired_page_list;
378 wired_page_list = mem;
379 }
380 }
381
382 /*
383 * Allocate a new object (if necessary). We must do this before
384 * locking the map, or risk deadlock with the default pager.
385 */
386 if ((flags & KMA_KOBJECT) != 0) {
387 object = kernel_object;
388 vm_object_reference(object);
389 } else if ((flags & KMA_COMPRESSOR) != 0) {
390 object = compressor_object;
391 vm_object_reference(object);
392 } else {
393 object = vm_object_allocate(map_size);
394 }
395
396 kr = vm_map_find_space(map, &map_addr,
397 fill_size, map_mask,
398 vm_alloc_flags, &entry);
399 if (KERN_SUCCESS != kr) {
400 vm_object_deallocate(object);
401 goto out;
402 }
403
404 if (object == kernel_object || object == compressor_object) {
405 offset = map_addr;
406 } else {
407 offset = 0;
408 }
409 VME_OBJECT_SET(entry, object);
410 VME_OFFSET_SET(entry, offset);
411
412 if (object != compressor_object)
413 entry->wired_count++;
414
415 if (flags & KMA_PERMANENT)
416 entry->permanent = TRUE;
417
418 if (object != kernel_object && object != compressor_object)
419 vm_object_reference(object);
420
421 vm_object_lock(object);
422 vm_map_unlock(map);
423
424 pg_offset = 0;
425
426 if (fill_start) {
427 if (guard_page_list == NULL)
428 panic("kernel_memory_allocate: guard_page_list == NULL");
429
430 mem = guard_page_list;
431 guard_page_list = (vm_page_t)mem->pageq.next;
432 mem->pageq.next = NULL;
433
434 vm_page_insert(mem, object, offset + pg_offset);
435
436 mem->busy = FALSE;
437 pg_offset += PAGE_SIZE_64;
438 }
439
440 kma_prot = VM_PROT_READ | VM_PROT_WRITE;
441
442 if (flags & KMA_VAONLY) {
443 pg_offset = fill_start + fill_size;
444 } else {
445 for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
446 if (wired_page_list == NULL)
447 panic("kernel_memory_allocate: wired_page_list == NULL");
448
449 mem = wired_page_list;
450 wired_page_list = (vm_page_t)mem->pageq.next;
451 mem->pageq.next = NULL;
452 mem->wire_count++;
453
454 vm_page_insert_wired(mem, object, offset + pg_offset, tag);
455
456 mem->busy = FALSE;
457 mem->pmapped = TRUE;
458 mem->wpmapped = TRUE;
459
460 PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
461 kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
462 PMAP_OPTIONS_NOWAIT, pe_result);
463
464 if (pe_result == KERN_RESOURCE_SHORTAGE) {
465 vm_object_unlock(object);
466
467 PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
468 kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
469
470 vm_object_lock(object);
471 }
472 if (flags & KMA_NOENCRYPT) {
473 bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
474
475 pmap_set_noencrypt(mem->phys_page);
476 }
477 }
478 }
479 if ((fill_start + fill_size) < map_size) {
480 if (guard_page_list == NULL)
481 panic("kernel_memory_allocate: guard_page_list == NULL");
482
483 mem = guard_page_list;
484 guard_page_list = (vm_page_t)mem->pageq.next;
485 mem->pageq.next = NULL;
486
487 vm_page_insert(mem, object, offset + pg_offset);
488
489 mem->busy = FALSE;
490 }
491 if (guard_page_list || wired_page_list)
492 panic("kernel_memory_allocate: non empty list\n");
493
494 if (! (flags & KMA_VAONLY)) {
495 vm_page_lockspin_queues();
496 vm_page_wire_count += wired_page_count;
497 vm_page_unlock_queues();
498 }
499
500 vm_object_unlock(object);
501
502 /*
503 * now that the pages are wired, we no longer have to fear coalescing
504 */
505 if (object == kernel_object || object == compressor_object)
506 vm_map_simplify(map, map_addr);
507 else
508 vm_object_deallocate(object);
509
510 /*
511 * Return the memory, not zeroed.
512 */
513 *addrp = CAST_DOWN(vm_offset_t, map_addr);
514 return KERN_SUCCESS;
515
516 out:
517 if (guard_page_list)
518 vm_page_free_list(guard_page_list, FALSE);
519
520 if (wired_page_list)
521 vm_page_free_list(wired_page_list, FALSE);
522
523 return kr;
524 }
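/*
 * Hedged usage sketch for kernel_memory_allocate() (the example_* helper,
 * the payload size and the use of vm_tag_bt() are assumptions): allocate a
 * wired, kernel_object-backed range bracketed by guard pages, in the style
 * of a kernel stack.  The size passed in includes the two guard pages, and
 * the usable payload starts one page past the returned address.
 */
static kern_return_t
example_guarded_alloc(vm_offset_t *out_addr, vm_size_t payload_size)
{
	vm_size_t	total = payload_size + 2 * PAGE_SIZE;	/* payload + guards */
	kern_return_t	kr;

	kr = kernel_memory_allocate(kernel_map, out_addr, total, 0,
				    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST,
				    vm_tag_bt());
	if (kr != KERN_SUCCESS)
		return kr;

	/* payload occupies [*out_addr + PAGE_SIZE, *out_addr + PAGE_SIZE + payload_size) */

	kmem_free(kernel_map, *out_addr, total);
	return KERN_SUCCESS;
}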
525
526 kern_return_t
527 kernel_memory_populate(
528 vm_map_t map,
529 vm_offset_t addr,
530 vm_size_t size,
531 int flags,
532 vm_tag_t tag)
533 {
534 vm_object_t object;
535 vm_object_offset_t offset, pg_offset;
536 kern_return_t kr, pe_result;
537 vm_page_t mem;
538 vm_page_t page_list = NULL;
539 int page_count = 0;
540 int i;
541
542 page_count = (int) (size / PAGE_SIZE_64);
543
544 assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
545
546 if (flags & KMA_COMPRESSOR) {
547
548 pg_offset = page_count * PAGE_SIZE_64;
549
550 do {
551 for (;;) {
552 mem = vm_page_grab();
553
554 if (mem != VM_PAGE_NULL)
555 break;
556
557 VM_PAGE_WAIT();
558 }
559 mem->pageq.next = (queue_entry_t) page_list;
560 page_list = mem;
561
562 pg_offset -= PAGE_SIZE_64;
563
564 kr = pmap_enter_options(kernel_pmap,
565 addr + pg_offset, mem->phys_page,
566 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
567 PMAP_OPTIONS_INTERNAL, NULL);
568 assert(kr == KERN_SUCCESS);
569
570 } while (pg_offset);
571
572 offset = addr;
573 object = compressor_object;
574
575 vm_object_lock(object);
576
577 for (pg_offset = 0;
578 pg_offset < size;
579 pg_offset += PAGE_SIZE_64) {
580
581 mem = page_list;
582 page_list = (vm_page_t) mem->pageq.next;
583 mem->pageq.next = NULL;
584
585 vm_page_insert(mem, object, offset + pg_offset);
586 assert(mem->busy);
587
588 mem->busy = FALSE;
589 mem->pmapped = TRUE;
590 mem->wpmapped = TRUE;
591 mem->compressor = TRUE;
592 }
593 vm_object_unlock(object);
594
595 return KERN_SUCCESS;
596 }
597
598 for (i = 0; i < page_count; i++) {
599 for (;;) {
600 if (flags & KMA_LOMEM)
601 mem = vm_page_grablo();
602 else
603 mem = vm_page_grab();
604
605 if (mem != VM_PAGE_NULL)
606 break;
607
608 if (flags & KMA_NOPAGEWAIT) {
609 kr = KERN_RESOURCE_SHORTAGE;
610 goto out;
611 }
612 if ((flags & KMA_LOMEM) &&
613 (vm_lopage_needed == TRUE)) {
614 kr = KERN_RESOURCE_SHORTAGE;
615 goto out;
616 }
617 VM_PAGE_WAIT();
618 }
619 mem->pageq.next = (queue_entry_t) page_list;
620 page_list = mem;
621 }
622 if (flags & KMA_KOBJECT) {
623 offset = addr;
624 object = kernel_object;
625
626 vm_object_lock(object);
627 } else {
628 /*
629 * If it's not the kernel object, we need to:
630 * lock map;
631 * lookup entry;
632 * lock object;
633 * take reference on object;
634 * unlock map;
635 */
636 panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
637 "!KMA_KOBJECT",
638 map, (uint64_t) addr, (uint64_t) size, flags);
639 }
640
641 for (pg_offset = 0;
642 pg_offset < size;
643 pg_offset += PAGE_SIZE_64) {
644
645 if (page_list == NULL)
646 panic("kernel_memory_populate: page_list == NULL");
647
648 mem = page_list;
649 page_list = (vm_page_t) mem->pageq.next;
650 mem->pageq.next = NULL;
651
652 mem->wire_count++;
653
654 vm_page_insert_wired(mem, object, offset + pg_offset, tag);
655
656 mem->busy = FALSE;
657 mem->pmapped = TRUE;
658 mem->wpmapped = TRUE;
659
660 PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
661 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
662 ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
663 PMAP_OPTIONS_NOWAIT, pe_result);
664
665 if (pe_result == KERN_RESOURCE_SHORTAGE) {
666
667 vm_object_unlock(object);
668
669 PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
670 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
671 ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
672
673 vm_object_lock(object);
674 }
675 if (flags & KMA_NOENCRYPT) {
676 bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
677 pmap_set_noencrypt(mem->phys_page);
678 }
679 }
680 vm_page_lock_queues();
681 vm_page_wire_count += page_count;
682 vm_page_unlock_queues();
683
684 vm_object_unlock(object);
685
686 return KERN_SUCCESS;
687
688 out:
689 if (page_list)
690 vm_page_free_list(page_list, FALSE);
691
692 return kr;
693 }
694
695
696 void
697 kernel_memory_depopulate(
698 vm_map_t map,
699 vm_offset_t addr,
700 vm_size_t size,
701 int flags)
702 {
703 vm_object_t object;
704 vm_object_offset_t offset, pg_offset;
705 vm_page_t mem;
706 vm_page_t local_freeq = NULL;
707
708 assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
709
710 if (flags & KMA_COMPRESSOR) {
711 offset = addr;
712 object = compressor_object;
713
714 vm_object_lock(object);
715 } else if (flags & KMA_KOBJECT) {
716 offset = addr;
717 object = kernel_object;
718
719 vm_object_lock(object);
720 } else {
721 offset = 0;
722 object = NULL;
723 /*
724 * If it's not the kernel object, we need to:
725 * lock map;
726 * lookup entry;
727 * lock object;
728 * unlock map;
729 */
730 panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
731 "!KMA_KOBJECT",
732 map, (uint64_t) addr, (uint64_t) size, flags);
733 }
734 pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
735
736 for (pg_offset = 0;
737 pg_offset < size;
738 pg_offset += PAGE_SIZE_64) {
739
740 mem = vm_page_lookup(object, offset + pg_offset);
741
742 assert(mem);
743
744 pmap_disconnect(mem->phys_page);
745
746 mem->busy = TRUE;
747
748 assert(mem->tabled);
749 vm_page_remove(mem, TRUE);
750 assert(mem->busy);
751
752 assert(mem->pageq.next == NULL &&
753 mem->pageq.prev == NULL);
754 mem->pageq.next = (queue_entry_t)local_freeq;
755 local_freeq = mem;
756 }
757 vm_object_unlock(object);
758
759 if (local_freeq)
760 vm_page_free_list(local_freeq, TRUE);
761 }
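/*
 * Hedged sketch of the VA-only/populate pattern served by the two routines
 * above (the example_* helper and sizes are hypothetical): reserve
 * kernel_object-backed virtual space without pages, wire in one page on
 * demand, and drop it again when idle.
 */
static kern_return_t
example_vaonly_window(vm_offset_t *basep, vm_size_t window_size)
{
	kern_return_t	kr;

	kr = kernel_memory_allocate(kernel_map, basep, window_size, 0,
				    KMA_KOBJECT | KMA_VAONLY, vm_tag_bt());
	if (kr != KERN_SUCCESS)
		return kr;

	/* back just the first page of the reservation with wired memory */
	kr = kernel_memory_populate(kernel_map, *basep, PAGE_SIZE,
				    KMA_KOBJECT, vm_tag_bt());
	if (kr == KERN_SUCCESS)
		kernel_memory_depopulate(kernel_map, *basep, PAGE_SIZE,
					 KMA_KOBJECT);

	/* the VA reservation itself can later be released with kmem_free() */
	return kr;
}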
762
763 /*
764 * kmem_alloc:
765 *
766 * Allocate wired-down memory in the kernel's address map
767 * or a submap. The memory is not zero-filled.
768 */
769
770 kern_return_t
771 kmem_alloc_external(
772 vm_map_t map,
773 vm_offset_t *addrp,
774 vm_size_t size)
775 {
776 return (kmem_alloc(map, addrp, size, vm_tag_bt()));
777 }
778
779 kern_return_t
780 kmem_alloc(
781 vm_map_t map,
782 vm_offset_t *addrp,
783 vm_size_t size,
784 vm_tag_t tag)
785 {
786 kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0, tag);
787 TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
788 return kr;
789 }
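/*
 * Minimal kmem_alloc() round trip, as a sketch only (the example_* helper
 * and the 4-page size are assumptions): grab a wired scratch buffer from
 * the kernel map, tagged with the caller's backtrace, and free it when done.
 */
static void
example_kmem_alloc_roundtrip(void)
{
	vm_offset_t	buf;
	vm_size_t	size = 4 * PAGE_SIZE;

	if (kmem_alloc(kernel_map, &buf, size, vm_tag_bt()) == KERN_SUCCESS) {
		bzero((void *) buf, size);	/* the memory comes back non-zeroed */
		kmem_free(kernel_map, buf, size);
	}
}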
790
791 /*
792 * kmem_realloc:
793 *
794 * Reallocate wired-down memory in the kernel's address map
795 * or a submap. Newly allocated pages are not zeroed.
796 * This can only be used on regions allocated with kmem_alloc.
797 *
798 * If successful, the pages in the old region are mapped twice.
799 * The old region is unchanged. Use kmem_free to get rid of it.
800 */
801 kern_return_t
802 kmem_realloc(
803 vm_map_t map,
804 vm_offset_t oldaddr,
805 vm_size_t oldsize,
806 vm_offset_t *newaddrp,
807 vm_size_t newsize,
808 vm_tag_t tag)
809 {
810 vm_object_t object;
811 vm_object_offset_t offset;
812 vm_map_offset_t oldmapmin;
813 vm_map_offset_t oldmapmax;
814 vm_map_offset_t newmapaddr;
815 vm_map_size_t oldmapsize;
816 vm_map_size_t newmapsize;
817 vm_map_entry_t oldentry;
818 vm_map_entry_t newentry;
819 vm_page_t mem;
820 kern_return_t kr;
821
822 oldmapmin = vm_map_trunc_page(oldaddr,
823 VM_MAP_PAGE_MASK(map));
824 oldmapmax = vm_map_round_page(oldaddr + oldsize,
825 VM_MAP_PAGE_MASK(map));
826 oldmapsize = oldmapmax - oldmapmin;
827 newmapsize = vm_map_round_page(newsize,
828 VM_MAP_PAGE_MASK(map));
829
830
831 /*
832 * Find the VM object backing the old region.
833 */
834
835 vm_map_lock(map);
836
837 if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
838 panic("kmem_realloc");
839 object = VME_OBJECT(oldentry);
840
841 /*
842 * Increase the size of the object and
843 * fill in the new region.
844 */
845
846 vm_object_reference(object);
847 /* by grabbing the object lock before unlocking the map */
848 /* we guarantee that we will panic if more than one */
849 /* attempt is made to realloc a kmem_alloc'd area */
850 vm_object_lock(object);
851 vm_map_unlock(map);
852 if (object->vo_size != oldmapsize)
853 panic("kmem_realloc");
854 object->vo_size = newmapsize;
855 vm_object_unlock(object);
856
857 /* allocate the new pages while expanded portion of the */
858 /* object is still not mapped */
859 kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
860 vm_object_round_page(newmapsize-oldmapsize));
861
862 /*
863 * Find space for the new region.
864 */
865
866 kr = vm_map_find_space(map, &newmapaddr, newmapsize,
867 (vm_map_offset_t) 0, 0, &newentry);
868 if (kr != KERN_SUCCESS) {
869 vm_object_lock(object);
870 for(offset = oldmapsize;
871 offset < newmapsize; offset += PAGE_SIZE) {
872 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
873 VM_PAGE_FREE(mem);
874 }
875 }
876 object->vo_size = oldmapsize;
877 vm_object_unlock(object);
878 vm_object_deallocate(object);
879 return kr;
880 }
881 VME_OBJECT_SET(newentry, object);
882 VME_OFFSET_SET(newentry, 0);
883 VME_ALIAS_SET(newentry, tag);
884 assert(newentry->wired_count == 0);
885
886
887 /* add an extra reference in case we have someone doing an */
888 /* unexpected deallocate */
889 vm_object_reference(object);
890 vm_map_unlock(map);
891
892 kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
893 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag), FALSE);
894 if (KERN_SUCCESS != kr) {
895 vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
896 vm_object_lock(object);
897 for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
898 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
899 VM_PAGE_FREE(mem);
900 }
901 }
902 object->vo_size = oldmapsize;
903 vm_object_unlock(object);
904 vm_object_deallocate(object);
905 return (kr);
906 }
907 vm_object_deallocate(object);
908
909 *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
910 return KERN_SUCCESS;
911 }
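/*
 * Sketch of the kmem_realloc() contract described above (the example_*
 * helper is hypothetical): on success the old region remains mapped and
 * must be released explicitly with kmem_free().
 */
static kern_return_t
example_kmem_grow(vm_offset_t *addrp, vm_size_t oldsize, vm_size_t newsize)
{
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *addrp, oldsize, &newaddr, newsize, vm_tag_bt());
	if (kr != KERN_SUCCESS)
		return kr;

	/* the old region is unchanged; drop it once the new one is in use */
	kmem_free(kernel_map, *addrp, oldsize);
	*addrp = newaddr;
	return KERN_SUCCESS;
}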
912
913 /*
914 * kmem_alloc_kobject:
915 *
916 * Allocate wired-down memory in the kernel's address map
917 * or a submap. The memory is not zero-filled.
918 *
919 * The memory is allocated in the kernel_object.
920 * It may not be copied with vm_map_copy, and
921 * it may not be reallocated with kmem_realloc.
922 */
923
924 kern_return_t
925 kmem_alloc_kobject_external(
926 vm_map_t map,
927 vm_offset_t *addrp,
928 vm_size_t size)
929 {
930 return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
931 }
932
933 kern_return_t
934 kmem_alloc_kobject(
935 vm_map_t map,
936 vm_offset_t *addrp,
937 vm_size_t size,
938 vm_tag_t tag)
939 {
940 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
941 }
942
943 /*
944 * kmem_alloc_aligned:
945 *
946 * Like kmem_alloc_kobject, except that the memory is aligned.
947 * The size should be a power-of-2.
948 */
949
950 kern_return_t
951 kmem_alloc_aligned(
952 vm_map_t map,
953 vm_offset_t *addrp,
954 vm_size_t size,
955 vm_tag_t tag)
956 {
957 if ((size & (size - 1)) != 0)
958 panic("kmem_alloc_aligned: size not aligned");
959 return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
960 }
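/*
 * Sketch only (the example_* helper and the 64KB size are assumptions): a
 * power-of-2 request through kmem_alloc_aligned() comes back aligned to its
 * own size, since the mask passed down is (size - 1).
 */
static void
example_aligned_alloc(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 64 * 1024;

	if (kmem_alloc_aligned(kernel_map, &addr, size, vm_tag_bt()) == KERN_SUCCESS) {
		assert((addr & (size - 1)) == 0);
		kmem_free(kernel_map, addr, size);
	}
}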
961
962 /*
963 * kmem_alloc_pageable:
964 *
965 * Allocate pageable memory in the kernel's address map.
966 */
967
968 kern_return_t
969 kmem_alloc_pageable_external(
970 vm_map_t map,
971 vm_offset_t *addrp,
972 vm_size_t size)
973 {
974 return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt()));
975 }
976
977 kern_return_t
978 kmem_alloc_pageable(
979 vm_map_t map,
980 vm_offset_t *addrp,
981 vm_size_t size,
982 vm_tag_t tag)
983 {
984 vm_map_offset_t map_addr;
985 vm_map_size_t map_size;
986 kern_return_t kr;
987
988 #ifndef normal
989 map_addr = (vm_map_min(map)) + PAGE_SIZE;
990 #else
991 map_addr = vm_map_min(map);
992 #endif
993 map_size = vm_map_round_page(size,
994 VM_MAP_PAGE_MASK(map));
995
996 kr = vm_map_enter(map, &map_addr, map_size,
997 (vm_map_offset_t) 0,
998 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(tag),
999 VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
1000 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
1001
1002 if (kr != KERN_SUCCESS)
1003 return kr;
1004
1005 *addrp = CAST_DOWN(vm_offset_t, map_addr);
1006 return KERN_SUCCESS;
1007 }
1008
1009 /*
1010 * kmem_free:
1011 *
1012 * Release a region of kernel virtual memory allocated
1013 * with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
1014 * and return the physical pages associated with that region.
1015 */
1016
1017 void
1018 kmem_free(
1019 vm_map_t map,
1020 vm_offset_t addr,
1021 vm_size_t size)
1022 {
1023 kern_return_t kr;
1024
1025 assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);
1026
1027 TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
1028
1029 if(size == 0) {
1030 #if MACH_ASSERT
1031 printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
1032 #endif
1033 return;
1034 }
1035
1036 kr = vm_map_remove(map,
1037 vm_map_trunc_page(addr,
1038 VM_MAP_PAGE_MASK(map)),
1039 vm_map_round_page(addr + size,
1040 VM_MAP_PAGE_MASK(map)),
1041 VM_MAP_REMOVE_KUNWIRE);
1042 if (kr != KERN_SUCCESS)
1043 panic("kmem_free");
1044 }
1045
1046 /*
1047 * Allocate new pages in an object.
1048 */
1049
1050 kern_return_t
1051 kmem_alloc_pages(
1052 register vm_object_t object,
1053 register vm_object_offset_t offset,
1054 register vm_object_size_t size)
1055 {
1056 vm_object_size_t alloc_size;
1057
1058 alloc_size = vm_object_round_page(size);
1059 vm_object_lock(object);
1060 while (alloc_size) {
1061 register vm_page_t mem;
1062
1063
1064 /*
1065 * Allocate a page
1066 */
1067 while (VM_PAGE_NULL ==
1068 (mem = vm_page_alloc(object, offset))) {
1069 vm_object_unlock(object);
1070 VM_PAGE_WAIT();
1071 vm_object_lock(object);
1072 }
1073 mem->busy = FALSE;
1074
1075 alloc_size -= PAGE_SIZE;
1076 offset += PAGE_SIZE;
1077 }
1078 vm_object_unlock(object);
1079 return KERN_SUCCESS;
1080 }
1081
1082 /*
1083 * kmem_suballoc:
1084 *
1085 * Allocates a map to manage a subrange
1086 * of the kernel virtual address space.
1087 *
1088 * Arguments are as follows:
1089 *
1090 * parent Map to take range from
1091 * addr Address of start of range (IN/OUT)
1092 * size Size of range to find
1093 * pageable Can region be paged
1094 * anywhere Can region be located anywhere in map
1095 * new_map Pointer to new submap
1096 */
1097 kern_return_t
1098 kmem_suballoc(
1099 vm_map_t parent,
1100 vm_offset_t *addr,
1101 vm_size_t size,
1102 boolean_t pageable,
1103 int flags,
1104 vm_map_t *new_map)
1105 {
1106 vm_map_t map;
1107 vm_map_offset_t map_addr;
1108 vm_map_size_t map_size;
1109 kern_return_t kr;
1110
1111 map_size = vm_map_round_page(size,
1112 VM_MAP_PAGE_MASK(parent));
1113
1114 /*
1115 * Need reference on submap object because it is internal
1116 * to the vm_system. vm_object_enter will never be called
1117 * on it (usual source of reference for vm_map_enter).
1118 */
1119 vm_object_reference(vm_submap_object);
1120
1121 map_addr = ((flags & VM_FLAGS_ANYWHERE)
1122 ? vm_map_min(parent)
1123 : vm_map_trunc_page(*addr,
1124 VM_MAP_PAGE_MASK(parent)));
1125
1126 kr = vm_map_enter(parent, &map_addr, map_size,
1127 (vm_map_offset_t) 0, flags,
1128 vm_submap_object, (vm_object_offset_t) 0, FALSE,
1129 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
1130 if (kr != KERN_SUCCESS) {
1131 vm_object_deallocate(vm_submap_object);
1132 return (kr);
1133 }
1134
1135 pmap_reference(vm_map_pmap(parent));
1136 map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
1137 if (map == VM_MAP_NULL)
1138 panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
1139 /* inherit the parent map's page size */
1140 vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
1141
1142 kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
1143 if (kr != KERN_SUCCESS) {
1144 /*
1145 * See comment preceding vm_map_submap().
1146 */
1147 vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
1148 vm_map_deallocate(map); /* also removes ref to pmap */
1149 vm_object_deallocate(vm_submap_object);
1150 return (kr);
1151 }
1152 *addr = CAST_DOWN(vm_offset_t, map_addr);
1153 *new_map = map;
1154 return (KERN_SUCCESS);
1155 }
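/*
 * Hedged sketch of carving a submap out of kernel_map with kmem_suballoc(),
 * following the argument list documented above (the example_* helper, the
 * 128MB size and the VM_FLAGS_ANYWHERE placement are assumptions).
 */
static vm_map_t
example_make_submap(void)
{
	vm_map_t	submap = VM_MAP_NULL;
	vm_offset_t	base = 0;

	if (kmem_suballoc(kernel_map, &base, 128 * 1024 * 1024,
			  TRUE,			/* pageable */
			  VM_FLAGS_ANYWHERE,
			  &submap) != KERN_SUCCESS)
		return VM_MAP_NULL;

	/* allocations can now be made in [base, base + 128MB) via submap */
	return submap;
}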
1156
1157 /*
1158 * kmem_init:
1159 *
1160 * Initialize the kernel's virtual memory map, taking
1161 * into account all memory allocated up to this time.
1162 */
1163 void
1164 kmem_init(
1165 vm_offset_t start,
1166 vm_offset_t end)
1167 {
1168 vm_map_offset_t map_start;
1169 vm_map_offset_t map_end;
1170
1171 map_start = vm_map_trunc_page(start,
1172 VM_MAP_PAGE_MASK(kernel_map));
1173 map_end = vm_map_round_page(end,
1174 VM_MAP_PAGE_MASK(kernel_map));
1175
1176 kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
1177 map_end, FALSE);
1178 /*
1179 * Reserve virtual memory allocated up to this time.
1180 */
1181 if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
1182 vm_map_offset_t map_addr;
1183 kern_return_t kr;
1184
1185 map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
1186 kr = vm_map_enter(kernel_map,
1187 &map_addr,
1188 (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
1189 (vm_map_offset_t) 0,
1190 VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
1191 VM_OBJECT_NULL,
1192 (vm_object_offset_t) 0, FALSE,
1193 VM_PROT_NONE, VM_PROT_NONE,
1194 VM_INHERIT_DEFAULT);
1195
1196 if (kr != KERN_SUCCESS) {
1197 panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
1198 (uint64_t) start, (uint64_t) end,
1199 (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
1200 (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
1201 kr);
1202 }
1203 }
1204
1205 /*
1206 * Set the default global user wire limit which limits the amount of
1207 * memory that can be locked via mlock(). We set this to the total
1208 * amount of memory that is potentially usable by a user app (max_mem)
1209 * minus a certain amount. This can be overridden via a sysctl.
1210 */
1211 vm_global_no_user_wire_amount = MIN(max_mem*20/100,
1212 VM_NOT_USER_WIREABLE);
1213 vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
1214
1215 /* the default per user limit is the same as the global limit */
1216 vm_user_wire_limit = vm_global_user_wire_limit;
1217 }
1218
1219
1220 /*
1221 * Routine: copyinmap
1222 * Purpose:
1223 * Like copyin, except that fromaddr is an address
1224 * in the specified VM map. This implementation
1225 * is incomplete; it handles the current user map
1226 * and the kernel map/submaps.
1227 */
1228 kern_return_t
1229 copyinmap(
1230 vm_map_t map,
1231 vm_map_offset_t fromaddr,
1232 void *todata,
1233 vm_size_t length)
1234 {
1235 kern_return_t kr = KERN_SUCCESS;
1236 vm_map_t oldmap;
1237
1238 if (vm_map_pmap(map) == pmap_kernel())
1239 {
1240 /* assume a correct copy */
1241 memcpy(todata, CAST_DOWN(void *, fromaddr), length);
1242 }
1243 else if (current_map() == map)
1244 {
1245 if (copyin(fromaddr, todata, length) != 0)
1246 kr = KERN_INVALID_ADDRESS;
1247 }
1248 else
1249 {
1250 vm_map_reference(map);
1251 oldmap = vm_map_switch(map);
1252 if (copyin(fromaddr, todata, length) != 0)
1253 kr = KERN_INVALID_ADDRESS;
1254 vm_map_switch(oldmap);
1255 vm_map_deallocate(map);
1256 }
1257 return kr;
1258 }
1259
1260 /*
1261 * Routine: copyoutmap
1262 * Purpose:
1263 * Like copyout, except that toaddr is an address
1264 * in the specified VM map. This implementation
1265 * is incomplete; it handles the current user map
1266 * and the kernel map/submaps.
1267 */
1268 kern_return_t
1269 copyoutmap(
1270 vm_map_t map,
1271 void *fromdata,
1272 vm_map_address_t toaddr,
1273 vm_size_t length)
1274 {
1275 if (vm_map_pmap(map) == pmap_kernel()) {
1276 /* assume a correct copy */
1277 memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
1278 return KERN_SUCCESS;
1279 }
1280
1281 if (current_map() != map)
1282 return KERN_NOT_SUPPORTED;
1283
1284 if (copyout(fromdata, toaddr, length) != 0)
1285 return KERN_INVALID_ADDRESS;
1286
1287 return KERN_SUCCESS;
1288 }
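/*
 * Minimal sketch of the copyinmap()/copyoutmap() pair documented above
 * (assumptions: the example_* helper is hypothetical, `map` is a task map
 * the caller already references, and `uaddr` is valid within it).
 */
static kern_return_t
example_peek_poke(vm_map_t map, vm_map_offset_t uaddr)
{
	uint32_t	value;
	kern_return_t	kr;

	kr = copyinmap(map, uaddr, &value, sizeof (value));
	if (kr != KERN_SUCCESS)
		return kr;

	value++;
	/* copyoutmap() only handles the kernel map and the current map */
	return copyoutmap(map, &value, uaddr, sizeof (value));
}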
1289
1290
1291 kern_return_t
1292 vm_conflict_check(
1293 vm_map_t map,
1294 vm_map_offset_t off,
1295 vm_map_size_t len,
1296 memory_object_t pager,
1297 vm_object_offset_t file_off)
1298 {
1299 vm_map_entry_t entry;
1300 vm_object_t obj;
1301 vm_object_offset_t obj_off;
1302 vm_map_t base_map;
1303 vm_map_offset_t base_offset;
1304 vm_map_offset_t original_offset;
1305 kern_return_t kr;
1306 vm_map_size_t local_len;
1307
1308 base_map = map;
1309 base_offset = off;
1310 original_offset = off;
1311 kr = KERN_SUCCESS;
1312 vm_map_lock(map);
1313 while(vm_map_lookup_entry(map, off, &entry)) {
1314 local_len = len;
1315
1316 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
1317 vm_map_unlock(map);
1318 return KERN_SUCCESS;
1319 }
1320 if (entry->is_sub_map) {
1321 vm_map_t old_map;
1322
1323 old_map = map;
1324 vm_map_lock(VME_SUBMAP(entry));
1325 map = VME_SUBMAP(entry);
1326 off = VME_OFFSET(entry) + (off - entry->vme_start);
1327 vm_map_unlock(old_map);
1328 continue;
1329 }
1330 obj = VME_OBJECT(entry);
1331 obj_off = (off - entry->vme_start) + VME_OFFSET(entry);
1332 while(obj->shadow) {
1333 obj_off += obj->vo_shadow_offset;
1334 obj = obj->shadow;
1335 }
1336 if((obj->pager_created) && (obj->pager == pager)) {
1337 if(((obj->paging_offset) + obj_off) == file_off) {
1338 if(off != base_offset) {
1339 vm_map_unlock(map);
1340 return KERN_FAILURE;
1341 }
1342 kr = KERN_ALREADY_WAITING;
1343 } else {
1344 vm_object_offset_t obj_off_aligned;
1345 vm_object_offset_t file_off_aligned;
1346
1347 obj_off_aligned = obj_off & ~PAGE_MASK;
1348 file_off_aligned = file_off & ~PAGE_MASK;
1349
1350 if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
1351 /*
1352 * the target map and the file offset start in the same page
1353 * but are not identical...
1354 */
1355 vm_map_unlock(map);
1356 return KERN_FAILURE;
1357 }
1358 if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
1359 ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
1360 /*
1361 * some portion of the tail of the I/O will fall
1362 * within the encompass of the target map
1363 */
1364 vm_map_unlock(map);
1365 return KERN_FAILURE;
1366 }
1367 if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
1368 (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
1369 /*
1370 * the beginning page of the file offset falls within
1371 * the range covered by the target map
1372 */
1373 vm_map_unlock(map);
1374 return KERN_FAILURE;
1375 }
1376 }
1377 } else if(kr != KERN_SUCCESS) {
1378 vm_map_unlock(map);
1379 return KERN_FAILURE;
1380 }
1381
1382 if(len <= ((entry->vme_end - entry->vme_start) -
1383 (off - entry->vme_start))) {
1384 vm_map_unlock(map);
1385 return kr;
1386 } else {
1387 len -= (entry->vme_end - entry->vme_start) -
1388 (off - entry->vme_start);
1389 }
1390 base_offset = base_offset + (local_len - len);
1391 file_off = file_off + (local_len - len);
1392 off = base_offset;
1393 if(map != base_map) {
1394 vm_map_unlock(map);
1395 vm_map_lock(base_map);
1396 map = base_map;
1397 }
1398 }
1399
1400 vm_map_unlock(map);
1401 return kr;
1402 }
1403
1404 /*
1405 *
1406 * The following two functions are to be used when exposing kernel
1407 * addresses to userspace via any of the various debug or info
1408 * facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
1409 * and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
1410 * are exported to KEXTs.
1411 *
1412 * NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
1413 */
1414
1415 /*
1416 * vm_kernel_addrperm_external:
1417 *
1418 * Used when exposing an address to userspace which is in the kernel's
1419 * "heap". These addresses are not loaded from anywhere and are resultingly
1420 * unslid. We apply a permutation value to obscure the address.
1421 */
1422 void
1423 vm_kernel_addrperm_external(
1424 vm_offset_t addr,
1425 vm_offset_t *perm_addr)
1426 {
1427 if (addr == 0) {
1428 *perm_addr = 0;
1429 return;
1430 }
1431
1432 *perm_addr = (addr + vm_kernel_addrperm_ext);
1433 return;
1434 }
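/*
 * Assumed caller-side usage, sketched for illustration (the example_*
 * helper is hypothetical): obscure a kernel heap pointer before handing it
 * to a debug or info interface.  Within the kernel proper, the macro
 * VM_KERNEL_ADDRPERM() in vm_param.h is the preferred spelling.
 */
static uint64_t
example_expose_heap_pointer(void *obj)
{
	vm_offset_t	perm = 0;

	vm_kernel_addrperm_external((vm_offset_t) obj, &perm);
	return (uint64_t) perm;
}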
1435
1436 /*
1437 * vm_kernel_unslide_or_perm_external:
1438 *
1439 * Use this function when exposing an address to userspace that could come from
1440 * either kernel text/data *or* the heap.
1441 */
1442 void
1443 vm_kernel_unslide_or_perm_external(
1444 vm_offset_t addr,
1445 vm_offset_t *up_addr)
1446 {
1447 if (VM_KERNEL_IS_SLID(addr) || VM_KERNEL_IS_KEXT(addr) ||
1448 VM_KERNEL_IS_PRELINKTEXT(addr) || VM_KERNEL_IS_PRELINKINFO(addr) ||
1449 VM_KERNEL_IS_KEXT_LINKEDIT(addr)) {
1450 *up_addr = addr - vm_kernel_slide;
1451 return;
1452 }
1453
1454 vm_kernel_addrperm_external(addr, up_addr);
1455 return;
1456 }