1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_kern.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Kernel memory management.
64 */
65
66 #include <mach/kern_return.h>
67 #include <mach/vm_param.h>
68 #include <kern/assert.h>
69 #include <kern/thread.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_pageout.h>
75 #include <kern/misc_protos.h>
76 #include <vm/cpm.h>
77
78 #include <string.h>
79
80 #include <libkern/OSDebug.h>
81 #include <sys/kdebug.h>
82
83 /*
84 * Variables exported by this module.
85 */
86
87 vm_map_t kernel_map;
88 vm_map_t kernel_pageable_map;
89
90 extern boolean_t vm_kernel_ready;
91
92 /*
93 * Forward declarations for internal functions.
94 */
95 extern kern_return_t kmem_alloc_pages(
96 vm_object_t object,
97 vm_object_offset_t offset,
98 vm_object_size_t size);
99
100 kern_return_t
101 kmem_alloc_contig(
102 vm_map_t map,
103 vm_offset_t *addrp,
104 vm_size_t size,
105 vm_offset_t mask,
106 ppnum_t max_pnum,
107 ppnum_t pnum_mask,
108 int flags,
109 vm_tag_t tag)
110 {
111 vm_object_t object;
112 vm_object_offset_t offset;
113 vm_map_offset_t map_addr;
114 vm_map_offset_t map_mask;
115 vm_map_size_t map_size, i;
116 vm_map_entry_t entry;
117 vm_page_t m, pages;
118 kern_return_t kr;
119
120 if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
121 return KERN_INVALID_ARGUMENT;
122
123 map_size = vm_map_round_page(size,
124 VM_MAP_PAGE_MASK(map));
125 map_mask = (vm_map_offset_t)mask;
126
127 /* Check for zero allocation size (either directly or via overflow) */
128 if (map_size == 0) {
129 *addrp = 0;
130 return KERN_INVALID_ARGUMENT;
131 }
132
133 /*
134 * Allocate a new object (if necessary) and the reference we
135 * will be donating to the map entry. We must do this before
136 * locking the map, or risk deadlock with the default pager.
137 */
138 if ((flags & KMA_KOBJECT) != 0) {
139 object = kernel_object;
140 vm_object_reference(object);
141 } else {
142 object = vm_object_allocate(map_size);
143 }
144
145 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
146 if (KERN_SUCCESS != kr) {
147 vm_object_deallocate(object);
148 return kr;
149 }
150
151 if (object == kernel_object) {
152 offset = map_addr;
153 } else {
154 offset = 0;
155 }
156 VME_OBJECT_SET(entry, object);
157 VME_OFFSET_SET(entry, offset);
158 VME_ALIAS_SET(entry, tag);
159
160 /* Take an extra object ref in case the map entry gets deleted */
161 vm_object_reference(object);
162 vm_map_unlock(map);
163
164 kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);
165
166 if (kr != KERN_SUCCESS) {
167 vm_map_remove(map,
168 vm_map_trunc_page(map_addr,
169 VM_MAP_PAGE_MASK(map)),
170 vm_map_round_page(map_addr + map_size,
171 VM_MAP_PAGE_MASK(map)),
172 0);
173 vm_object_deallocate(object);
174 *addrp = 0;
175 return kr;
176 }
177
178 vm_object_lock(object);
179 for (i = 0; i < map_size; i += PAGE_SIZE) {
180 m = pages;
181 pages = NEXT_PAGE(m);
182 *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
183 m->busy = FALSE;
184 vm_page_insert(m, object, offset + i);
185 }
186 vm_object_unlock(object);
187
188 kr = vm_map_wire(map,
189 vm_map_trunc_page(map_addr,
190 VM_MAP_PAGE_MASK(map)),
191 vm_map_round_page(map_addr + map_size,
192 VM_MAP_PAGE_MASK(map)),
193 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag),
194 FALSE);
195
196 if (kr != KERN_SUCCESS) {
197 if (object == kernel_object) {
198 vm_object_lock(object);
199 vm_object_page_remove(object, offset, offset + map_size);
200 vm_object_unlock(object);
201 }
202 vm_map_remove(map,
203 vm_map_trunc_page(map_addr,
204 VM_MAP_PAGE_MASK(map)),
205 vm_map_round_page(map_addr + map_size,
206 VM_MAP_PAGE_MASK(map)),
207 0);
208 vm_object_deallocate(object);
209 return kr;
210 }
211 vm_object_deallocate(object);
212
213 if (object == kernel_object)
214 vm_map_simplify(map, map_addr);
215
216 *addrp = (vm_offset_t) map_addr;
217 assert((vm_map_offset_t) *addrp == map_addr);
218 return KERN_SUCCESS;
219 }
220
221 /*
222 * Master entry point for allocating kernel memory.
223 * NOTE: this routine is _never_ interrupt safe.
224 *
225 * map : map to allocate into
226 * addrp : pointer to start address of new memory
227 * size : size of memory requested
228 * flags : options
229 * KMA_HERE *addrp is base address, else "anywhere"
230 * KMA_NOPAGEWAIT don't wait for pages if unavailable
231 * KMA_KOBJECT use kernel_object
232 * KMA_LOMEM support for 32-bit devices in a 64-bit world;
233 * if set and a low-memory pool is available,
234 * grab pages from it... this also implies
235 * KMA_NOPAGEWAIT
236 */
237
238 kern_return_t
239 kernel_memory_allocate(
240 vm_map_t map,
241 vm_offset_t *addrp,
242 vm_size_t size,
243 vm_offset_t mask,
244 int flags,
245 vm_tag_t tag)
246 {
247 vm_object_t object;
248 vm_object_offset_t offset;
249 vm_object_offset_t pg_offset;
250 vm_map_entry_t entry = NULL;
251 vm_map_offset_t map_addr, fill_start;
252 vm_map_offset_t map_mask;
253 vm_map_size_t map_size, fill_size;
254 kern_return_t kr, pe_result;
255 vm_page_t mem;
256 vm_page_t guard_page_list = NULL;
257 vm_page_t wired_page_list = NULL;
258 int guard_page_count = 0;
259 int wired_page_count = 0;
260 int i;
261 int vm_alloc_flags;
262 vm_prot_t kma_prot;
263
264 if (! vm_kernel_ready) {
265 panic("kernel_memory_allocate: VM is not ready");
266 }
267
268 map_size = vm_map_round_page(size,
269 VM_MAP_PAGE_MASK(map));
270 map_mask = (vm_map_offset_t) mask;
271
272 vm_alloc_flags = VM_MAKE_TAG(tag);
273
274 /* Check for zero allocation size (either directly or via overflow) */
275 if (map_size == 0) {
276 *addrp = 0;
277 return KERN_INVALID_ARGUMENT;
278 }
279
280 /*
281 * Limit the size of a single extent of wired memory
282 * to try to limit the damage to the system if
283 * too many pages get wired down.
284 * The limit is raised to 2GB with a 128GB max physical limit,
285 * and scaled by installed memory above that.
286 */
287 if ( !(flags & KMA_VAONLY) && map_size > MAX(1ULL<<31, sane_size/64)) {
288 return KERN_RESOURCE_SHORTAGE;
289 }
290
291 /*
292 * Guard pages:
293 *
294 * Guard pages are implemented as fictitious pages. By placing guard pages
295 * on either end of a stack, they can help detect cases where a thread walks
296 * off either end of its stack. They are allocated and set up here, and attempts
297 * to access those pages are trapped in vm_fault_page().
298 *
299 * The map_size we were passed may include extra space for
300 * guard pages. If those were requested, then back it out of fill_size
301 * since vm_map_find_space() takes just the actual size not including
302 * guard pages. Similarly, fill_start indicates where the actual pages
303 * will begin in the range.
304 */
305
306 fill_start = 0;
307 fill_size = map_size;
308
309 if (flags & KMA_GUARD_FIRST) {
310 vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
311 fill_start += PAGE_SIZE_64;
312 fill_size -= PAGE_SIZE_64;
313 if (map_size < fill_start + fill_size) {
314 /* no space for a guard page */
315 *addrp = 0;
316 return KERN_INVALID_ARGUMENT;
317 }
318 guard_page_count++;
319 }
320 if (flags & KMA_GUARD_LAST) {
321 vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
322 fill_size -= PAGE_SIZE_64;
323 if (map_size <= fill_start + fill_size) {
324 /* no space for a guard page */
325 *addrp = 0;
326 return KERN_INVALID_ARGUMENT;
327 }
328 guard_page_count++;
329 }
330 wired_page_count = (int) (fill_size / PAGE_SIZE_64);
331 assert(wired_page_count * PAGE_SIZE_64 == fill_size);
332
333 for (i = 0; i < guard_page_count; i++) {
334 for (;;) {
335 mem = vm_page_grab_guard();
336
337 if (mem != VM_PAGE_NULL)
338 break;
339 if (flags & KMA_NOPAGEWAIT) {
340 kr = KERN_RESOURCE_SHORTAGE;
341 goto out;
342 }
343 vm_page_more_fictitious();
344 }
345 mem->snext = guard_page_list;
346 guard_page_list = mem;
347 }
348
349 if (! (flags & KMA_VAONLY)) {
350 for (i = 0; i < wired_page_count; i++) {
351 uint64_t unavailable;
352
353 for (;;) {
354 if (flags & KMA_LOMEM)
355 mem = vm_page_grablo();
356 else
357 mem = vm_page_grab();
358
359 if (mem != VM_PAGE_NULL)
360 break;
361
362 if (flags & KMA_NOPAGEWAIT) {
363 kr = KERN_RESOURCE_SHORTAGE;
364 goto out;
365 }
366 if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
367 kr = KERN_RESOURCE_SHORTAGE;
368 goto out;
369 }
370 unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
371
372 if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
373 kr = KERN_RESOURCE_SHORTAGE;
374 goto out;
375 }
376 VM_PAGE_WAIT();
377 }
378 mem->snext = wired_page_list;
379 wired_page_list = mem;
380 }
381 }
382
383 /*
384 * Allocate a new object (if necessary). We must do this before
385 * locking the map, or risk deadlock with the default pager.
386 */
387 if ((flags & KMA_KOBJECT) != 0) {
388 object = kernel_object;
389 vm_object_reference(object);
390 } else if ((flags & KMA_COMPRESSOR) != 0) {
391 object = compressor_object;
392 vm_object_reference(object);
393 } else {
394 object = vm_object_allocate(map_size);
395 }
396
397 if (flags & KMA_ATOMIC)
398 vm_alloc_flags |= VM_FLAGS_ATOMIC_ENTRY;
399
400 kr = vm_map_find_space(map, &map_addr,
401 fill_size, map_mask,
402 vm_alloc_flags, &entry);
403 if (KERN_SUCCESS != kr) {
404 vm_object_deallocate(object);
405 goto out;
406 }
407
408 if (object == kernel_object || object == compressor_object) {
409 offset = map_addr;
410 } else {
411 offset = 0;
412 }
413 VME_OBJECT_SET(entry, object);
414 VME_OFFSET_SET(entry, offset);
415
416 if (object != compressor_object)
417 entry->wired_count++;
418
419 if (flags & KMA_PERMANENT)
420 entry->permanent = TRUE;
421
422 if (object != kernel_object && object != compressor_object)
423 vm_object_reference(object);
424
425 vm_object_lock(object);
426 vm_map_unlock(map);
427
428 pg_offset = 0;
429
430 if (fill_start) {
431 if (guard_page_list == NULL)
432 panic("kernel_memory_allocate: guard_page_list == NULL");
433
434 mem = guard_page_list;
435 guard_page_list = mem->snext;
436 mem->snext = NULL;
437
438 vm_page_insert(mem, object, offset + pg_offset);
439
440 mem->busy = FALSE;
441 pg_offset += PAGE_SIZE_64;
442 }
443
444 kma_prot = VM_PROT_READ | VM_PROT_WRITE;
445
446 if (flags & KMA_VAONLY) {
447 pg_offset = fill_start + fill_size;
448 } else {
449 for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
450 if (wired_page_list == NULL)
451 panic("kernel_memory_allocate: wired_page_list == NULL");
452
453 mem = wired_page_list;
454 wired_page_list = mem->snext;
455 mem->snext = NULL;
456
457 assert(mem->wire_count == 0);
458 assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
459
460 mem->vm_page_q_state = VM_PAGE_IS_WIRED;
461 mem->wire_count++;
462 if (__improbable(mem->wire_count == 0)) {
463 panic("kernel_memory_allocate(%p): wire_count overflow",
464 mem);
465 }
466
467 vm_page_insert_wired(mem, object, offset + pg_offset, tag);
468
469 mem->busy = FALSE;
470 mem->pmapped = TRUE;
471 mem->wpmapped = TRUE;
472
473 PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
474 kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
475 PMAP_OPTIONS_NOWAIT, pe_result);
476
477 if (pe_result == KERN_RESOURCE_SHORTAGE) {
478 vm_object_unlock(object);
479
480 PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
481 kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
482
483 vm_object_lock(object);
484 }
485 if (flags & KMA_NOENCRYPT) {
486 bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
487
488 pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
489 }
490 }
491 }
492 if ((fill_start + fill_size) < map_size) {
493 if (guard_page_list == NULL)
494 panic("kernel_memory_allocate: guard_page_list == NULL");
495
496 mem = guard_page_list;
497 guard_page_list = mem->snext;
498 mem->snext = NULL;
499
500 vm_page_insert(mem, object, offset + pg_offset);
501
502 mem->busy = FALSE;
503 }
504 if (guard_page_list || wired_page_list)
505 panic("kernel_memory_allocate: non empty list\n");
506
507 if (! (flags & KMA_VAONLY)) {
508 vm_page_lockspin_queues();
509 vm_page_wire_count += wired_page_count;
510 vm_page_unlock_queues();
511 }
512
513 vm_object_unlock(object);
514
515 /*
516 * now that the pages are wired, we no longer have to fear coalescing
517 */
518 if (object == kernel_object || object == compressor_object)
519 vm_map_simplify(map, map_addr);
520 else
521 vm_object_deallocate(object);
522
523 /*
524 * Return the memory, not zeroed.
525 */
526 *addrp = CAST_DOWN(vm_offset_t, map_addr);
527 return KERN_SUCCESS;
528
529 out:
530 if (guard_page_list)
531 vm_page_free_list(guard_page_list, FALSE);
532
533 if (wired_page_list)
534 vm_page_free_list(wired_page_list, FALSE);
535
536 return kr;
537 }
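/*
 * Minimal caller sketch for the flag documentation above.  The payload
 * size, the KMA_KOBJECT flag, and the use of vm_tag_bt() as the tag are
 * illustrative assumptions, not requirements of the interface.  With
 * KMA_GUARD_FIRST the requested size must include room for the guard
 * pages, and the first usable byte sits one page past the returned
 * address (mirroring the convention used for guarded kernel stacks).
 */
#if 0	/* usage sketch, not compiled */
static kern_return_t
example_guarded_wired_alloc(vm_offset_t *out_addr, vm_size_t payload_size)
{
	vm_size_t	size;
	kern_return_t	kr;

	/* round the payload and add one guard page at each end */
	size = vm_map_round_page(payload_size, VM_MAP_PAGE_MASK(kernel_map))
	    + 2 * PAGE_SIZE;

	kr = kernel_memory_allocate(kernel_map, out_addr, size,
				    0,		/* no alignment mask */
				    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST,
				    vm_tag_bt());
	if (kr != KERN_SUCCESS)
		return kr;

	/* skip the leading guard page to reach the wired payload */
	*out_addr += PAGE_SIZE;
	return KERN_SUCCESS;
}
#endif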
538
539 kern_return_t
540 kernel_memory_populate(
541 vm_map_t map,
542 vm_offset_t addr,
543 vm_size_t size,
544 int flags,
545 vm_tag_t tag)
546 {
547 vm_object_t object;
548 vm_object_offset_t offset, pg_offset;
549 kern_return_t kr, pe_result;
550 vm_page_t mem;
551 vm_page_t page_list = NULL;
552 int page_count = 0;
553 int i;
554
555 page_count = (int) (size / PAGE_SIZE_64);
556
557 assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
558
559 if (flags & KMA_COMPRESSOR) {
560
561 pg_offset = page_count * PAGE_SIZE_64;
562
563 do {
564 for (;;) {
565 mem = vm_page_grab();
566
567 if (mem != VM_PAGE_NULL)
568 break;
569
570 VM_PAGE_WAIT();
571 }
572 mem->snext = page_list;
573 page_list = mem;
574
575 pg_offset -= PAGE_SIZE_64;
576
577 kr = pmap_enter_options(kernel_pmap,
578 addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
579 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
580 PMAP_OPTIONS_INTERNAL, NULL);
581 assert(kr == KERN_SUCCESS);
582
583 } while (pg_offset);
584
585 offset = addr;
586 object = compressor_object;
587
588 vm_object_lock(object);
589
590 for (pg_offset = 0;
591 pg_offset < size;
592 pg_offset += PAGE_SIZE_64) {
593
594 mem = page_list;
595 page_list = mem->snext;
596 mem->snext = NULL;
597
598 vm_page_insert(mem, object, offset + pg_offset);
599 assert(mem->busy);
600
601 mem->busy = FALSE;
602 mem->pmapped = TRUE;
603 mem->wpmapped = TRUE;
604 mem->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
605 }
606 vm_object_unlock(object);
607
608 return KERN_SUCCESS;
609 }
610
611 for (i = 0; i < page_count; i++) {
612 for (;;) {
613 if (flags & KMA_LOMEM)
614 mem = vm_page_grablo();
615 else
616 mem = vm_page_grab();
617
618 if (mem != VM_PAGE_NULL)
619 break;
620
621 if (flags & KMA_NOPAGEWAIT) {
622 kr = KERN_RESOURCE_SHORTAGE;
623 goto out;
624 }
625 if ((flags & KMA_LOMEM) &&
626 (vm_lopage_needed == TRUE)) {
627 kr = KERN_RESOURCE_SHORTAGE;
628 goto out;
629 }
630 VM_PAGE_WAIT();
631 }
632 mem->snext = page_list;
633 page_list = mem;
634 }
635 if (flags & KMA_KOBJECT) {
636 offset = addr;
637 object = kernel_object;
638
639 vm_object_lock(object);
640 } else {
641 /*
642 * If it's not the kernel object, we need to:
643 * lock map;
644 * lookup entry;
645 * lock object;
646 * take reference on object;
647 * unlock map;
648 */
649 panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
650 "!KMA_KOBJECT",
651 map, (uint64_t) addr, (uint64_t) size, flags);
652 }
653
654 for (pg_offset = 0;
655 pg_offset < size;
656 pg_offset += PAGE_SIZE_64) {
657
658 if (page_list == NULL)
659 panic("kernel_memory_populate: page_list == NULL");
660
661 mem = page_list;
662 page_list = mem->snext;
663 mem->snext = NULL;
664
665 assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
666 mem->vm_page_q_state = VM_PAGE_IS_WIRED;
667 mem->wire_count++;
668 if (__improbable(mem->wire_count == 0)) {
669 panic("kernel_memory_populate(%p): wire_count overflow",
670 mem);
671 }
672
673 vm_page_insert_wired(mem, object, offset + pg_offset, tag);
674
675 mem->busy = FALSE;
676 mem->pmapped = TRUE;
677 mem->wpmapped = TRUE;
678
679 PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
680 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
681 ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
682 PMAP_OPTIONS_NOWAIT, pe_result);
683
684 if (pe_result == KERN_RESOURCE_SHORTAGE) {
685
686 vm_object_unlock(object);
687
688 PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
689 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
690 ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE);
691
692 vm_object_lock(object);
693 }
694 if (flags & KMA_NOENCRYPT) {
695 bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
696 pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
697 }
698 }
699 vm_page_lock_queues();
700 vm_page_wire_count += page_count;
701 vm_page_unlock_queues();
702
703 vm_object_unlock(object);
704
705 return KERN_SUCCESS;
706
707 out:
708 if (page_list)
709 vm_page_free_list(page_list, FALSE);
710
711 return kr;
712 }
713
714
715 void
716 kernel_memory_depopulate(
717 vm_map_t map,
718 vm_offset_t addr,
719 vm_size_t size,
720 int flags)
721 {
722 vm_object_t object;
723 vm_object_offset_t offset, pg_offset;
724 vm_page_t mem;
725 vm_page_t local_freeq = NULL;
726
727 assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
728
729 if (flags & KMA_COMPRESSOR) {
730 offset = addr;
731 object = compressor_object;
732
733 vm_object_lock(object);
734 } else if (flags & KMA_KOBJECT) {
735 offset = addr;
736 object = kernel_object;
737
738 vm_object_lock(object);
739 } else {
740 offset = 0;
741 object = NULL;
742 /*
743 * If it's not the kernel object, we need to:
744 * lock map;
745 * lookup entry;
746 * lock object;
747 * unlock map;
748 */
749 panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
750 "!KMA_KOBJECT",
751 map, (uint64_t) addr, (uint64_t) size, flags);
752 }
753 pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
754
755 for (pg_offset = 0;
756 pg_offset < size;
757 pg_offset += PAGE_SIZE_64) {
758
759 mem = vm_page_lookup(object, offset + pg_offset);
760
761 assert(mem);
762
763 if (mem->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR)
764 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
765
766 mem->busy = TRUE;
767
768 assert(mem->tabled);
769 vm_page_remove(mem, TRUE);
770 assert(mem->busy);
771
772 assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
773 assert((mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
774 (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q));
775
776 mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
777 mem->snext = local_freeq;
778 local_freeq = mem;
779 }
780 vm_object_unlock(object);
781
782 if (local_freeq)
783 vm_page_free_list(local_freeq, TRUE);
784 }
785
786 /*
787 * kmem_alloc:
788 *
789 * Allocate wired-down memory in the kernel's address map
790 * or a submap. The memory is not zero-filled.
791 */
792
793 kern_return_t
794 kmem_alloc_external(
795 vm_map_t map,
796 vm_offset_t *addrp,
797 vm_size_t size)
798 {
799 return (kmem_alloc(map, addrp, size, vm_tag_bt()));
800 }
801
802
803 kern_return_t
804 kmem_alloc(
805 vm_map_t map,
806 vm_offset_t *addrp,
807 vm_size_t size,
808 vm_tag_t tag)
809 {
810 return kmem_alloc_flags(map, addrp, size, tag, 0);
811 }
812
813 kern_return_t
814 kmem_alloc_flags(
815 vm_map_t map,
816 vm_offset_t *addrp,
817 vm_size_t size,
818 vm_tag_t tag,
819 int flags)
820 {
821 kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
822 TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
823 return kr;
824 }
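/*
 * Minimal usage sketch for kmem_alloc/kmem_free.  The buffer size and the
 * use of vm_tag_bt() as the tag are assumptions made for the example.  As
 * the comment above notes, the memory is not zero-filled, so callers that
 * need cleared memory must zero it themselves.
 */
#if 0	/* usage sketch, not compiled */
static void
example_scratch_buffer(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	kr = kmem_alloc(kernel_map, &addr, size, vm_tag_bt());
	if (kr != KERN_SUCCESS)
		return;

	bzero((void *)addr, size);	/* kmem_alloc does not zero-fill */
	/* ... use the wired buffer ... */
	kmem_free(kernel_map, addr, size);
}
#endif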
825
826 /*
827 * kmem_realloc:
828 *
829 * Reallocate wired-down memory in the kernel's address map
830 * or a submap. Newly allocated pages are not zeroed.
831 * This can only be used on regions allocated with kmem_alloc.
832 *
833 * If successful, the pages in the old region are mapped twice.
834 * The old region is unchanged. Use kmem_free to get rid of it.
835 */
836 kern_return_t
837 kmem_realloc(
838 vm_map_t map,
839 vm_offset_t oldaddr,
840 vm_size_t oldsize,
841 vm_offset_t *newaddrp,
842 vm_size_t newsize,
843 vm_tag_t tag)
844 {
845 vm_object_t object;
846 vm_object_offset_t offset;
847 vm_map_offset_t oldmapmin;
848 vm_map_offset_t oldmapmax;
849 vm_map_offset_t newmapaddr;
850 vm_map_size_t oldmapsize;
851 vm_map_size_t newmapsize;
852 vm_map_entry_t oldentry;
853 vm_map_entry_t newentry;
854 vm_page_t mem;
855 kern_return_t kr;
856
857 oldmapmin = vm_map_trunc_page(oldaddr,
858 VM_MAP_PAGE_MASK(map));
859 oldmapmax = vm_map_round_page(oldaddr + oldsize,
860 VM_MAP_PAGE_MASK(map));
861 oldmapsize = oldmapmax - oldmapmin;
862 newmapsize = vm_map_round_page(newsize,
863 VM_MAP_PAGE_MASK(map));
864
865
866 /*
867 * Find the VM object backing the old region.
868 */
869
870 vm_map_lock(map);
871
872 if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
873 panic("kmem_realloc");
874 object = VME_OBJECT(oldentry);
875
876 /*
877 * Increase the size of the object and
878 * fill in the new region.
879 */
880
881 vm_object_reference(object);
882 /* by grabbing the object lock before unlocking the map */
883 /* we guarantee that we will panic if more than one */
884 /* attempt is made to realloc a kmem_alloc'd area */
885 vm_object_lock(object);
886 vm_map_unlock(map);
887 if (object->vo_size != oldmapsize)
888 panic("kmem_realloc");
889 object->vo_size = newmapsize;
890 vm_object_unlock(object);
891
892 /* allocate the new pages while the expanded portion of the */
893 /* object is still not mapped */
894 kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
895 vm_object_round_page(newmapsize-oldmapsize));
896
897 /*
898 * Find space for the new region.
899 */
900
901 kr = vm_map_find_space(map, &newmapaddr, newmapsize,
902 (vm_map_offset_t) 0, 0, &newentry);
903 if (kr != KERN_SUCCESS) {
904 vm_object_lock(object);
905 for(offset = oldmapsize;
906 offset < newmapsize; offset += PAGE_SIZE) {
907 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
908 VM_PAGE_FREE(mem);
909 }
910 }
911 object->vo_size = oldmapsize;
912 vm_object_unlock(object);
913 vm_object_deallocate(object);
914 return kr;
915 }
916 VME_OBJECT_SET(newentry, object);
917 VME_OFFSET_SET(newentry, 0);
918 VME_ALIAS_SET(newentry, tag);
919 assert(newentry->wired_count == 0);
920
921
922 /* add an extra reference in case we have someone doing an */
923 /* unexpected deallocate */
924 vm_object_reference(object);
925 vm_map_unlock(map);
926
927 kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
928 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(tag), FALSE);
929 if (KERN_SUCCESS != kr) {
930 vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
931 vm_object_lock(object);
932 for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
933 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
934 VM_PAGE_FREE(mem);
935 }
936 }
937 object->vo_size = oldmapsize;
938 vm_object_unlock(object);
939 vm_object_deallocate(object);
940 return (kr);
941 }
942 vm_object_deallocate(object);
943
944 *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
945 return KERN_SUCCESS;
946 }
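/*
 * Usage sketch for kmem_realloc, following the comment above: on success
 * the old region remains mapped and must be freed by the caller.  The use
 * of kernel_map and the parameter names are illustrative assumptions.
 */
#if 0	/* usage sketch, not compiled */
static kern_return_t
example_grow_region(vm_offset_t *addrp, vm_size_t oldsize, vm_size_t newsize, vm_tag_t tag)
{
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *addrp, oldsize, &newaddr, newsize, tag);
	if (kr != KERN_SUCCESS)
		return kr;

	/* the old region is unchanged; release it once the new one is adopted */
	kmem_free(kernel_map, *addrp, oldsize);
	*addrp = newaddr;
	return KERN_SUCCESS;
}
#endif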
947
948 /*
949 * kmem_alloc_kobject:
950 *
951 * Allocate wired-down memory in the kernel's address map
952 * or a submap. The memory is not zero-filled.
953 *
954 * The memory is allocated in the kernel_object.
955 * It may not be copied with vm_map_copy, and
956 * it may not be reallocated with kmem_realloc.
957 */
958
959 kern_return_t
960 kmem_alloc_kobject_external(
961 vm_map_t map,
962 vm_offset_t *addrp,
963 vm_size_t size)
964 {
965 return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
966 }
967
968 kern_return_t
969 kmem_alloc_kobject(
970 vm_map_t map,
971 vm_offset_t *addrp,
972 vm_size_t size,
973 vm_tag_t tag)
974 {
975 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
976 }
977
978 /*
979 * kmem_alloc_aligned:
980 *
981 * Like kmem_alloc_kobject, except that the memory is aligned.
982 * The size should be a power of 2.
983 */
984
985 kern_return_t
986 kmem_alloc_aligned(
987 vm_map_t map,
988 vm_offset_t *addrp,
989 vm_size_t size,
990 vm_tag_t tag)
991 {
992 if ((size & (size - 1)) != 0)
993 panic("kmem_alloc_aligned: size not aligned");
994 return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
995 }
996
997 /*
998 * kmem_alloc_pageable:
999 *
1000 * Allocate pageable memory in the kernel's address map.
1001 */
1002
1003 kern_return_t
1004 kmem_alloc_pageable_external(
1005 vm_map_t map,
1006 vm_offset_t *addrp,
1007 vm_size_t size)
1008 {
1009 return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt()));
1010 }
1011
1012 kern_return_t
1013 kmem_alloc_pageable(
1014 vm_map_t map,
1015 vm_offset_t *addrp,
1016 vm_size_t size,
1017 vm_tag_t tag)
1018 {
1019 vm_map_offset_t map_addr;
1020 vm_map_size_t map_size;
1021 kern_return_t kr;
1022
1023 #ifndef normal
1024 map_addr = (vm_map_min(map)) + PAGE_SIZE;
1025 #else
1026 map_addr = vm_map_min(map);
1027 #endif
1028 map_size = vm_map_round_page(size,
1029 VM_MAP_PAGE_MASK(map));
1030
1031 kr = vm_map_enter(map, &map_addr, map_size,
1032 (vm_map_offset_t) 0,
1033 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(tag),
1034 VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
1035 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
1036
1037 if (kr != KERN_SUCCESS)
1038 return kr;
1039
1040 *addrp = CAST_DOWN(vm_offset_t, map_addr);
1041 return KERN_SUCCESS;
1042 }
1043
1044 /*
1045 * kmem_free:
1046 *
1047 * Release a region of kernel virtual memory allocated
1048 * with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable,
1049 * and return the physical pages associated with that region.
1050 */
1051
1052 void
1053 kmem_free(
1054 vm_map_t map,
1055 vm_offset_t addr,
1056 vm_size_t size)
1057 {
1058 kern_return_t kr;
1059
1060 assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS);
1061
1062 TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
1063
1064 if(size == 0) {
1065 #if MACH_ASSERT
1066 printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
1067 #endif
1068 return;
1069 }
1070
1071 kr = vm_map_remove(map,
1072 vm_map_trunc_page(addr,
1073 VM_MAP_PAGE_MASK(map)),
1074 vm_map_round_page(addr + size,
1075 VM_MAP_PAGE_MASK(map)),
1076 VM_MAP_REMOVE_KUNWIRE);
1077 if (kr != KERN_SUCCESS)
1078 panic("kmem_free");
1079 }
1080
1081 /*
1082 * Allocate new pages in an object.
1083 */
1084
1085 kern_return_t
1086 kmem_alloc_pages(
1087 vm_object_t object,
1088 vm_object_offset_t offset,
1089 vm_object_size_t size)
1090 {
1091 vm_object_size_t alloc_size;
1092
1093 alloc_size = vm_object_round_page(size);
1094 vm_object_lock(object);
1095 while (alloc_size) {
1096 vm_page_t mem;
1097
1098
1099 /*
1100 * Allocate a page
1101 */
1102 while (VM_PAGE_NULL ==
1103 (mem = vm_page_alloc(object, offset))) {
1104 vm_object_unlock(object);
1105 VM_PAGE_WAIT();
1106 vm_object_lock(object);
1107 }
1108 mem->busy = FALSE;
1109
1110 alloc_size -= PAGE_SIZE;
1111 offset += PAGE_SIZE;
1112 }
1113 vm_object_unlock(object);
1114 return KERN_SUCCESS;
1115 }
1116
1117 /*
1118 * kmem_suballoc:
1119 *
1120 * Allocates a map to manage a subrange
1121 * of the kernel virtual address space.
1122 *
1123 * Arguments are as follows:
1124 *
1125 * parent Map to take range from
1126 * addr Address of start of range (IN/OUT)
1127 * size Size of range to find
1128 * pageable Can region be paged
1129 * anywhere Can region be located anywhere in map
1130 * new_map Pointer to new submap
1131 */
1132 kern_return_t
1133 kmem_suballoc(
1134 vm_map_t parent,
1135 vm_offset_t *addr,
1136 vm_size_t size,
1137 boolean_t pageable,
1138 int flags,
1139 vm_map_t *new_map)
1140 {
1141 vm_map_t map;
1142 vm_map_offset_t map_addr;
1143 vm_map_size_t map_size;
1144 kern_return_t kr;
1145
1146 map_size = vm_map_round_page(size,
1147 VM_MAP_PAGE_MASK(parent));
1148
1149 /*
1150 * Need reference on submap object because it is internal
1151 * to the vm_system. vm_object_enter will never be called
1152 * on it (usual source of reference for vm_map_enter).
1153 */
1154 vm_object_reference(vm_submap_object);
1155
1156 map_addr = ((flags & VM_FLAGS_ANYWHERE)
1157 ? vm_map_min(parent)
1158 : vm_map_trunc_page(*addr,
1159 VM_MAP_PAGE_MASK(parent)));
1160
1161 kr = vm_map_enter(parent, &map_addr, map_size,
1162 (vm_map_offset_t) 0, flags,
1163 vm_submap_object, (vm_object_offset_t) 0, FALSE,
1164 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
1165 if (kr != KERN_SUCCESS) {
1166 vm_object_deallocate(vm_submap_object);
1167 return (kr);
1168 }
1169
1170 pmap_reference(vm_map_pmap(parent));
1171 map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
1172 if (map == VM_MAP_NULL)
1173 panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
1174 /* inherit the parent map's page size */
1175 vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
1176
1177 kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
1178 if (kr != KERN_SUCCESS) {
1179 /*
1180 * See comment preceding vm_map_submap().
1181 */
1182 vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
1183 vm_map_deallocate(map); /* also removes ref to pmap */
1184 vm_object_deallocate(vm_submap_object);
1185 return (kr);
1186 }
1187 *addr = CAST_DOWN(vm_offset_t, map_addr);
1188 *new_map = map;
1189 return (KERN_SUCCESS);
1190 }
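/*
 * Usage sketch for kmem_suballoc: carving a pageable submap out of
 * kernel_map, placed anywhere.  The size and flag choices are assumptions
 * made for the example.
 */
#if 0	/* usage sketch, not compiled */
static vm_map_t
example_create_submap(vm_size_t size)
{
	vm_map_t	submap = VM_MAP_NULL;
	vm_offset_t	base = 0;	/* input ignored with VM_FLAGS_ANYWHERE; returns the chosen base */
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &base, size,
			   TRUE,		/* pageable */
			   VM_FLAGS_ANYWHERE,
			   &submap);
	if (kr != KERN_SUCCESS)
		return VM_MAP_NULL;

	/* later allocations can now target the submap instead of kernel_map */
	return submap;
}
#endif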
1191
1192 /*
1193 * kmem_init:
1194 *
1195 * Initialize the kernel's virtual memory map, taking
1196 * into account all memory allocated up to this time.
1197 */
1198 void
1199 kmem_init(
1200 vm_offset_t start,
1201 vm_offset_t end)
1202 {
1203 vm_map_offset_t map_start;
1204 vm_map_offset_t map_end;
1205
1206 map_start = vm_map_trunc_page(start,
1207 VM_MAP_PAGE_MASK(kernel_map));
1208 map_end = vm_map_round_page(end,
1209 VM_MAP_PAGE_MASK(kernel_map));
1210
1211 kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
1212 map_end, FALSE);
1213 /*
1214 * Reserve virtual memory allocated up to this time.
1215 */
1216 if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
1217 vm_map_offset_t map_addr;
1218 kern_return_t kr;
1219
1220 map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
1221 kr = vm_map_enter(kernel_map,
1222 &map_addr,
1223 (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
1224 (vm_map_offset_t) 0,
1225 VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
1226 VM_OBJECT_NULL,
1227 (vm_object_offset_t) 0, FALSE,
1228 VM_PROT_NONE, VM_PROT_NONE,
1229 VM_INHERIT_DEFAULT);
1230
1231 if (kr != KERN_SUCCESS) {
1232 panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
1233 (uint64_t) start, (uint64_t) end,
1234 (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
1235 (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
1236 kr);
1237 }
1238 }
1239
1240 /*
1241 * Set the default global user wire limit which limits the amount of
1242 * memory that can be locked via mlock(). We set this to the total
1243 * amount of memory that is potentially usable by a user app (max_mem)
1244 * minus a certain amount. This can be overridden via a sysctl.
1245 */
1246 vm_global_no_user_wire_amount = MIN(max_mem*20/100,
1247 VM_NOT_USER_WIREABLE);
1248 vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
1249
1250 /* the default per user limit is the same as the global limit */
1251 vm_user_wire_limit = vm_global_user_wire_limit;
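	/*
	 * Worked example of the calculation above (the 8 GB figure is
	 * illustrative, and assumes VM_NOT_USER_WIREABLE exceeds the 20%
	 * figure): with max_mem = 8 GB, max_mem*20/100 is roughly 1.6 GB,
	 * so vm_global_user_wire_limit and the default vm_user_wire_limit
	 * both come out to about 6.4 GB.
	 */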
1252 }
1253
1254
1255 /*
1256 * Routine: copyinmap
1257 * Purpose:
1258 * Like copyin, except that fromaddr is an address
1259 * in the specified VM map. This implementation
1260 * is incomplete; it handles the current user map
1261 * and the kernel map/submaps.
1262 */
1263 kern_return_t
1264 copyinmap(
1265 vm_map_t map,
1266 vm_map_offset_t fromaddr,
1267 void *todata,
1268 vm_size_t length)
1269 {
1270 kern_return_t kr = KERN_SUCCESS;
1271 vm_map_t oldmap;
1272
1273 if (vm_map_pmap(map) == pmap_kernel())
1274 {
1275 /* assume a correct copy */
1276 memcpy(todata, CAST_DOWN(void *, fromaddr), length);
1277 }
1278 else if (current_map() == map)
1279 {
1280 if (copyin(fromaddr, todata, length) != 0)
1281 kr = KERN_INVALID_ADDRESS;
1282 }
1283 else
1284 {
1285 vm_map_reference(map);
1286 oldmap = vm_map_switch(map);
1287 if (copyin(fromaddr, todata, length) != 0)
1288 kr = KERN_INVALID_ADDRESS;
1289 vm_map_switch(oldmap);
1290 vm_map_deallocate(map);
1291 }
1292 return kr;
1293 }
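/*
 * Usage sketch for copyinmap: reading a 32-bit value from an address in
 * the given VM map.  The word size and parameter names are assumptions
 * made for the example.
 */
#if 0	/* usage sketch, not compiled */
static kern_return_t
example_read_mapped_word(vm_map_t target_map, vm_map_offset_t uaddr, uint32_t *value)
{
	/* copies sizeof(*value) bytes out of target_map into the kernel buffer */
	return copyinmap(target_map, uaddr, value, sizeof(*value));
}
#endif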
1294
1295 /*
1296 * Routine: copyoutmap
1297 * Purpose:
1298 * Like copyout, except that toaddr is an address
1299 * in the specified VM map. This implementation
1300 * is incomplete; it handles the current user map
1301 * and the kernel map/submaps.
1302 */
1303 kern_return_t
1304 copyoutmap(
1305 vm_map_t map,
1306 void *fromdata,
1307 vm_map_address_t toaddr,
1308 vm_size_t length)
1309 {
1310 if (vm_map_pmap(map) == pmap_kernel()) {
1311 /* assume a correct copy */
1312 memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
1313 return KERN_SUCCESS;
1314 }
1315
1316 if (current_map() != map)
1317 return KERN_NOT_SUPPORTED;
1318
1319 if (copyout(fromdata, toaddr, length) != 0)
1320 return KERN_INVALID_ADDRESS;
1321
1322 return KERN_SUCCESS;
1323 }
1324
1325
1326 kern_return_t
1327 vm_conflict_check(
1328 vm_map_t map,
1329 vm_map_offset_t off,
1330 vm_map_size_t len,
1331 memory_object_t pager,
1332 vm_object_offset_t file_off)
1333 {
1334 vm_map_entry_t entry;
1335 vm_object_t obj;
1336 vm_object_offset_t obj_off;
1337 vm_map_t base_map;
1338 vm_map_offset_t base_offset;
1339 vm_map_offset_t original_offset;
1340 kern_return_t kr;
1341 vm_map_size_t local_len;
1342
1343 base_map = map;
1344 base_offset = off;
1345 original_offset = off;
1346 kr = KERN_SUCCESS;
1347 vm_map_lock(map);
1348 while(vm_map_lookup_entry(map, off, &entry)) {
1349 local_len = len;
1350
1351 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
1352 vm_map_unlock(map);
1353 return KERN_SUCCESS;
1354 }
1355 if (entry->is_sub_map) {
1356 vm_map_t old_map;
1357
1358 old_map = map;
1359 vm_map_lock(VME_SUBMAP(entry));
1360 map = VME_SUBMAP(entry);
1361 off = VME_OFFSET(entry) + (off - entry->vme_start);
1362 vm_map_unlock(old_map);
1363 continue;
1364 }
1365 obj = VME_OBJECT(entry);
1366 obj_off = (off - entry->vme_start) + VME_OFFSET(entry);
1367 while(obj->shadow) {
1368 obj_off += obj->vo_shadow_offset;
1369 obj = obj->shadow;
1370 }
1371 if((obj->pager_created) && (obj->pager == pager)) {
1372 if(((obj->paging_offset) + obj_off) == file_off) {
1373 if(off != base_offset) {
1374 vm_map_unlock(map);
1375 return KERN_FAILURE;
1376 }
1377 kr = KERN_ALREADY_WAITING;
1378 } else {
1379 vm_object_offset_t obj_off_aligned;
1380 vm_object_offset_t file_off_aligned;
1381
1382 obj_off_aligned = obj_off & ~PAGE_MASK;
1383 file_off_aligned = file_off & ~PAGE_MASK;
1384
1385 if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
1386 /*
1387 * the target map and the file offset start in the same page
1388 * but are not identical...
1389 */
1390 vm_map_unlock(map);
1391 return KERN_FAILURE;
1392 }
1393 if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
1394 ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
1395 /*
1396 * some portion of the tail of the I/O will fall
1397 * within the encompass of the target map
1398 */
1399 vm_map_unlock(map);
1400 return KERN_FAILURE;
1401 }
1402 if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
1403 (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
1404 /*
1405 * the beginning page of the file offset falls within
1406 * the target map's encompass
1407 */
1408 vm_map_unlock(map);
1409 return KERN_FAILURE;
1410 }
1411 }
1412 } else if(kr != KERN_SUCCESS) {
1413 vm_map_unlock(map);
1414 return KERN_FAILURE;
1415 }
1416
1417 if(len <= ((entry->vme_end - entry->vme_start) -
1418 (off - entry->vme_start))) {
1419 vm_map_unlock(map);
1420 return kr;
1421 } else {
1422 len -= (entry->vme_end - entry->vme_start) -
1423 (off - entry->vme_start);
1424 }
1425 base_offset = base_offset + (local_len - len);
1426 file_off = file_off + (local_len - len);
1427 off = base_offset;
1428 if(map != base_map) {
1429 vm_map_unlock(map);
1430 vm_map_lock(base_map);
1431 map = base_map;
1432 }
1433 }
1434
1435 vm_map_unlock(map);
1436 return kr;
1437 }
1438
1439 /*
1440 *
1441 * The following two functions are to be used when exposing kernel
1442 * addresses to userspace via any of the various debug or info
1443 * facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
1444 * and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
1445 * are exported to KEXTs.
1446 *
1447 * NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
1448 */
1449
1450 /*
1451 * vm_kernel_addrperm_external:
1452 *
1453 * Used when exposing an address to userspace that lies in the kernel's
1454 * "heap". These addresses are not loaded from anywhere and are therefore
1455 * unslid. We apply a permutation value to obscure the address.
1456 */
1457 void
1458 vm_kernel_addrperm_external(
1459 vm_offset_t addr,
1460 vm_offset_t *perm_addr)
1461 {
1462 if (addr == 0) {
1463 *perm_addr = 0;
1464 return;
1465 }
1466
1467 *perm_addr = (addr + vm_kernel_addrperm_ext);
1468 return;
1469 }
1470
1471 /*
1472 * vm_kernel_unslide_or_perm_external:
1473 *
1474 * Use this function when exposing an address to userspace that could come from
1475 * either kernel text/data *or* the heap.
1476 */
1477 void
1478 vm_kernel_unslide_or_perm_external(
1479 vm_offset_t addr,
1480 vm_offset_t *up_addr)
1481 {
1482 if (VM_KERNEL_IS_SLID(addr)) {
1483 *up_addr = addr - vm_kernel_slide;
1484 return;
1485 }
1486
1487 vm_kernel_addrperm_external(addr, up_addr);
1488 return;
1489 }
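/*
 * Usage sketch for the two exported helpers above: obscuring a kernel
 * pointer before it is reported to userspace.  The object parameter and
 * the uint64_t return type are assumptions made for the example.
 */
#if 0	/* usage sketch, not compiled */
static uint64_t
example_expose_kernel_pointer(const void *obj)
{
	vm_offset_t	up_addr;

	/* handles both slid text/data addresses and heap addresses */
	vm_kernel_unslide_or_perm_external((vm_offset_t)obj, &up_addr);
	return (uint64_t)up_addr;
}
#endif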