/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_compressor.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>

#include <libkern/OSDebug.h>
#include <libkern/crypto/sha2.h>
#include <sys/kdebug.h>

#include <san/kasan.h>

/*
 *	Variables exported by this module.
 */

vm_map_t kernel_map;
vm_map_t kernel_pageable_map;

extern boolean_t vm_kernel_ready;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size);

kern_return_t
kmem_alloc_contig(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size,
	vm_offset_t mask,
	ppnum_t     max_pnum,
	ppnum_t     pnum_mask,
	int         flags,
	vm_tag_t    tag)
{
	vm_object_t        object;
	vm_object_offset_t offset;
	vm_map_offset_t    map_addr;
	vm_map_offset_t    map_mask;
	vm_map_size_t      map_size, i;
	vm_map_entry_t     entry;
	vm_page_t          m, pages;
	kern_return_t      kr;

	assert(VM_KERN_MEMORY_NONE != tag);

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
		return KERN_INVALID_ARGUMENT;

	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	map_mask = (vm_map_offset_t)mask;

	/* Check for zero allocation size (either directly or via overflow) */
	if (map_size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a new object (if necessary) and the reference we
	 * will be donating to the map entry.  We must do this before
	 * locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0,
			       VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	if (object == kernel_object) {
		offset = map_addr;
	} else {
		offset = 0;
	}
	VME_OBJECT_SET(entry, object);
	VME_OFFSET_SET(entry, offset);

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map,
			      vm_map_trunc_page(map_addr,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(map_addr + map_size,
						VM_MAP_PAGE_MASK(map)),
			      0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	kr = vm_map_wire_kernel(map,
				vm_map_trunc_page(map_addr,
						  VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(map_addr + map_size,
						  VM_MAP_PAGE_MASK(map)),
				VM_PROT_DEFAULT, tag,
				FALSE);

	if (kr != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map,
			      vm_map_trunc_page(map_addr,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(map_addr + map_size,
						VM_MAP_PAGE_MASK(map)),
			      0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object) {
		vm_map_simplify(map, map_addr);
		vm_tag_update_size(tag, map_size);
	}
	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);

	return KERN_SUCCESS;
}

/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map		: map to allocate into
 * addrp	: pointer to start address of new memory
 * size		: size of memory requested
 * flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 *		  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *				 	if set and a lomemory pool is available
 *					grab pages from it... this also implies
 *					KMA_NOPAGEWAIT
 */
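/*
 * Illustrative sketch only (not taken from a real caller): a hypothetical
 * caller wanting four wired, zero-filled pages backed by the kernel object
 * might invoke this as
 *
 *	vm_offset_t	addr;
 *	kern_return_t	kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &addr, 4 * PAGE_SIZE, 0,
 *	                            KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_KALLOC);
 *	if (kr != KERN_SUCCESS)
 *		// handle KERN_RESOURCE_SHORTAGE / KERN_INVALID_ARGUMENT
 *
 * The tag shown (VM_KERN_MEMORY_KALLOC) is just an example; real callers
 * pass the accounting tag that matches their subsystem.
 */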
245 | ||
246 | kern_return_t | |
247 | kernel_memory_allocate( | |
248 | vm_map_t map, | |
249 | vm_offset_t *addrp, | |
250 | vm_size_t size, | |
251 | vm_offset_t mask, | |
252 | int flags, | |
253 | vm_tag_t tag) | |
254 | { | |
255 | vm_object_t object; | |
256 | vm_object_offset_t offset; | |
257 | vm_object_offset_t pg_offset; | |
258 | vm_map_entry_t entry = NULL; | |
259 | vm_map_offset_t map_addr, fill_start; | |
260 | vm_map_offset_t map_mask; | |
261 | vm_map_size_t map_size, fill_size; | |
262 | kern_return_t kr, pe_result; | |
263 | vm_page_t mem; | |
264 | vm_page_t guard_page_list = NULL; | |
265 | vm_page_t wired_page_list = NULL; | |
266 | int guard_page_count = 0; | |
267 | int wired_page_count = 0; | |
268 | int i; | |
269 | int vm_alloc_flags; | |
270 | vm_map_kernel_flags_t vmk_flags; | |
271 | vm_prot_t kma_prot; | |
272 | ||
273 | if (! vm_kernel_ready) { | |
274 | panic("kernel_memory_allocate: VM is not ready"); | |
275 | } | |
276 | ||
277 | map_size = vm_map_round_page(size, | |
278 | VM_MAP_PAGE_MASK(map)); | |
279 | map_mask = (vm_map_offset_t) mask; | |
280 | ||
281 | vm_alloc_flags = 0; //VM_MAKE_TAG(tag); | |
282 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
283 | ||
284 | /* Check for zero allocation size (either directly or via overflow) */ | |
285 | if (map_size == 0) { | |
286 | *addrp = 0; | |
287 | return KERN_INVALID_ARGUMENT; | |
288 | } | |
289 | ||
290 | /* | |
291 | * limit the size of a single extent of wired memory | |
292 | * to try and limit the damage to the system if | |
293 | * too many pages get wired down | |
294 | * limit raised to 2GB with 128GB max physical limit, | |
295 | * but scaled by installed memory above this | |
296 | */ | |
297 | if ( !(flags & KMA_VAONLY) && map_size > MAX(1ULL<<31, sane_size/64)) { | |
298 | return KERN_RESOURCE_SHORTAGE; | |
299 | } | |
300 | ||
	/*
	 * Guard pages:
	 *
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */
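	/*
	 * Worked example (illustrative numbers only, assuming 4 KB pages): a
	 * caller asking for a 16 KB stack with KMA_GUARD_FIRST | KMA_GUARD_LAST
	 * passes size = 24 KB (the 16 KB plus one page per guard), so
	 * map_size = 24 KB here.  The adjustments below then yield
	 * fill_start = 4 KB and fill_size = 16 KB: vm_map_find_space() is asked
	 * for the 16 KB of real pages, while vmkf_guard_before/after reserve
	 * the surrounding guard pages in the entry.
	 */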
315 | ||
316 | fill_start = 0; | |
317 | fill_size = map_size; | |
318 | ||
319 | if (flags & KMA_GUARD_FIRST) { | |
320 | vmk_flags.vmkf_guard_before = TRUE; | |
321 | fill_start += PAGE_SIZE_64; | |
322 | fill_size -= PAGE_SIZE_64; | |
323 | if (map_size < fill_start + fill_size) { | |
324 | /* no space for a guard page */ | |
325 | *addrp = 0; | |
326 | return KERN_INVALID_ARGUMENT; | |
327 | } | |
328 | guard_page_count++; | |
329 | } | |
330 | if (flags & KMA_GUARD_LAST) { | |
331 | vmk_flags.vmkf_guard_after = TRUE; | |
332 | fill_size -= PAGE_SIZE_64; | |
333 | if (map_size <= fill_start + fill_size) { | |
334 | /* no space for a guard page */ | |
335 | *addrp = 0; | |
336 | return KERN_INVALID_ARGUMENT; | |
337 | } | |
338 | guard_page_count++; | |
339 | } | |
340 | wired_page_count = (int) (fill_size / PAGE_SIZE_64); | |
341 | assert(wired_page_count * PAGE_SIZE_64 == fill_size); | |
342 | ||
343 | for (i = 0; i < guard_page_count; i++) { | |
344 | for (;;) { | |
345 | mem = vm_page_grab_guard(); | |
346 | ||
347 | if (mem != VM_PAGE_NULL) | |
348 | break; | |
349 | if (flags & KMA_NOPAGEWAIT) { | |
350 | kr = KERN_RESOURCE_SHORTAGE; | |
351 | goto out; | |
352 | } | |
353 | vm_page_more_fictitious(); | |
354 | } | |
355 | mem->snext = guard_page_list; | |
356 | guard_page_list = mem; | |
357 | } | |
358 | ||
359 | if (! (flags & KMA_VAONLY)) { | |
360 | for (i = 0; i < wired_page_count; i++) { | |
361 | uint64_t unavailable; | |
362 | ||
363 | for (;;) { | |
364 | if (flags & KMA_LOMEM) | |
365 | mem = vm_page_grablo(); | |
366 | else | |
367 | mem = vm_page_grab(); | |
368 | ||
369 | if (mem != VM_PAGE_NULL) | |
370 | break; | |
371 | ||
372 | if (flags & KMA_NOPAGEWAIT) { | |
373 | kr = KERN_RESOURCE_SHORTAGE; | |
374 | goto out; | |
375 | } | |
376 | if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) { | |
377 | kr = KERN_RESOURCE_SHORTAGE; | |
378 | goto out; | |
379 | } | |
380 | unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE; | |
381 | ||
382 | if (unavailable > max_mem || map_size > (max_mem - unavailable)) { | |
383 | kr = KERN_RESOURCE_SHORTAGE; | |
384 | goto out; | |
385 | } | |
386 | VM_PAGE_WAIT(); | |
387 | } | |
388 | if (KMA_ZERO & flags) vm_page_zero_fill(mem); | |
389 | mem->snext = wired_page_list; | |
390 | wired_page_list = mem; | |
391 | } | |
392 | } | |
393 | ||
394 | /* | |
395 | * Allocate a new object (if necessary). We must do this before | |
396 | * locking the map, or risk deadlock with the default pager. | |
397 | */ | |
398 | if ((flags & KMA_KOBJECT) != 0) { | |
399 | object = kernel_object; | |
400 | vm_object_reference(object); | |
401 | } else if ((flags & KMA_COMPRESSOR) != 0) { | |
402 | object = compressor_object; | |
403 | vm_object_reference(object); | |
404 | } else { | |
405 | object = vm_object_allocate(map_size); | |
406 | } | |
407 | ||
408 | if (flags & KMA_ATOMIC) | |
409 | vmk_flags.vmkf_atomic_entry = TRUE; | |
410 | ||
411 | kr = vm_map_find_space(map, &map_addr, | |
412 | fill_size, map_mask, | |
413 | vm_alloc_flags, vmk_flags, tag, &entry); | |
414 | if (KERN_SUCCESS != kr) { | |
415 | vm_object_deallocate(object); | |
416 | goto out; | |
417 | } | |
418 | ||
419 | if (object == kernel_object || object == compressor_object) { | |
420 | offset = map_addr; | |
421 | } else { | |
422 | offset = 0; | |
423 | } | |
424 | VME_OBJECT_SET(entry, object); | |
425 | VME_OFFSET_SET(entry, offset); | |
426 | ||
427 | if (object != compressor_object) | |
428 | entry->wired_count++; | |
429 | ||
430 | if (flags & KMA_PERMANENT) | |
431 | entry->permanent = TRUE; | |
432 | ||
433 | if (object != kernel_object && object != compressor_object) | |
434 | vm_object_reference(object); | |
435 | ||
436 | vm_object_lock(object); | |
437 | vm_map_unlock(map); | |
438 | ||
439 | pg_offset = 0; | |
440 | ||
441 | if (fill_start) { | |
442 | if (guard_page_list == NULL) | |
443 | panic("kernel_memory_allocate: guard_page_list == NULL"); | |
444 | ||
445 | mem = guard_page_list; | |
446 | guard_page_list = mem->snext; | |
447 | mem->snext = NULL; | |
448 | ||
449 | vm_page_insert(mem, object, offset + pg_offset); | |
450 | ||
451 | mem->busy = FALSE; | |
452 | pg_offset += PAGE_SIZE_64; | |
453 | } | |
454 | ||
455 | kma_prot = VM_PROT_READ | VM_PROT_WRITE; | |
456 | ||
457 | #if KASAN | |
458 | if (!(flags & KMA_VAONLY)) { | |
459 | /* for VAONLY mappings we notify in populate only */ | |
460 | kasan_notify_address(map_addr, size); | |
461 | } | |
462 | #endif | |
463 | ||
464 | if (flags & KMA_VAONLY) { | |
465 | pg_offset = fill_start + fill_size; | |
466 | } else { | |
467 | for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) { | |
468 | if (wired_page_list == NULL) | |
469 | panic("kernel_memory_allocate: wired_page_list == NULL"); | |
470 | ||
471 | mem = wired_page_list; | |
472 | wired_page_list = mem->snext; | |
473 | mem->snext = NULL; | |
474 | ||
475 | assert(mem->wire_count == 0); | |
476 | assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q); | |
477 | ||
478 | mem->vm_page_q_state = VM_PAGE_IS_WIRED; | |
479 | mem->wire_count++; | |
480 | if (__improbable(mem->wire_count == 0)) { | |
481 | panic("kernel_memory_allocate(%p): wire_count overflow", | |
482 | mem); | |
483 | } | |
484 | ||
485 | vm_page_insert_wired(mem, object, offset + pg_offset, tag); | |
486 | ||
487 | mem->busy = FALSE; | |
488 | mem->pmapped = TRUE; | |
489 | mem->wpmapped = TRUE; | |
490 | ||
491 | PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem, | |
492 | kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, | |
493 | PMAP_OPTIONS_NOWAIT, pe_result); | |
494 | ||
495 | if (pe_result == KERN_RESOURCE_SHORTAGE) { | |
496 | vm_object_unlock(object); | |
497 | ||
498 | PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, | |
499 | kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, | |
500 | pe_result); | |
501 | ||
502 | vm_object_lock(object); | |
503 | } | |
504 | ||
505 | assert(pe_result == KERN_SUCCESS); | |
506 | ||
507 | if (flags & KMA_NOENCRYPT) { | |
508 | bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE); | |
509 | ||
510 | pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); | |
511 | } | |
512 | } | |
513 | if (kernel_object == object) vm_tag_update_size(tag, fill_size); | |
514 | } | |
515 | if ((fill_start + fill_size) < map_size) { | |
516 | if (guard_page_list == NULL) | |
517 | panic("kernel_memory_allocate: guard_page_list == NULL"); | |
518 | ||
519 | mem = guard_page_list; | |
520 | guard_page_list = mem->snext; | |
521 | mem->snext = NULL; | |
522 | ||
523 | vm_page_insert(mem, object, offset + pg_offset); | |
524 | ||
525 | mem->busy = FALSE; | |
526 | } | |
527 | if (guard_page_list || wired_page_list) | |
528 | panic("kernel_memory_allocate: non empty list\n"); | |
529 | ||
530 | if (! (flags & KMA_VAONLY)) { | |
531 | vm_page_lockspin_queues(); | |
532 | vm_page_wire_count += wired_page_count; | |
533 | vm_page_unlock_queues(); | |
534 | } | |
535 | ||
536 | vm_object_unlock(object); | |
537 | ||
538 | /* | |
539 | * now that the pages are wired, we no longer have to fear coalesce | |
540 | */ | |
541 | if (object == kernel_object || object == compressor_object) | |
542 | vm_map_simplify(map, map_addr); | |
543 | else | |
544 | vm_object_deallocate(object); | |
545 | ||
546 | /* | |
547 | * Return the memory, not zeroed. | |
548 | */ | |
549 | *addrp = CAST_DOWN(vm_offset_t, map_addr); | |
550 | return KERN_SUCCESS; | |
551 | ||
552 | out: | |
553 | if (guard_page_list) | |
554 | vm_page_free_list(guard_page_list, FALSE); | |
555 | ||
556 | if (wired_page_list) | |
557 | vm_page_free_list(wired_page_list, FALSE); | |
558 | ||
559 | return kr; | |
560 | } | |
561 | ||
562 | kern_return_t | |
563 | kernel_memory_populate( | |
564 | vm_map_t map, | |
565 | vm_offset_t addr, | |
566 | vm_size_t size, | |
567 | int flags, | |
568 | vm_tag_t tag) | |
569 | { | |
570 | vm_object_t object; | |
571 | vm_object_offset_t offset, pg_offset; | |
572 | kern_return_t kr, pe_result; | |
573 | vm_page_t mem; | |
574 | vm_page_t page_list = NULL; | |
575 | int page_count = 0; | |
576 | int i; | |
577 | ||
578 | page_count = (int) (size / PAGE_SIZE_64); | |
579 | ||
580 | assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT)); | |
581 | ||
582 | if (flags & KMA_COMPRESSOR) { | |
583 | ||
584 | pg_offset = page_count * PAGE_SIZE_64; | |
585 | ||
586 | do { | |
587 | for (;;) { | |
588 | mem = vm_page_grab(); | |
589 | ||
590 | if (mem != VM_PAGE_NULL) | |
591 | break; | |
592 | ||
593 | VM_PAGE_WAIT(); | |
594 | } | |
595 | if (KMA_ZERO & flags) vm_page_zero_fill(mem); | |
596 | mem->snext = page_list; | |
597 | page_list = mem; | |
598 | ||
599 | pg_offset -= PAGE_SIZE_64; | |
600 | ||
601 | kr = pmap_enter_options(kernel_pmap, | |
602 | addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem), | |
603 | VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, | |
604 | PMAP_OPTIONS_INTERNAL, NULL); | |
605 | assert(kr == KERN_SUCCESS); | |
606 | ||
607 | } while (pg_offset); | |
608 | ||
609 | offset = addr; | |
610 | object = compressor_object; | |
611 | ||
612 | vm_object_lock(object); | |
613 | ||
614 | for (pg_offset = 0; | |
615 | pg_offset < size; | |
616 | pg_offset += PAGE_SIZE_64) { | |
617 | ||
618 | mem = page_list; | |
619 | page_list = mem->snext; | |
620 | mem->snext = NULL; | |
621 | ||
622 | vm_page_insert(mem, object, offset + pg_offset); | |
623 | assert(mem->busy); | |
624 | ||
625 | mem->busy = FALSE; | |
626 | mem->pmapped = TRUE; | |
627 | mem->wpmapped = TRUE; | |
628 | mem->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR; | |
629 | } | |
630 | vm_object_unlock(object); | |
631 | ||
632 | #if KASAN | |
633 | if (map == compressor_map) { | |
634 | kasan_notify_address_nopoison(addr, size); | |
635 | } else { | |
636 | kasan_notify_address(addr, size); | |
637 | } | |
638 | #endif | |
639 | return KERN_SUCCESS; | |
640 | } | |
641 | ||
642 | for (i = 0; i < page_count; i++) { | |
643 | for (;;) { | |
644 | if (flags & KMA_LOMEM) | |
645 | mem = vm_page_grablo(); | |
646 | else | |
647 | mem = vm_page_grab(); | |
648 | ||
649 | if (mem != VM_PAGE_NULL) | |
650 | break; | |
651 | ||
652 | if (flags & KMA_NOPAGEWAIT) { | |
653 | kr = KERN_RESOURCE_SHORTAGE; | |
654 | goto out; | |
655 | } | |
656 | if ((flags & KMA_LOMEM) && | |
657 | (vm_lopage_needed == TRUE)) { | |
658 | kr = KERN_RESOURCE_SHORTAGE; | |
659 | goto out; | |
660 | } | |
661 | VM_PAGE_WAIT(); | |
662 | } | |
663 | if (KMA_ZERO & flags) vm_page_zero_fill(mem); | |
664 | mem->snext = page_list; | |
665 | page_list = mem; | |
666 | } | |
667 | if (flags & KMA_KOBJECT) { | |
668 | offset = addr; | |
669 | object = kernel_object; | |
670 | ||
671 | vm_object_lock(object); | |
672 | } else { | |
673 | /* | |
674 | * If it's not the kernel object, we need to: | |
675 | * lock map; | |
676 | * lookup entry; | |
677 | * lock object; | |
678 | * take reference on object; | |
679 | * unlock map; | |
680 | */ | |
681 | panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): " | |
682 | "!KMA_KOBJECT", | |
683 | map, (uint64_t) addr, (uint64_t) size, flags); | |
684 | } | |
685 | ||
686 | for (pg_offset = 0; | |
687 | pg_offset < size; | |
688 | pg_offset += PAGE_SIZE_64) { | |
689 | ||
690 | if (page_list == NULL) | |
691 | panic("kernel_memory_populate: page_list == NULL"); | |
692 | ||
693 | mem = page_list; | |
694 | page_list = mem->snext; | |
695 | mem->snext = NULL; | |
696 | ||
697 | assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q); | |
698 | mem->vm_page_q_state = VM_PAGE_IS_WIRED; | |
699 | mem->wire_count++; | |
700 | if (__improbable(mem->wire_count == 0)) { | |
701 | panic("kernel_memory_populate(%p): wire_count overflow", | |
702 | mem); | |
703 | } | |
704 | ||
705 | vm_page_insert_wired(mem, object, offset + pg_offset, tag); | |
706 | ||
707 | mem->busy = FALSE; | |
708 | mem->pmapped = TRUE; | |
709 | mem->wpmapped = TRUE; | |
710 | ||
711 | PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem, | |
712 | VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, | |
713 | ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, | |
714 | PMAP_OPTIONS_NOWAIT, pe_result); | |
715 | ||
716 | if (pe_result == KERN_RESOURCE_SHORTAGE) { | |
717 | ||
718 | vm_object_unlock(object); | |
719 | ||
720 | PMAP_ENTER(kernel_pmap, addr + pg_offset, mem, | |
721 | VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, | |
722 | ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, | |
723 | pe_result); | |
724 | ||
725 | vm_object_lock(object); | |
726 | } | |
727 | ||
728 | assert(pe_result == KERN_SUCCESS); | |
729 | ||
730 | if (flags & KMA_NOENCRYPT) { | |
731 | bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE); | |
732 | pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); | |
733 | } | |
734 | } | |
735 | vm_page_lock_queues(); | |
736 | vm_page_wire_count += page_count; | |
737 | vm_page_unlock_queues(); | |
738 | ||
739 | if (kernel_object == object) vm_tag_update_size(tag, size); | |
740 | ||
741 | vm_object_unlock(object); | |
742 | ||
743 | #if KASAN | |
744 | if (map == compressor_map) { | |
745 | kasan_notify_address_nopoison(addr, size); | |
746 | } else { | |
747 | kasan_notify_address(addr, size); | |
748 | } | |
749 | #endif | |
750 | return KERN_SUCCESS; | |
751 | ||
752 | out: | |
753 | if (page_list) | |
754 | vm_page_free_list(page_list, FALSE); | |
755 | ||
756 | return kr; | |
757 | } | |
758 | ||
759 | ||
760 | void | |
761 | kernel_memory_depopulate( | |
762 | vm_map_t map, | |
763 | vm_offset_t addr, | |
764 | vm_size_t size, | |
765 | int flags) | |
766 | { | |
767 | vm_object_t object; | |
768 | vm_object_offset_t offset, pg_offset; | |
769 | vm_page_t mem; | |
770 | vm_page_t local_freeq = NULL; | |
771 | ||
772 | assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT)); | |
773 | ||
774 | if (flags & KMA_COMPRESSOR) { | |
775 | offset = addr; | |
776 | object = compressor_object; | |
777 | ||
778 | vm_object_lock(object); | |
779 | } else if (flags & KMA_KOBJECT) { | |
780 | offset = addr; | |
781 | object = kernel_object; | |
782 | vm_object_lock(object); | |
783 | } else { | |
784 | offset = 0; | |
785 | object = NULL; | |
786 | /* | |
787 | * If it's not the kernel object, we need to: | |
788 | * lock map; | |
789 | * lookup entry; | |
790 | * lock object; | |
791 | * unlock map; | |
792 | */ | |
793 | panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): " | |
794 | "!KMA_KOBJECT", | |
795 | map, (uint64_t) addr, (uint64_t) size, flags); | |
796 | } | |
797 | pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE); | |
798 | ||
799 | for (pg_offset = 0; | |
800 | pg_offset < size; | |
801 | pg_offset += PAGE_SIZE_64) { | |
802 | ||
803 | mem = vm_page_lookup(object, offset + pg_offset); | |
804 | ||
805 | assert(mem); | |
806 | ||
807 | if (mem->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR) | |
808 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem)); | |
809 | ||
810 | mem->busy = TRUE; | |
811 | ||
812 | assert(mem->tabled); | |
813 | vm_page_remove(mem, TRUE); | |
814 | assert(mem->busy); | |
815 | ||
816 | assert(mem->pageq.next == 0 && mem->pageq.prev == 0); | |
817 | assert((mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) || | |
818 | (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q)); | |
819 | ||
820 | mem->vm_page_q_state = VM_PAGE_NOT_ON_Q; | |
821 | mem->snext = local_freeq; | |
822 | local_freeq = mem; | |
823 | } | |
824 | vm_object_unlock(object); | |
825 | ||
826 | if (local_freeq) | |
827 | vm_page_free_list(local_freeq, TRUE); | |
828 | } | |
829 | ||
830 | /* | |
831 | * kmem_alloc: | |
832 | * | |
833 | * Allocate wired-down memory in the kernel's address map | |
834 | * or a submap. The memory is not zero-filled. | |
835 | */ | |
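/*
 * Sketch of a typical use, for illustration only (buf, len and the tag are
 * hypothetical names, not taken from a real caller):
 *
 *	vm_offset_t	buf;
 *	kern_return_t	kr;
 *
 *	kr = kmem_alloc(kernel_map, &buf, round_page(len), VM_KERN_MEMORY_FILE);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	// ... use buf ...
 *	kmem_free(kernel_map, buf, round_page(len));
 */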
836 | ||
837 | kern_return_t | |
838 | kmem_alloc_external( | |
839 | vm_map_t map, | |
840 | vm_offset_t *addrp, | |
841 | vm_size_t size) | |
842 | { | |
843 | return (kmem_alloc(map, addrp, size, vm_tag_bt())); | |
844 | } | |
845 | ||
846 | ||
847 | kern_return_t | |
848 | kmem_alloc( | |
849 | vm_map_t map, | |
850 | vm_offset_t *addrp, | |
851 | vm_size_t size, | |
852 | vm_tag_t tag) | |
853 | { | |
854 | return kmem_alloc_flags(map, addrp, size, tag, 0); | |
855 | } | |
856 | ||
857 | kern_return_t | |
858 | kmem_alloc_flags( | |
859 | vm_map_t map, | |
860 | vm_offset_t *addrp, | |
861 | vm_size_t size, | |
862 | vm_tag_t tag, | |
863 | int flags) | |
864 | { | |
865 | kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag); | |
866 | TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp); | |
867 | return kr; | |
868 | } | |
869 | ||
870 | /* | |
871 | * kmem_realloc: | |
872 | * | |
873 | * Reallocate wired-down memory in the kernel's address map | |
874 | * or a submap. Newly allocated pages are not zeroed. | |
875 | * This can only be used on regions allocated with kmem_alloc. | |
876 | * | |
877 | * If successful, the pages in the old region are mapped twice. | |
878 | * The old region is unchanged. Use kmem_free to get rid of it. | |
879 | */ | |
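/*
 * Illustrative pattern only (old_buf/old_size/new_size are hypothetical):
 * because the old region stays mapped on success, the caller is expected
 * to release it explicitly, e.g.
 *
 *	vm_offset_t new_buf;
 *
 *	kr = kmem_realloc(kernel_map, old_buf, old_size, &new_buf, new_size, tag);
 *	if (kr == KERN_SUCCESS) {
 *		kmem_free(kernel_map, old_buf, old_size);	// drop the old mapping
 *		old_buf = new_buf;
 *	}
 */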
kern_return_t
kmem_realloc(
	vm_map_t    map,
	vm_offset_t oldaddr,
	vm_size_t   oldsize,
	vm_offset_t *newaddrp,
	vm_size_t   newsize,
	vm_tag_t    tag)
{
	vm_object_t        object;
	vm_object_offset_t offset;
	vm_map_offset_t    oldmapmin;
	vm_map_offset_t    oldmapmax;
	vm_map_offset_t    newmapaddr;
	vm_map_size_t      oldmapsize;
	vm_map_size_t      newmapsize;
	vm_map_entry_t     oldentry;
	vm_map_entry_t     newentry;
	vm_page_t          mem;
	kern_return_t      kr;

	oldmapmin = vm_map_trunc_page(oldaddr,
				      VM_MAP_PAGE_MASK(map));
	oldmapmax = vm_map_round_page(oldaddr + oldsize,
				      VM_MAP_PAGE_MASK(map));
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize,
				       VM_MAP_PAGE_MASK(map));
	if (newmapsize < newsize) {
		/* overflow */
		*newaddrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = VME_OBJECT(oldentry);

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->vo_size != oldmapsize)
		panic("kmem_realloc");
	object->vo_size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize-oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0,
			       VM_MAP_KERNEL_FLAGS_NONE,
			       tag,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	VME_OBJECT_SET(newentry, object);
	VME_OFFSET_SET(newentry, 0);
	assert(newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize,
				VM_PROT_DEFAULT, tag, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				VM_PAGE_FREE(mem);
			}
		}
		object->vo_size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	if (kernel_object == object) vm_tag_update_size(tag, newmapsize);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}

/*
 *	kmem_alloc_kobject:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_kobject_external(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size)
{
	return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
}

kern_return_t
kmem_alloc_kobject(
	vm_map_t    map,
	vm_offset_t *addrp,
	vm_size_t   size,
	vm_tag_t    tag)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_kobject, except that the memory is aligned.
 *	The size should be a power-of-2.
 */
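/*
 * Example (illustrative): because size - 1 is passed as the alignment mask
 * below, a power-of-two request such as
 *
 *	kr = kmem_alloc_aligned(kernel_map, &addr, 64 * 1024, tag);
 *
 * returns an address aligned on a 64 KB boundary.
 */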
1036 | ||
1037 | kern_return_t | |
1038 | kmem_alloc_aligned( | |
1039 | vm_map_t map, | |
1040 | vm_offset_t *addrp, | |
1041 | vm_size_t size, | |
1042 | vm_tag_t tag) | |
1043 | { | |
1044 | if ((size & (size - 1)) != 0) | |
1045 | panic("kmem_alloc_aligned: size not aligned"); | |
1046 | return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag); | |
1047 | } | |
1048 | ||
1049 | /* | |
1050 | * kmem_alloc_pageable: | |
1051 | * | |
1052 | * Allocate pageable memory in the kernel's address map. | |
1053 | */ | |
1054 | ||
1055 | kern_return_t | |
1056 | kmem_alloc_pageable_external( | |
1057 | vm_map_t map, | |
1058 | vm_offset_t *addrp, | |
1059 | vm_size_t size) | |
1060 | { | |
1061 | return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt())); | |
1062 | } | |
1063 | ||
1064 | kern_return_t | |
1065 | kmem_alloc_pageable( | |
1066 | vm_map_t map, | |
1067 | vm_offset_t *addrp, | |
1068 | vm_size_t size, | |
1069 | vm_tag_t tag) | |
1070 | { | |
1071 | vm_map_offset_t map_addr; | |
1072 | vm_map_size_t map_size; | |
1073 | kern_return_t kr; | |
1074 | ||
1075 | #ifndef normal | |
1076 | map_addr = (vm_map_min(map)) + PAGE_SIZE; | |
1077 | #else | |
1078 | map_addr = vm_map_min(map); | |
1079 | #endif | |
1080 | map_size = vm_map_round_page(size, | |
1081 | VM_MAP_PAGE_MASK(map)); | |
1082 | if (map_size < size) { | |
1083 | /* overflow */ | |
1084 | *addrp = 0; | |
1085 | return KERN_INVALID_ARGUMENT; | |
1086 | } | |
1087 | ||
1088 | kr = vm_map_enter(map, &map_addr, map_size, | |
1089 | (vm_map_offset_t) 0, | |
1090 | VM_FLAGS_ANYWHERE, | |
1091 | VM_MAP_KERNEL_FLAGS_NONE, | |
1092 | tag, | |
1093 | VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, | |
1094 | VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); | |
1095 | ||
1096 | if (kr != KERN_SUCCESS) | |
1097 | return kr; | |
1098 | ||
1099 | #if KASAN | |
1100 | kasan_notify_address(map_addr, map_size); | |
1101 | #endif | |
1102 | *addrp = CAST_DOWN(vm_offset_t, map_addr); | |
1103 | return KERN_SUCCESS; | |
1104 | } | |
1105 | ||
1106 | /* | |
1107 | * kmem_free: | |
1108 | * | |
1109 | * Release a region of kernel virtual memory allocated | |
1110 | * with kmem_alloc, kmem_alloc_kobject, or kmem_alloc_pageable, | |
1111 | * and return the physical pages associated with that region. | |
1112 | */ | |
1113 | ||
1114 | void | |
1115 | kmem_free( | |
1116 | vm_map_t map, | |
1117 | vm_offset_t addr, | |
1118 | vm_size_t size) | |
1119 | { | |
1120 | kern_return_t kr; | |
1121 | ||
1122 | assert(addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS); | |
1123 | ||
1124 | TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr); | |
1125 | ||
1126 | if(size == 0) { | |
1127 | #if MACH_ASSERT | |
1128 | printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr); | |
1129 | #endif | |
1130 | return; | |
1131 | } | |
1132 | ||
1133 | kr = vm_map_remove(map, | |
1134 | vm_map_trunc_page(addr, | |
1135 | VM_MAP_PAGE_MASK(map)), | |
1136 | vm_map_round_page(addr + size, | |
1137 | VM_MAP_PAGE_MASK(map)), | |
1138 | VM_MAP_REMOVE_KUNWIRE); | |
1139 | if (kr != KERN_SUCCESS) | |
1140 | panic("kmem_free"); | |
1141 | } | |
1142 | ||
1143 | /* | |
1144 | * Allocate new pages in an object. | |
1145 | */ | |
1146 | ||
1147 | kern_return_t | |
1148 | kmem_alloc_pages( | |
1149 | vm_object_t object, | |
1150 | vm_object_offset_t offset, | |
1151 | vm_object_size_t size) | |
1152 | { | |
1153 | vm_object_size_t alloc_size; | |
1154 | ||
1155 | alloc_size = vm_object_round_page(size); | |
1156 | vm_object_lock(object); | |
1157 | while (alloc_size) { | |
1158 | vm_page_t mem; | |
1159 | ||
1160 | ||
1161 | /* | |
1162 | * Allocate a page | |
1163 | */ | |
1164 | while (VM_PAGE_NULL == | |
1165 | (mem = vm_page_alloc(object, offset))) { | |
1166 | vm_object_unlock(object); | |
1167 | VM_PAGE_WAIT(); | |
1168 | vm_object_lock(object); | |
1169 | } | |
1170 | mem->busy = FALSE; | |
1171 | ||
1172 | alloc_size -= PAGE_SIZE; | |
1173 | offset += PAGE_SIZE; | |
1174 | } | |
1175 | vm_object_unlock(object); | |
1176 | return KERN_SUCCESS; | |
1177 | } | |
1178 | ||
1179 | /* | |
1180 | * kmem_suballoc: | |
1181 | * | |
1182 | * Allocates a map to manage a subrange | |
1183 | * of the kernel virtual address space. | |
1184 | * | |
1185 | * Arguments are as follows: | |
1186 | * | |
1187 | * parent Map to take range from | |
1188 | * addr Address of start of range (IN/OUT) | |
1189 | * size Size of range to find | |
1190 | * pageable Can region be paged | |
1191 | * anywhere Can region be located anywhere in map | |
1192 | * new_map Pointer to new submap | |
1193 | */ | |
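/*
 * Illustrative call creating a pageable submap placed anywhere in the
 * kernel map (sub_base, sub_size and sub_map are hypothetical names; the
 * flag values are the ones defined elsewhere in this file's includes):
 *
 *	vm_offset_t	sub_base = 0;
 *	vm_map_t	sub_map;
 *
 *	kr = kmem_suballoc(kernel_map, &sub_base, sub_size, TRUE,
 *	                   VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
 *	                   VM_KERN_MEMORY_NONE, &sub_map);
 */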
kern_return_t
kmem_suballoc(
	vm_map_t              parent,
	vm_offset_t           *addr,
	vm_size_t             size,
	boolean_t             pageable,
	int                   flags,
	vm_map_kernel_flags_t vmk_flags,
	vm_tag_t              tag,
	vm_map_t              *new_map)
{
	vm_map_t        map;
	vm_map_offset_t map_addr;
	vm_map_size_t   map_size;
	kern_return_t   kr;

	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(parent));
	if (map_size < size) {
		/* overflow */
		*addr = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = ((flags & VM_FLAGS_ANYWHERE)
		    ? vm_map_min(parent)
		    : vm_map_trunc_page(*addr,
					VM_MAP_PAGE_MASK(parent)));

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags, vmk_flags, tag,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */
	/* inherit the parent map's page size */
	vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t start,
	vm_offset_t end)
{
	vm_map_offset_t       map_start;
	vm_map_offset_t       map_end;
	vm_map_kernel_flags_t vmk_flags;

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;
	vmk_flags.vmkf_no_pmap_check = TRUE;

	map_start = vm_map_trunc_page(start,
				      VM_MAP_PAGE_MASK(kernel_map));
	map_end = vm_map_round_page(end,
				    VM_MAP_PAGE_MASK(kernel_map));

#if defined(__arm__) || defined(__arm64__)
	kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
				   VM_MAX_KERNEL_ADDRESS, FALSE);
	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	{
		unsigned int    region_select = 0;
		vm_map_offset_t region_start;
		vm_map_size_t   region_size;
		vm_map_offset_t map_addr;
		kern_return_t   kr;

		while (pmap_virtual_region(region_select, &region_start, &region_size)) {

			map_addr = region_start;
			kr = vm_map_enter(kernel_map, &map_addr,
					  vm_map_round_page(region_size,
							    VM_MAP_PAGE_MASK(kernel_map)),
					  (vm_map_offset_t) 0,
					  VM_FLAGS_FIXED,
					  vmk_flags,
					  VM_KERN_MEMORY_NONE,
					  VM_OBJECT_NULL,
					  (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
					  VM_INHERIT_DEFAULT);

			if (kr != KERN_SUCCESS) {
				panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
				      (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
				      (uint64_t) region_size, kr);
			}

			region_select++;
		}
	}
#else
	kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
				   map_end, FALSE);
	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		vm_map_offset_t map_addr;
		kern_return_t   kr;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_no_pmap_check = TRUE;

		map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
		kr = vm_map_enter(kernel_map,
				  &map_addr,
				  (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
				  (vm_map_offset_t) 0,
				  VM_FLAGS_FIXED,
				  vmk_flags,
				  VM_KERN_MEMORY_NONE,
				  VM_OBJECT_NULL,
				  (vm_object_offset_t) 0, FALSE,
				  VM_PROT_NONE, VM_PROT_NONE,
				  VM_INHERIT_DEFAULT);

		if (kr != KERN_SUCCESS) {
			panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
			      (uint64_t) start, (uint64_t) end,
			      (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			      (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
			      kr);
		}
	}
#endif

	/*
	 * Set the default global user wire limit which limits the amount of
	 * memory that can be locked via mlock().  We set this to the total
	 * amount of memory that is potentially usable by a user app (max_mem)
	 * minus a certain amount.  This can be overridden via a sysctl.
	 */
	vm_global_no_user_wire_amount = MIN(max_mem*20/100,
					    VM_NOT_USER_WIREABLE);
	vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;

	/* the default per user limit is the same as the global limit */
	vm_user_wire_limit = vm_global_user_wire_limit;
}


/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t        map,
	vm_map_offset_t fromaddr,
	void            *todata,
	vm_size_t       length)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_t      oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t         map,
	void             *fromdata,
	vm_map_address_t toaddr,
	vm_size_t        length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}

/*
 *
 *	The following two functions are to be used when exposing kernel
 *	addresses to userspace via any of the various debug or info
 *	facilities that exist.  These are basically the same as VM_KERNEL_ADDRPERM()
 *	and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
 *	are exported to KEXTs.
 *
 *	NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
 */

static void
vm_kernel_addrhash_internal(
	vm_offset_t addr,
	vm_offset_t *hash_addr,
	uint64_t    salt)
{
	assert(salt != 0);

	if (addr == 0) {
		*hash_addr = 0;
		return;
	}

	if (VM_KERNEL_IS_SLID(addr)) {
		*hash_addr = VM_KERNEL_UNSLIDE(addr);
		return;
	}

	vm_offset_t sha_digest[SHA256_DIGEST_LENGTH/sizeof(vm_offset_t)];
	SHA256_CTX  sha_ctx;

	SHA256_Init(&sha_ctx);
	SHA256_Update(&sha_ctx, &salt, sizeof(salt));
	SHA256_Update(&sha_ctx, &addr, sizeof(addr));
	SHA256_Final(sha_digest, &sha_ctx);

	*hash_addr = sha_digest[0];
}

void
vm_kernel_addrhash_external(
	vm_offset_t addr,
	vm_offset_t *hash_addr)
{
	return vm_kernel_addrhash_internal(addr, hash_addr, vm_kernel_addrhash_salt_ext);
}

vm_offset_t
vm_kernel_addrhash(vm_offset_t addr)
{
	vm_offset_t hash_addr;
	vm_kernel_addrhash_internal(addr, &hash_addr, vm_kernel_addrhash_salt);
	return hash_addr;
}

void
vm_kernel_addrhide(
	vm_offset_t addr,
	vm_offset_t *hide_addr)
{
	*hide_addr = VM_KERNEL_ADDRHIDE(addr);
}

/*
 *	vm_kernel_addrperm_external:
 *	vm_kernel_unslide_or_perm_external:
 *
 *	Use these macros when exposing an address to userspace that could come from
 *	either kernel text/data *or* the heap.
 */
void
vm_kernel_addrperm_external(
	vm_offset_t addr,
	vm_offset_t *perm_addr)
{
	if (VM_KERNEL_IS_SLID(addr)) {
		*perm_addr = VM_KERNEL_UNSLIDE(addr);
	} else if (VM_KERNEL_ADDRESS(addr)) {
		*perm_addr = addr + vm_kernel_addrperm_ext;
	} else {
		*perm_addr = addr;
	}
}

void
vm_kernel_unslide_or_perm_external(
	vm_offset_t addr,
	vm_offset_t *up_addr)
{
	vm_kernel_addrperm_external(addr, up_addr);
}