/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#include <kern/trustcache.h>


#ifdef KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
#define cppvPsnk        1
#define cppvPsnkb       31
#define cppvPsrc        2
#define cppvPsrcb       30
#define cppvFsnk        4
#define cppvFsnkb       29
#define cppvFsrc        8
#define cppvFsrcb       28
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
#define cppvKmapb       25

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
#include <vm/memory_types.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size);          /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */

extern uint_t pmap_free_pages(void);      /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern uint_t pmap_free_pages_span(void); /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */

extern void pmap_init(void); /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void); /* Adjust free mapping count */

extern void mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t *virtual_start,
	vm_offset_t *virtual_end);
#endif  /* MACHINE_PAGES */

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	ledger_t ledger,
	vm_map_size_t size,
	unsigned int flags);

extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);          /* Release a reference. */
extern void pmap_switch(pmap_t);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t pmap_enter(        /* Enter a mapping */
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired);

extern kern_return_t pmap_enter_options(
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);
extern kern_return_t pmap_enter_options_addr(
	pmap_t pmap,
	vm_map_offset_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);

extern void pmap_remove_some_phys(
	pmap_t pmap,
	ppnum_t pn);

extern void pmap_lock_phys_page(
	ppnum_t pn);

extern void pmap_unlock_phys_page(
	ppnum_t pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void pmap_page_protect(  /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void(pmap_zero_page)(
	ppnum_t pn);

extern void(pmap_zero_part_page)(
	ppnum_t pn,
	vm_offset_t offset,
	vm_size_t len);

extern void(pmap_copy_page)(
	ppnum_t src,
	ppnum_t dest);

extern void(pmap_copy_part_page)(
	ppnum_t src,
	vm_offset_t src_offset,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t src,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_rpage)(
	ppnum_t src,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t phys,
	unsigned int options,
	void *arg);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate
	                                                 * cache based on
	                                                 * page number sent */
	ppnum_t pn,
	vm_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

extern void *pmap_map_compressor_page(
	ppnum_t);

extern void pmap_unmap_compressor_page(
	ppnum_t,
	void*);

#if defined(__arm__) || defined(__arm64__)
/* ARM64_TODO */
extern boolean_t pmap_batch_set_cache_attributes(
	ppnum_t,
	unsigned int,
	unsigned int,
	unsigned int,
	boolean_t,
	unsigned int*);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif

/*
 *	Statistics routines
 */
extern int(pmap_compressed)(pmap_t pmap);
extern int(pmap_resident_count)(pmap_t pmap);
extern int(pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage
                                         * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(                 /* Copy range of mappings,
                                         * if desired. */
	pmap_t dest,
	pmap_t source,
	vm_map_offset_t dest_va,
	vm_map_size_t size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory
                                         * attributes */
	pmap_t pmap,
	vm_map_offset_t va,
	vm_map_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t pmap;                                    \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t pmap;                                    \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                        \
	MACRO_BEGIN                                                      \
	pmap_t          __pmap = (pmap);                                 \
	vm_page_t       __page = (page);                                 \
	int             __options = 0;                                   \
	vm_object_t     __obj;                                           \
                                                                         \
	PMAP_ENTER_CHECK(__pmap, __page)                                 \
	__obj = VM_PAGE_OBJECT(__page);                                  \
	if (__obj->internal) {                                           \
	        __options |= PMAP_OPTIONS_INTERNAL;                      \
	}                                                                \
	if (__page->vmp_reusable || __obj->all_reusable) {               \
	        __options |= PMAP_OPTIONS_REUSABLE;                      \
	}                                                                \
	result = pmap_enter_options(__pmap,                              \
	    (virtual_address),                                           \
	    VM_PAGE_GET_PHYS_PAGE(__page),                               \
	    (protection),                                                \
	    (fault_type),                                                \
	    (flags),                                                     \
	    (wired),                                                     \
	    __options,                                                   \
	    NULL);                                                       \
	MACRO_END
#endif  /* !PMAP_ENTER */

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,     \
	    page, protection,                                            \
	    fault_type, flags, wired, options, result)                   \
	MACRO_BEGIN                                                      \
	pmap_t          __pmap = (pmap);                                 \
	vm_page_t       __page = (page);                                 \
	int             __extra_options = 0;                             \
	vm_object_t     __obj;                                           \
                                                                         \
	PMAP_ENTER_CHECK(__pmap, __page)                                 \
	__obj = VM_PAGE_OBJECT(__page);                                  \
	if (__obj->internal) {                                           \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;                \
	}                                                                \
	if (__page->vmp_reusable || __obj->all_reusable) {               \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;                \
	}                                                                \
	result = pmap_enter_options_addr(__pmap,                         \
	    (virtual_address),                                           \
	    (((pmap_paddr_t)                                             \
	    VM_PAGE_GET_PHYS_PAGE(__page)                                \
	    << PAGE_SHIFT)                                               \
	    + fault_phys_offset),                                        \
	    (protection),                                                \
	    (fault_type),                                                \
	    (flags),                                                     \
	    (wired),                                                     \
	    (options) | __extra_options,                                 \
	    NULL);                                                       \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)        \
	MACRO_BEGIN                                                        \
	if (!batch_pmap_op) {                                              \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem),      \
	            cache_attr);                                           \
	        object->set_cache_attr = TRUE;                             \
	}                                                                  \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx = 0;                            \
	        unsigned int res = 0;                                   \
	        boolean_t batch = TRUE;                                 \
	        while (__page_idx < (num_pages)) {                      \
	                if (!pmap_batch_set_cache_attributes(           \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr),                           \
	                        (num_pages),                            \
	                        (__page_idx),                           \
	                        FALSE,                                  \
	                        (&res))) {                              \
	                        batch = FALSE;                          \
	                        break;                                  \
	                }                                               \
	                __page_idx++;                                   \
	        }                                                       \
	        __page_idx = 0;                                         \
	        res = 0;                                                \
	        while (__page_idx < (num_pages)) {                      \
	                if (batch)                                      \
	                        (void)pmap_batch_set_cache_attributes(  \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr),                   \
	                                (num_pages),                    \
	                                (__page_idx),                   \
	                                TRUE,                           \
	                                (&res));                        \
	                else                                            \
	                        pmap_set_cache_attributes(              \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr));                  \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx = 0;                            \
	        while (__page_idx < (num_pages)) {                      \
	                pmap_set_cache_attributes(                      \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr));                          \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)                            \
{                                                               \
	if ((page)->vmp_error) {                                \
	        panic("VM page %p should not have an error\n",  \
	            (page));                                    \
	}                                                       \
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long pfc_cpus;
	long pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int mask,
	unsigned int options);

extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void(pmap_pageable)(
	pmap_t pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

/* TODO: <rdar://problem/65247502> Completely remove pmap_nesting_size_max() */
extern uint64_t pmap_nesting_size_max(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t kernel_pmap;      /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else

#define PMAP_CREATE_STAGE2      0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP 0x4
#else
#define PMAP_CREATE_DISABLE_JOP 0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#if __arm64__
#define PMAP_CREATE_X86_64      0
#else
#define PMAP_CREATE_X86_64      0
#endif

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS \
	(PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64)

#endif /* __x86_64__ */

#define PMAP_OPTIONS_NOWAIT     0x1     /* don't block, return
	                                 * KERN_RESOURCE_SHORTAGE
	                                 * instead */
#define PMAP_OPTIONS_NOENTER    0x2     /* expand pmap if needed
	                                 * but don't enter mapping
	                                 */
#define PMAP_OPTIONS_COMPRESSOR 0x4     /* credit the compressor for
	                                 * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8     /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10    /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20    /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40    /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80    /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100   /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200       /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400       /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
	                                            * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to
	                                         * be upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif

#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void pmap_change_wiring( /* Specify pageability */
	pmap_t pmap,
	vm_map_offset_t va,
	boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int options);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */

extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t pmap,
	vm_map_offset_t va,
	int *disp);

#if CONFIG_PGTRACE
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif


#ifdef PLATFORM_BridgeOS
struct pmap_legacy_trust_cache {
	struct pmap_legacy_trust_cache *next;
	uuid_t uuid;
	uint32_t num_hashes;
	uint8_t hashes[][CS_CDHASH_LEN];
};
#else
struct pmap_legacy_trust_cache;
#endif

extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
    const vm_size_t trust_cache_len);

typedef enum {
	PMAP_TC_TYPE_PERSONALIZED,
	PMAP_TC_TYPE_PDI,
	PMAP_TC_TYPE_CRYPTEX,
	PMAP_TC_TYPE_ENGINEERING,
	PMAP_TC_TYPE_GLOBAL_FF00,
	PMAP_TC_TYPE_GLOBAL_FF01,
} pmap_tc_type_t;

#define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1
struct pmap_image4_trust_cache {
	// Filled by pmap layer.
	struct pmap_image4_trust_cache const *next;     // linked list linkage
	struct trust_cache_module1 const *module;       // pointer into module (within data below)

	// Filled by caller.
	// data is either an image4,
	// or just the trust cache payload itself if the image4 manifest is external.
	pmap_tc_type_t type;
	size_t bnch_len;
	uint8_t const bnch[48];
	size_t data_len;
	uint8_t const data[];
};

typedef enum {
	PMAP_TC_SUCCESS = 0,
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
	PMAP_TC_MANIFEST_VIOLATION = -9,
	PMAP_TC_PAYLOAD_VIOLATION = -10,
	PMAP_TC_EXPIRED = -11,
	PMAP_TC_CRYPTO_WRONG = -12,
	PMAP_TC_OBJECT_WRONG = -13,
	PMAP_TC_UNKNOWN_CALLER = -14,
	PMAP_TC_UNKNOWN_FAILURE = -15,
} pmap_tc_ret_t;

#define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1
extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);

extern pmap_tc_ret_t pmap_load_image4_trust_cache(
	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
	uint8_t const *img4_manifest,
	vm_size_t img4_manifest_buffer_len,
	vm_size_t img4_manifest_actual_len,
	bool dry_run);

extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

extern void pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);

extern bool pmap_in_ppl(void);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);
extern void pmap_retire_page(ppnum_t ppn);
extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */