/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1

#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif

/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <mach/arm/thread_status.h>
#if defined(__arm64__)
#include <arm64/tlb.h>
#else
#include <arm/tlb.h>
#endif


#define ASID_SHIFT (11) /* Shift for 2048 max virtual ASIDs (2048 pmaps) */
#define MAX_ASIDS (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */
#ifndef ARM_ASID_SHIFT
#define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */
#endif
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */
#define NBBY 8 /* Number of bits per byte */

#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif
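
/*
 * Illustrative note (not part of the original header): with the default
 * ARM_ASID_SHIFT of 8, the hardware namespace holds 256 ASIDs. ASID 0 is
 * reserved as the global ASID, and under __ARM_KERNEL_PROTECT__ half of the
 * namespace is consumed by paired EL0/EL1 mappings, hence the two
 * MAX_HW_ASIDS formulas above.
 */
#if !__ARM_KERNEL_PROTECT__ && (ARM_ASID_SHIFT == 8)
_Static_assert(MAX_HW_ASIDS == 255, "255 allocatable ASIDs once ASID 0 is reserved");
#endif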

#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT)

/* XPRR virtual register map */

#define CPUWINDOWS_MAX 4

#if defined(__arm64__)

#if defined(ARM_LARGE_MEMORY)
/*
 * 2 L1 tables (Linear KVA and V=P), plus 2 * 16 L2 tables, map up to
 * 16 * 64GB = 1TB of DRAM. This is the upper limit on how many pages the
 * bootstrap page tables can consume.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else // ARM_LARGE_MEMORY
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif

typedef uint64_t tt_entry_t; /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint64_t pt_entry_t; /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#elif defined(__arm__)

typedef uint32_t tt_entry_t; /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint32_t pt_entry_t; /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#else
#error unknown arch
#endif

/* Forward declaration of the structure that controls page table
 * geometry and TTE/PTE format. */
struct page_table_attr;

/*
 * pv_entry_t - structure to track the active mappings for a given page
 */
typedef struct pv_entry {
	struct pv_entry *pve_next; /* next alias */
	pt_entry_t *pve_ptep;      /* page table entry */
}
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
/* For the newer ARMv7k ABI, where 64-bit types are 64-bit aligned but
 * pointers are 32-bit: since pt_desc is 64-bit aligned and we often cast
 * from pv_entry to pt_desc, pv_entry must be 64-bit aligned as well.
 */
__attribute__ ((aligned(8))) pv_entry_t;
#else
pv_entry_t;
#endif
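
/*
 * Illustrative sketch (not in the original source): the alignment contract
 * above can be checked at compile time with C11's _Alignof.
 */
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
_Static_assert(_Alignof(pv_entry_t) == 8,
    "pv_entry_t must be 8-byte aligned for casts to the 64-bit-aligned pt_desc");
#endif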

typedef struct {
	pv_entry_t *list;
	uint32_t count;
} pv_free_list_t;

struct pmap_cpu_data {
#if XNU_MONITOR
	void * ppl_kern_saved_sp;
	void * ppl_stack;
	arm_context_t * save_area;
	unsigned int ppl_state;
#endif
#if defined(__arm64__)
	pmap_t cpu_nested_pmap;
	const struct page_table_attr *cpu_nested_pmap_attr;
	vm_map_address_t cpu_nested_region_addr;
	vm_map_offset_t cpu_nested_region_size;
#else
	pmap_t cpu_user_pmap;
	unsigned int cpu_user_pmap_stamp;
#endif
	unsigned int cpu_number;
	bool copywindow_strong_sync[CPUWINDOWS_MAX];
	pv_free_list_t pv_free;
	pv_entry_t *pv_free_tail;

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, an 8-bit field lets us support up to 65536
	 * virtual ASIDs, minus all those that would map onto 0 (as 0 is a
	 * global ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASIDS. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_sw_asids[MAX_HW_ASIDS];
};
typedef struct pmap_cpu_data pmap_cpu_data_t;

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS + ARM_TT_L1_SIZE)
#else // ARM_LARGE_MEMORY
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif // ARM_LARGE_MEMORY
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif

struct page_table_level_info {
	const uint64_t size;
	const uint64_t offmask;
	const uint64_t shift;
	const uint64_t index_mask;
	const uint64_t valid_mask;
	const uint64_t type_mask;
	const uint64_t type_block;
};

/*
 * For setups where the kernel page size does not match the hardware
 * page size (presumably, the kernel page size must be a multiple of
 * the hardware page size), we will need to determine what the page
 * ratio is.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
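
/*
 * Illustrative example (not part of the original header): with a 16KB kernel
 * page size (PAGE_SHIFT == 14) over 4KB hardware pages (ARM_PGSHIFT == 12),
 * PAGE_RATIO is (1 << 14) >> 12 == 4 and TEST_PAGE_RATIO_4 holds; when the
 * kernel and hardware page sizes match, the ratio is 1.
 */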


/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpage support */

/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)

/*
 * Round off or truncate to the nearest page. These will work
 * for either addresses or counts (i.e., 1 byte rounds to 1 page).
 */
#define arm_round_page(x) \
	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
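
/*
 * Illustrative example, assuming 4KB hardware pages (ARM_PGMASK == 0xFFF):
 * arm_round_page(0x1001) == 0x2000, arm_trunc_page(0x1FFF) == 0x1000,
 * arm_atop(0x2000) == 2, and arm_ptoa(2) == 0x2000.
 */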

#if __arm__
/* Convert address offset to page table index */
#define ptenum(a) ((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))
#endif

#if (__ARM_VMSA__ <= 7)
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES ((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t))
#endif
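
/*
 * Illustrative note (an assumption from the ARMv7 short-descriptor format,
 * not stated in this header): a second-level table there occupies 1KB
 * (ARM_PGBYTES / 4, i.e. 256 4-byte PTEs), which is why NPTES differs
 * between the two branches above.
 */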

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#if defined(HAS_VMSA_LOCK)
extern void vmsa_lock(void);
#endif
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);

#if (__ARM_VMSA__ <= 7)
/* Convert address offset to translation table index */
#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a) ((a) << ARM_TT_L1_SHIFT)

#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE / 0x1000) * 4 * ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX (256 * ARM_TT_L1_SIZE)

#else

/* Convert address offset to translation table index */
#define ttel0num(a) (((a) & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) (((a) & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) (((a) & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL)

#define TLBFLUSH_SIZE (ARM_TTE_MAX / ((sizeof(unsigned int)) * BYTE_SIZE))

#endif /* __ARM_VMSA__ <= 7 */

#define PMAP_GC_INFLIGHT 1
#define PMAP_GC_WAIT 2

#if DEVELOPMENT || DEBUG
#define pmap_cs_log_h(msg, args...) { if (pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", args); }
#define pmap_cs_log pmap_cs_log_h

#else
#define pmap_cs_log(msg, args...)
#define pmap_cs_log_h(msg, args...)
#endif /* DEVELOPMENT || DEBUG */



/*
 * Convert translation/page table entry to kernel virtual address
 */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))

struct pmap {
	tt_entry_t *XNU_PTRAUTH_SIGNED_PTR("pmap.tte") tte; /* translation table entries */
	pmap_paddr_t ttep;    /* translation table physical */
	vm_map_address_t min; /* min address in pmap */
	vm_map_address_t max; /* max address in pmap */
#if ARM_PARAMETERIZED_PMAP
	const struct page_table_attr * pmap_pt_attr; /* details about page table layout */
#endif /* ARM_PARAMETERIZED_PMAP */
	ledger_t ledger; /* ledger tracking phys mappings */

	decl_lck_rw_data(, rwlock);

	struct pmap_statistics stats; /* map statistics */
	queue_chain_t pmaps;          /* global list of pmaps */
	tt_entry_t *tt_entry_free;    /* free translation table entries */
	struct pmap *XNU_PTRAUTH_SIGNED_PTR("pmap.nested_pmap") nested_pmap; /* nested pmap */
	vm_map_address_t nested_region_addr;
	vm_map_offset_t nested_region_size;
	vm_map_offset_t nested_region_true_start;
	vm_map_offset_t nested_region_true_end;
	unsigned int *nested_region_asid_bitmap;

#if (__ARM_VMSA__ <= 7)
	unsigned int tte_index_max; /* max tte index in translation table entries */
#endif


	unsigned int stamp;      /* creation stamp */
	_Atomic int32_t ref_count; /* pmap reference count */
	unsigned int gc_status;  /* gc status */
	unsigned int nested_region_asid_bitmap_size;
	uint32_t nested_no_bounds_refcnt; /* number of pmaps that nested this pmap without bounds set */
	uint16_t hw_asid;
	uint8_t sw_asid;

#if MACH_ASSERT
	int pmap_pid;
	char pmap_procname[17];
	bool pmap_stats_assert;
#endif /* MACH_ASSERT */
	bool pmap_vm_map_cs_enforced;
#if DEVELOPMENT || DEBUG
	bool footprint_suspended;
	bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
	bool nx_enabled;               /* no execute */
	bool nested;                   /* is nested */
	bool is_64bit;                 /* is 64bit */
	bool nested_has_no_bounds_ref; /* nested a pmap when the bounds were not set */
	bool nested_bounds_set;        /* the nesting bounds have been set */
#if HAS_APPLE_PAC
	bool disable_jop;
#endif /* HAS_APPLE_PAC */
};

#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | (pmap)->hw_asid)
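
/*
 * Illustrative example (not part of the original header): a pmap with
 * hw_asid 0x2A and sw_asid 0x01 yields the virtual ASID
 * (0x01 << 16) | 0x2A == 0x0001002A, which is how the pmap layer overloads
 * each hardware ASID across multiple software ASIDs (see cpu_sw_asids above).
 */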

#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t pmap,
	vm_offset_t *listp,
	int space
	);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;   /* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep; /* physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif

#if __arm64__
extern tt_entry_t *invalid_tte;   /* global invalid translation table */
extern pmap_paddr_t invalid_ttep; /* physical invalid translation table addr */
#endif

#define PMAP_CONTEXT(pmap, thread)

/*
 * Platform-dependent prototypes
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if HAS_APPLE_PAC && XNU_MONITOR
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
#endif /* HAS_APPLE_PAC && XNU_MONITOR */

/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
	th->map = new_map; \
	pmap_set_pmap(vm_map_pmap(new_map), th); \
}

#define pmap_kernel() \
	(kernel_pmap)

#define pmap_compressed(pmap) \
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap) \
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap) \
	((pmap)->stats.resident_max)

#define MACRO_NOOP

#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) \
	MACRO_NOOP

#define pmap_pageable(pmap, start, end, pageable) \
	MACRO_NOOP

#define pmap_kernel_va(VA) \
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) \
	(KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) \
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt) \
	copyout(from, to, cnt)
extern pmap_paddr_t kvtophys(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x0
#define PMAP_MAP_BD_WCOMB 0x1
#define PMAP_MAP_BD_POSTED 0x2
#define PMAP_MAP_BD_POSTED_REORDERED 0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK 0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

#if XNU_MONITOR
/* exposed for use by the HMAC SHA driver */
extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx,
    void (*callback)(void *ctx, ppnum_t page_number, const void *page));
extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len));
extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr);
extern void pmap_clear_ppl_hashed_flag_all(void);
extern void pmap_check_ppl_hashed_flag_all(void);
#endif /* XNU_MONITOR */

extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);

#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10


extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);



#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
extern void
pmap_disable_user_jop(pmap_t pmap);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_SHAREDPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_RETURN_INDEX 24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_ALLOC_INIT_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC && XNU_MONITOR
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC && XNU_MONITOR */

#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66


#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */



#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_COUNT 73

#define PMAP_INVALID_CPU_NUM (~0U)

struct pmap_cpu_data_array_entry {
	pmap_cpu_data_t cpu_data;
} __attribute__((aligned(1 << MAX_L2_CLINE)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t * pmap_get_cpu_data(void);

#if XNU_MONITOR
extern boolean_t pmap_ppl_locked_down;

/*
 * Denotes the bounds of the PPL stacks. These are visible so that other code
 * can check if addresses are part of the PPL stacks.
 */
extern void * pmap_stacks_start;
extern void * pmap_stacks_end;

/* Asks if a page belongs to the monitor. */
extern boolean_t pmap_is_monitor(ppnum_t pn);

/*
 * Indicates that we are done with our static bootstrap
 * allocations, so the monitor may now mark the pages
 * that it owns.
 */
extern void pmap_static_allocations_done(void);

/*
 * Indicates that we are done mutating sensitive state in the system, and that
 * the PPL may now restrict write access to PPL-owned mappings.
 */
extern void pmap_lockdown_ppl(void);


#ifdef KASAN
#define PPL_STACK_SIZE (PAGE_SIZE << 2)
#else
#define PPL_STACK_SIZE PAGE_SIZE
#endif

/* One stack for each CPU, plus a guard page below each stack and above the last stack */
#define PPL_STACK_REGION_SIZE ((MAX_CPUS * (PPL_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)
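
/*
 * Illustrative arithmetic (hypothetical values, not from this header): with
 * MAX_CPUS == 2, 16KB pages, and KASAN disabled, this is
 * 2 * (16KB stack + 16KB guard) plus one trailing 16KB guard, i.e. 80KB.
 */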

#define PPL_DATA_SEGMENT_SECTION_NAME "__PPLDATA,__data"
#define PPL_TEXT_SEGMENT_SECTION_NAME "__PPLTEXT,__text,regular,pure_instructions"
#define PPL_DATACONST_SEGMENT_SECTION_NAME "__PPLDATA,__const"

#define MARK_AS_PMAP_DATA \
	__PLACE_IN_SECTION(PPL_DATA_SEGMENT_SECTION_NAME)
#define MARK_AS_PMAP_TEXT \
	__attribute__((used, section(PPL_TEXT_SEGMENT_SECTION_NAME), noinline))
#define MARK_AS_PMAP_RODATA \
	__PLACE_IN_SECTION(PPL_DATACONST_SEGMENT_SECTION_NAME)

#else /* XNU_MONITOR */

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

#endif /* !XNU_MONITOR */


extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);

extern lck_grp_t pmap_lck_grp;

#if XNU_MONITOR
extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length);
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz)
#define pmap_simple_lock(l) simple_lock_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock_nopreempt(l)
#define pmap_simple_lock_try(l) simple_lock_try_nopreempt(l, &pmap_lck_grp)
#define pmap_lock_bit(l, i) hw_lock_bit_nopreempt(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit_nopreempt(l, i)
#else
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp)
#define pmap_lock_bit(l, i) hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)
#endif

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
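/*
 * Illustrative derivation (not in the original source): ARM_TT_ROOT_SIZE +
 * ARM_TT_ROOT_INDEX_MASK spans the VA range covered by the kernel's root
 * translation table, so the (~x + 1) two's-complement negation above
 * evaluates to 2^64 minus half that span: the midpoint of the kernel page
 * table range described in the comment.
 */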
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */