/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1

#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif

/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <mach/arm/thread_status.h>
#if defined(__arm64__)
#include <arm64/tlb.h>
#else
#include <arm/tlb.h>
#endif


#define ASID_SHIFT                 (11)                 /* Shift for 2048 max virtual ASIDs (2048 pmaps) */
#define MAX_ASID                   (1 << ASID_SHIFT)    /* Max supported ASIDs (can be virtual) */
#ifndef ARM_ASID_SHIFT
#define ARM_ASID_SHIFT             (8)                  /* Shift for the maximum ARM ASID value (256) */
#endif
#define ARM_MAX_ASID               (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */
#define NBBY                       8

#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASID ((ARM_MAX_ASID >> 1) - 1)
#else
#define MAX_HW_ASID (ARM_MAX_ASID - 1)
#endif
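
/*
 * Worked example (illustrative, not in the original source): with
 * ARM_ASID_SHIFT == 8, ARM_MAX_ASID is 256.  Without __ARM_KERNEL_PROTECT__,
 * MAX_HW_ASID is therefore 255; with it, each address space consumes a pair
 * of ASIDs (one for EL0, one for EL1 mappings), leaving MAX_HW_ASID == 127.
 * ASID_SHIFT == 11 then allows up to 2048 virtual ASIDs to be multiplexed
 * onto those hardware ASIDs.
 */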

#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif
#define ARM_MAX_VMID (1 << ARM_VMID_SHIFT)

/* XPRR virtual register map */

#define CPUWINDOWS_MAX 4

struct pmap_cpu_data {
#if defined(__arm64__)
	pmap_t cpu_nested_pmap;
#else
	pmap_t cpu_user_pmap;
	unsigned int cpu_user_pmap_stamp;
#endif
	unsigned int cpu_number;
	bool copywindow_strong_sync[CPUWINDOWS_MAX];

#if MAX_ASID > MAX_HW_ASID

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * virtual ASIDs, minus all that would map onto 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASID. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_asid_high_bits[MAX_HW_ASID];
#endif
};
typedef struct pmap_cpu_data pmap_cpu_data_t;
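
/*
 * Sketch of how cpu_asid_high_bits might be consulted on context switch (an
 * illustrative reconstruction, not code from this header; 'vasid' and
 * 'hw_asid' are hypothetical locals):
 *
 *	pmap_cpu_data_t *cpu_data = pmap_get_cpu_data();
 *	uint8_t high_bits = (uint8_t)(vasid >> ARM_ASID_SHIFT);
 *	if (cpu_data->cpu_asid_high_bits[hw_asid] != high_bits) {
 *		cpu_data->cpu_asid_high_bits[hw_asid] = high_bits;
 *		// a different virtual ASID last used this hardware ASID on
 *		// this CPU, so its stale TLB entries must be invalidated
 *	}
 */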

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif

#if defined(__arm64__)

#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)

typedef uint64_t tt_entry_t;            /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint64_t pt_entry_t;            /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#elif defined(__arm__)

typedef uint32_t tt_entry_t;            /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint32_t pt_entry_t;            /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#else
#error unknown arch
#endif

struct page_table_level_info {
	const uint64_t size;
	const uint64_t offmask;
	const uint64_t shift;
	const uint64_t index_mask;
	const uint64_t valid_mask;
	const uint64_t type_mask;
	const uint64_t type_block;
};


/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpage support */

/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)
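
/*
 * Example: with 4K hardware pages (ARM_PGSHIFT == 12), arm_atop(0x3005)
 * yields 3 and arm_ptoa(3) yields 0x3000.
 */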

/*
 * Round off or truncate to the nearest page. These will work
 * for either addresses or counts (i.e., 1 byte rounds to 1 page).
 */
#define arm_round_page(x) \
	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
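
/*
 * Example: with 4K hardware pages, arm_round_page(0x2001) == 0x3000 and
 * arm_trunc_page(0x2fff) == 0x2000.
 */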

/* Convert address offset to page table index */
#define ptenum(a) ((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))

/*
 * For setups where the kernel page size does not match the hardware
 * page size (the kernel page size must be a multiple of the hardware
 * page size), we will need to determine what the page ratio is.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
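
/*
 * Example: a 16K kernel page size (PAGE_SHIFT == 14) over 4K hardware pages
 * (ARM_PGSHIFT == 12) gives PAGE_RATIO == 4, making TEST_PAGE_RATIO_4 true;
 * matching kernel and hardware page sizes give PAGE_RATIO == 1.
 */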

#if (__ARM_VMSA__ <= 7)
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES ((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t))
#endif
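
/*
 * Example: on arm64 with 4K pages and 8-byte entries, NTTES == NPTES ==
 * 4096 / 8 == 512.  On ARMv7 (__ARM_VMSA__ <= 7) an L2 page table occupies
 * a quarter of a 4K page (256 entries of 4 bytes), hence the
 * ARM_PGBYTES / 4 above.
 */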

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);

#if (__ARM_VMSA__ <= 7)
/* Convert address offset to translation table index */
#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a) ((a) << ARM_TT_L1_SHIFT)
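
/*
 * Example: on ARMv7, ARM_TT_L1_SHIFT is 20 (each L1 entry covers 1MB), so
 * ttenum(0x12345678) == 0x123 and tteitova(0x123) == 0x12300000.
 */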

#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX ((256*ARM_TT_L1_SIZE))

#else

/* Convert address offset to translation table index */
#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)
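
/*
 * Example (assuming a 4K translation granule): ARM_TT_L2_SHIFT is 21, so
 * each L2 entry covers 2MB and ttel2num() extracts the L2 table index from
 * a virtual address; likewise L1 entries cover 1GB (shift 30).
 */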

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))
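
/*
 * Example round trip: pte_to_pa(pa_to_pte(pa)) recovers the page-aligned
 * physical address even after permission and attribute bits have been ORed
 * into the entry; pte_increment_pa(p) advances the entry's output address
 * by one page.
 */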

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE)
#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL)

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))

#endif /* __ARM_VMSA__ <= 7 */

#define PMAP_GC_INFLIGHT 1
#define PMAP_GC_WAIT 2

#if DEVELOPMENT || DEBUG
#define pmap_cs_log(msg, args...) printf("PMAP_CS: " msg "\n", args)
#define pmap_cs_log_h(msg, args...) { if (pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", args); }

#define PMAP_CS_EXCEPTION_LIST_HACK 1

#else
#define pmap_cs_log(msg, args...)
#define pmap_cs_log_h(msg, args...)
#endif /* DEVELOPMENT || DEBUG */


/* Forward struct declarations for the pmap data structure */
struct page_table_attr;

/*
 * Convert translation/page table entry to kernel virtual address
 */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))

struct pmap {
	tt_entry_t *tte;                  /* translation table entries */
	pmap_paddr_t ttep;                /* translation table physical */
	vm_map_address_t min;             /* min address in pmap */
	vm_map_address_t max;             /* max address in pmap */
#if ARM_PARAMETERIZED_PMAP
	const struct page_table_attr *pmap_pt_attr; /* details about page table layout */
#endif /* ARM_PARAMETERIZED_PMAP */
	ledger_t ledger;                  /* ledger tracking phys mappings */
	decl_simple_lock_data(, lock);    /* lock on map */
	struct pmap_statistics stats;     /* map statistics */
	queue_chain_t pmaps;              /* global list of pmaps */
	tt_entry_t *tt_entry_free;        /* free translation table entries */
	tt_entry_t *prev_tte;             /* previous translation table */
	struct pmap *nested_pmap;         /* nested pmap */
	vm_map_address_t nested_region_grand_addr;
	vm_map_address_t nested_region_subord_addr;
	vm_map_offset_t nested_region_size;
	vm_map_offset_t nested_region_true_start;
	vm_map_offset_t nested_region_true_end;
	unsigned int *nested_region_asid_bitmap;

#if (__ARM_VMSA__ <= 7)
	decl_simple_lock_data(, tt1_lock); /* lock on tt1 */
	unsigned int cpu_ref;             /* number of cpus using pmap */
	unsigned int tte_index_max;       /* max tte index in translation table entries */
#endif


	unsigned int stamp;               /* creation stamp */
	_Atomic int32_t ref_count;        /* pmap reference count */
	unsigned int gc_status;           /* gc status */
	unsigned int nested_region_asid_bitmap_size;
	uint32_t nested_no_bounds_refcnt; /* number of pmaps that nested this pmap without bounds set */
	uint16_t hw_asid;
	uint8_t sw_asid;

#if MACH_ASSERT
	int pmap_pid;
	char pmap_procname[17];
	bool pmap_stats_assert;
#endif /* MACH_ASSERT */
#if DEVELOPMENT || DEBUG
	bool footprint_suspended;
	bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
	bool nx_enabled;                  /* no execute */
	bool nested;                      /* is nested */
	bool is_64bit;                    /* is 64bit */
	bool nested_has_no_bounds_ref;    /* nested a pmap when the bounds were not set */
	bool nested_bounds_set;           /* the nesting bounds have been set */
#if HAS_APPLE_PAC
	bool disable_jop;
#endif /* HAS_APPLE_PAC */
};

#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | pmap->hw_asid)
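
/*
 * Example: a pmap with hw_asid == 5 and sw_asid == 2 has
 * PMAP_VASID(pmap) == 0x00020005.
 */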

#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t pmap,
	vm_offset_t *listp,
	int space
	);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;     /* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;   /* physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif

#if __arm64__
extern tt_entry_t *invalid_tte; /* global invalid translation table */
extern pmap_paddr_t invalid_ttep; /* physical invalid translation table addr */
#endif

#define PMAP_CONTEXT(pmap, thread)
/*
 * Platform-dependent prototypes
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if defined(__arm64__)
extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va);
#endif

/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {                         \
	th->map = new_map;                                              \
	pmap_set_pmap(vm_map_pmap(new_map), th);                        \
}

#define pmap_kernel()                                                   \
	(kernel_pmap)

#define pmap_compressed(pmap)                                           \
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap)                                       \
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap)                                         \
	((pmap)->stats.resident_max)

#define MACRO_NOOP

#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)          \
	MACRO_NOOP

#define pmap_pageable(pmap, start, end, pageable)                       \
	MACRO_NOOP

#define pmap_kernel_va(VA)                                              \
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value)                   \
	(KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt)                                        \
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt)                                       \
	copyout(from, to, cnt)

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE                    0x0
#define PMAP_MAP_BD_WCOMB                     0x1
#define PMAP_MAP_BD_POSTED                    0x2
#define PMAP_MAP_BD_POSTED_REORDERED          0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK                      0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd, boolean_t clear);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern vm_map_address_t pmap_create_sharedpage(void);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

extern void pt_fake_zone_init(int);
extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
    uint64_t *, int *, int *, int *);

extern boolean_t pmap_valid_page(ppnum_t pn);

#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN     0x02
#define ARM_PMAP_MAX_OFFSET_MAX     0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE  0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO   0x10


extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
#define PMAP_EXTRACT_INDEX 11
#define PMAP_FIND_PHYS_INDEX 12
#define PMAP_INSERT_SHAREDPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_RETURN_INDEX 24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 57
#define PMAP_TRIM_INDEX 64
#define PMAP_LEDGER_ALLOC_INIT_INDEX 65
#define PMAP_LEDGER_ALLOC_INDEX 66
#define PMAP_LEDGER_FREE_INDEX 67



#define PMAP_COUNT 71

#define PMAP_INVALID_CPU_NUM (~0U)

struct pmap_cpu_data_array_entry {
	pmap_cpu_data_t cpu_data;
} __attribute__((aligned(1 << L2_CLINE)));
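
/*
 * Each array entry is aligned and padded to an L2 cache line (128 bytes
 * when L2_CLINE == 7) so that no two CPUs' pmap data share a line,
 * avoiding false sharing.
 */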

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);


#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA



extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);

extern lck_grp_t pmap_lck_grp;

#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l)             simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l)           simple_unlock(l)
#define pmap_simple_lock_try(l)         simple_lock_try(l, &pmap_lck_grp)
#define pmap_lock_bit(l, i)             hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i)           hw_unlock_bit(l, i)
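
/*
 * Usage sketch (illustrative): the wrappers funnel every pmap lock through
 * pmap_lck_grp so that lock-group accounting attributes contention to the
 * pmap layer.
 *
 *	pmap_simple_lock(&pmap->lock);
 *	// ... examine or mutate pmap state ...
 *	pmap_simple_unlock(&pmap->lock);
 */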

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
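
/*
 * Example (assuming a 512GB, i.e. 2^39-byte, kernel page table range):
 * ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK spans the whole range, so the
 * expression evaluates to ~(2^38) + 1 == 0xFFFFFFC000000000ULL, the
 * midpoint of the TTBR1 region.
 */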
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */