/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_ 1
#include <mach_assert.h>

#include <arm/proc_reg.h>
#if defined(__arm64__)
#include <arm64/proc_reg.h>
#endif
/*
 * Machine-dependent structures for the physical map module.
 */

#ifndef ASSEMBLER
#include <stdatomic.h>

#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>

#include <mach/arm/thread_status.h>
#if defined(__arm64__)
#include <arm64/tlb.h>
#endif
#define ASID_SHIFT      (11)                    /* Shift for 2048 max virtual ASIDs (2048 pmaps) */
#define MAX_ASIDS       (1 << ASID_SHIFT)       /* Max supported ASIDs (can be virtual) */
#ifndef ARM_ASID_SHIFT
#define ARM_ASID_SHIFT  (8)                     /* Shift for the maximum ARM ASID value (256) */
#endif
#define ARM_MAX_ASIDS   (1 << ARM_ASID_SHIFT)   /* Max ASIDs supported by the hardware */
#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif
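/*
 * Worked example (illustrative, using the default values above): with
 * ARM_ASID_SHIFT == 8, ARM_MAX_ASIDS == 256 and MAX_HW_ASIDS == 255
 * (ASID 0 is reserved as the global ASID). Under __ARM_KERNEL_PROTECT__
 * the pool is halved, (256 >> 1) - 1 == 127, since each address space
 * then needs both an EL0 and an EL1 ASID.
 */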
#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif
#define ARM_MAX_VMIDS  (1 << ARM_VMID_SHIFT)
/* XPRR virtual register map */

#define CPUWINDOWS_MAX 4
#if defined(__arm64__)

#if defined(ARM_LARGE_MEMORY)
/*
 * 2 L1 tables (Linear KVA and V=P), plus 2*16 L2 tables map up to (16*64GB) 1TB of DRAM.
 * Upper limit on how many pages can be consumed by bootstrap page tables.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else // ARM_LARGE_MEMORY
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif // ARM_LARGE_MEMORY
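/*
 * Worked arithmetic for the large-memory case, assuming the 16KB translation
 * granule: 2 L1 tables plus 2*16 L2 tables gives 34 bootstrap table pages.
 * Each 16KB L2 table holds 2048 entries covering 32MB apiece, so one L2
 * table spans 64GB and the 16 of them cover the full 1TB of DRAM.
 */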
typedef uint64_t tt_entry_t;            /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint64_t pt_entry_t;            /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#elif defined(__arm__)

typedef uint32_t tt_entry_t;            /* translation table entry type */
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

typedef uint32_t pt_entry_t;            /* page table entry type */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)

#endif
/* Forward declaration of the structure that controls page table
 * geometry and TTE/PTE format. */
struct page_table_attr;
/*
 * pv_entry_t - structure to track the active mappings for a given page
 */
typedef struct pv_entry {
	struct pv_entry *pve_next;      /* next alias */
	pt_entry_t      *pve_ptep;      /* page table entry */
}
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
 * are only 32-bit aligned:
 * Since pt_desc is 64-bit aligned and we cast often from pv_entry to
 * pt_desc, force 8-byte alignment here as well.
 */
__attribute__ ((aligned(8))) pv_entry_t;
#else
pv_entry_t;
#endif
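/*
 * Illustrative sketch (not part of this header's API): visiting every mapping
 * of a physical page by walking its PV list. "pv_head" is a hypothetical
 * pointer to the first pv_entry for the page; a real caller would also need
 * to hold the appropriate PV lock.
 *
 *	for (pv_entry_t *pve = pv_head; pve != NULL; pve = pve->pve_next) {
 *		pt_entry_t *ptep = pve->pve_ptep;  // one PTE aliasing the page
 *		// ... inspect or update *ptep ...
 *	}
 */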
struct pmap_cpu_data {
#if XNU_MONITOR
	void * ppl_kern_saved_sp;
	arm_context_t * save_area;
	unsigned int ppl_state;
#endif
#if defined(__arm64__)
	pmap_t cpu_nested_pmap;
	const struct page_table_attr *cpu_nested_pmap_attr;
	vm_map_address_t cpu_nested_region_addr;
	vm_map_offset_t cpu_nested_region_size;
#else
	pmap_t cpu_user_pmap;
	unsigned int cpu_user_pmap_stamp;
#endif
	unsigned int cpu_number;
	bool copywindow_strong_sync[CPUWINDOWS_MAX];
	pv_free_list_t pv_free;
	pv_entry_t *pv_free_tail;

	/*
	 * This supports overloading of ARM ASIDs by the pmap. The field needs
	 * to be wide enough to cover all the virtual bits in a virtual ASID.
	 * With 256 physical ASIDs, 8-bit fields let us support up to 65536
	 * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
	 * ASID).
	 *
	 * If we were to use bitfield shenanigans here, we could save a bit of
	 * memory by only having enough bits to support MAX_ASIDS. However, such
	 * an implementation would be more error prone.
	 */
	uint8_t cpu_sw_asids[MAX_HW_ASIDS];
};
typedef struct pmap_cpu_data pmap_cpu_data_t;
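/*
 * Sketch of how cpu_sw_asids supports ASID overloading (a simplified model,
 * not the exact pmap implementation): on activation, compare the pmap's
 * software ASID against the one this CPU last ran under the same hardware
 * ASID; a mismatch means stale TLB entries may exist for that hardware ASID.
 *
 *	if (cpu_data->cpu_sw_asids[pmap->hw_asid] != pmap->sw_asid) {
 *		cpu_data->cpu_sw_asids[pmap->hw_asid] = pmap->sw_asid;
 *		// flush TLB entries tagged with pmap->hw_asid before use
 *	}
 */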
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>

#include <sys/cdefs.h>
/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif
/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else // ARM_LARGE_MEMORY
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif // ARM_LARGE_MEMORY
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif
struct page_table_level_info {
	const uint64_t size;
	const uint64_t offmask;
	const uint64_t shift;
	const uint64_t index_mask;
	const uint64_t valid_mask;
	const uint64_t type_mask;
	const uint64_t type_block;
};
/*
 * For setups where the kernel page size does not match the hardware
 * page size (presumably, the kernel page size must be a multiple of
 * the hardware page size), we will need to determine what the page
 * ratio is.
 */
#define PAGE_RATIO        ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
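/*
 * Example: with a 16KB kernel page size (PAGE_SHIFT == 14) on 4KB hardware
 * pages (ARM_PGSHIFT == 12), PAGE_RATIO == (1 << 14) >> 12 == 4 and
 * TEST_PAGE_RATIO_4 is true; when the sizes match, PAGE_RATIO == 1.
 */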
#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */

/*
 * Convert addresses to pages and vice versa.
 * No rounding is used.
 */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)
/*
 * Round off or truncate to the nearest page. These will work
 * for either addresses or counts (i.e. 1 byte rounds up to 1 page).
 */
#define arm_round_page(x) \
	((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
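/*
 * Example with 4KB hardware pages (ARM_PGSHIFT == 12, ARM_PGMASK == 0xFFF):
 *
 *	arm_atop(0x2345)       == 0x2      // address to page number
 *	arm_ptoa(0x2)          == 0x2000   // page number to address
 *	arm_round_page(0x2345) == 0x3000   // rounds up
 *	arm_trunc_page(0x2345) == 0x2000   // truncates down
 */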
/* Convert address offset to page table index */
#define ptenum(a) ((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT))
#if (__ARM_VMSA__ <= 7)
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES ((ARM_PGBYTES / 4) / sizeof(pt_entry_t))
#else
#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t))
#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t))
#endif
extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);
#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#if defined(HAS_VMSA_LOCK)
extern void vmsa_lock(void);
#endif
#else
extern uint32_t get_mmu_control(void);
extern void set_mmu_control(uint32_t);
extern uint32_t get_aux_control(void);
extern void set_aux_control(uint32_t);
extern void set_mmu_ttb(pmap_paddr_t);
extern void set_mmu_ttb_alternate(pmap_paddr_t);
extern void set_context_id(uint32_t);
#endif
extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);
#if (__ARM_VMSA__ <= 7)

/* Convert address offset to translation table index */
#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT)

/* Convert translation table index to user virtual address */
#define tteitova(a) ((a) << ARM_TT_L1_SHIFT)

#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK)
#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK)

#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK)
#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE)
#define ARM_NESTING_SIZE_MAX ((256*ARM_TT_L1_SIZE))
#else

/* Convert address offset to translation table index */
#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL)

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))
#endif /* __ARM_VMSA__ <= 7 */
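/*
 * In both branches above, pa_to_pte()/pte_to_pa() are mask-only conversions:
 * a round trip discards the non-address bits. Illustrative use, where
 * "template" is a hypothetical value holding the AP/attribute bits:
 *
 *	pt_entry_t pte = pa_to_pte(pa) | template;
 *	assert(pte_to_pa(pte) == arm_trunc_page(pa));
 */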
#define PMAP_GC_INFLIGHT 1
#define PMAP_GC_WAIT     2
#if DEVELOPMENT || DEBUG
#define pmap_cs_log_h(msg, args...) { if (pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", ##args); }
#define pmap_cs_log pmap_cs_log_h
#else
#define pmap_cs_log(msg, args...)
#define pmap_cs_log_h(msg, args...)
#endif /* DEVELOPMENT || DEBUG */
/*
 * Convert translation/page table entry to kernel virtual address
 */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))
struct pmap {
	tt_entry_t       *XNU_PTRAUTH_SIGNED_PTR("pmap.tte") tte;  /* translation table entries */
	pmap_paddr_t     ttep;                  /* translation table physical */
	vm_map_address_t min;                   /* min address in pmap */
	vm_map_address_t max;                   /* max address in pmap */
#if ARM_PARAMETERIZED_PMAP
	const struct page_table_attr *pmap_pt_attr;  /* details about page table layout */
#endif /* ARM_PARAMETERIZED_PMAP */
	ledger_t         ledger;                /* ledger tracking phys mappings */

	decl_lck_rw_data(, rwlock);

	struct pmap_statistics stats;           /* map statistics */
	queue_chain_t    pmaps;                 /* global list of pmaps */
	tt_entry_t       *tt_entry_free;        /* free translation table entries */
	struct pmap      *XNU_PTRAUTH_SIGNED_PTR("pmap.nested_pmap") nested_pmap;  /* nested pmap */
	vm_map_address_t nested_region_addr;
	vm_map_offset_t  nested_region_size;
	vm_map_offset_t  nested_region_true_start;
	vm_map_offset_t  nested_region_true_end;
	unsigned int     *nested_region_asid_bitmap;

#if (__ARM_VMSA__ <= 7)
	unsigned int     tte_index_max;         /* max tte index in translation table entries */
#endif
	unsigned int     stamp;                 /* creation stamp */
	_Atomic int32_t  ref_count;             /* pmap reference count */
	unsigned int     gc_status;             /* gc status */
	unsigned int     nested_region_asid_bitmap_size;
	uint32_t         nested_no_bounds_refcnt; /* number of pmaps that nested this pmap without bounds set */
	uint16_t         hw_asid;               /* hardware ASID (see PMAP_VASID below) */
	uint8_t          sw_asid;               /* software (virtual) ASID */
#if MACH_ASSERT
	char             pmap_procname[17];
	bool             pmap_stats_assert;
#endif /* MACH_ASSERT */
	bool             pmap_vm_map_cs_enforced;
#if DEVELOPMENT || DEBUG
	bool             footprint_suspended;
	bool             footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */
	bool             nx_enabled;                /* no execute */
	bool             nested;                    /* is nested */
	bool             is_64bit;                  /* is 64bit */
	bool             nested_has_no_bounds_ref;  /* nested a pmap when the bounds were not set */
	bool             nested_bounds_set;         /* the nesting bounds have been set */
#if HAS_APPLE_PAC
	bool             disable_jop;
#endif /* HAS_APPLE_PAC */
};
#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | pmap->hw_asid)
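/*
 * Example: a pmap with sw_asid 0x01 and hw_asid 0x2A yields
 * PMAP_VASID(pmap) == (0x01 << 16) | 0x2A == 0x1002A.
 */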
#if VM_DEBUG
extern int pmap_list_resident_pages(
	pmap_t        pmap,
	vm_offset_t * listp,
	int           space);
#else /* #if VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* #if VM_DEBUG */
extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);
/* globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;      /* first CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;    /* physical translation table addr */
extern void *ropagetable_begin;
extern void *ropagetable_end;
extern tt_entry_t *invalid_tte;      /* global invalid translation table */
extern pmap_paddr_t invalid_ttep;    /* physical invalid translation table addr */
#define PMAP_CONTEXT(pmap, thread)

/*
 * Platform-dependent prototypes
 */
extern void pmap_switch_user_ttb(pmap_t pmap);
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_collect(pmap_t pmap);
extern void pmap_gc(void);
#if HAS_APPLE_PAC
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
#endif /* HAS_APPLE_PAC */
/*
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {                 \
	pmap_set_pmap(vm_map_pmap(new_map), th);                \
}
#define pmap_kernel() \
	(kernel_pmap)
#define pmap_compressed(pmap) \
	((pmap)->stats.compressed)

#define pmap_resident_count(pmap) \
	((pmap)->stats.resident_count)

#define pmap_resident_max(pmap) \
	((pmap)->stats.resident_max)
/* pmap_copy() and pmap_pageable() are not required on this architecture. */
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)

#define pmap_pageable(pmap, start, end, pageable)
#define pmap_kernel_va(VA) \
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) \
	(KERN_INVALID_ADDRESS)
#define copyinmsg(from, to, cnt) \
	copyin(from, to, cnt)

#define copyoutmsg(from, to, cnt) \
	copyout(from, to, cnt)
extern pmap_paddr_t kvtophys(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);
extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);
#define PMAP_MAP_BD_DEVICE                    0x0
#define PMAP_MAP_BD_WCOMB                     0x1
#define PMAP_MAP_BD_POSTED                    0x2
#define PMAP_MAP_BD_POSTED_REORDERED          0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK                      0x7
extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);
extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_sharedpage(pmap_t pmap);
extern void pmap_protect_sharedpage(void);
extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);
#if XNU_MONITOR
/* exposed for use by the HMAC SHA driver */
extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx,
    void (*callback)(void *ctx, ppnum_t page_number, const void *page));
extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len));
extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr);
extern void pmap_clear_ppl_hashed_flag_all(void);
extern void pmap_check_ppl_hashed_flag_all(void);
#endif /* XNU_MONITOR */
extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);
#define MACHINE_PMAP_IS_EMPTY 1
extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN     0x02
#define ARM_PMAP_MAX_OFFSET_MAX     0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE  0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO   0x10
extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);
boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);
#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
extern void pmap_disable_user_jop(pmap_t pmap);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_SHAREDPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_RETURN_INDEX 24
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36
#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_ALLOC_INIT_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC */
#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX 73
#define PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX 74

#if DEVELOPMENT || DEBUG
#define PMAP_TEST_TEXT_CORRUPTION_INDEX 76
#endif /* DEVELOPMENT || DEBUG */

#define PMAP_COUNT 77
#define PMAP_INVALID_CPU_NUM (~0U)

struct pmap_cpu_data_array_entry {
	pmap_cpu_data_t cpu_data;
} __attribute__((aligned(1 << MAX_L2_CLINE)));
/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);
/*
 * For most batched page operations, we pick a sane default page count
 * interval at which to check for pending preemption and exit the PPL if found.
 */
#define PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL 64

static inline bool
pmap_pending_preemption(void)
{
	return !!(*((volatile ast_t *)ast_pending()) & AST_URGENT);
}
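/*
 * Sketch of the intended batching pattern (simplified; not a verbatim pmap
 * routine): process pages in chunks and return to the caller when an urgent
 * AST is pending, so a preemption request never waits on a full batch.
 *
 *	while (npages > 0) {
 *		unsigned int chunk = MIN(npages, PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL);
 *		// ... operate on "chunk" pages ...
 *		npages -= chunk;
 *		if (npages > 0 && pmap_pending_preemption()) {
 *			break;  // caller re-enters after taking the AST
 *		}
 *	}
 */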
#if XNU_MONITOR
extern boolean_t pmap_ppl_locked_down;
/*
 * Denotes the bounds of the PPL stacks. These are visible so that other code
 * can check if addresses are part of the PPL stacks.
 */
extern void * pmap_stacks_start;
extern void * pmap_stacks_end;
/* Asks if a page belongs to the monitor. */
extern boolean_t pmap_is_monitor(ppnum_t pn);
/*
 * Indicates that we are done with our static bootstrap
 * allocations, so the monitor may now mark the pages
 * that it owns.
 */
extern void pmap_static_allocations_done(void);
/*
 * Indicates that we are done mutating sensitive state in the system, and that
 * the PPL may now restrict write access to PPL owned mappings.
 */
extern void pmap_lockdown_ppl(void);
/* 16KB of PPL stack per CPU regardless of the kernel page size. */
#if !defined(__ARM_16K_PG__)
#define PPL_STACK_SIZE (PAGE_SIZE << 2)
#else
#define PPL_STACK_SIZE PAGE_SIZE
#endif
/* One stack for each CPU, plus a guard page below each stack and above the last stack */
#define PPL_STACK_REGION_SIZE ((MAX_CPUS * (PPL_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)
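/*
 * Worked example (hypothetical values): with MAX_CPUS == 4, 4KB pages
 * (ARM_PGBYTES == 0x1000) and PPL_STACK_SIZE == 16KB, the region is
 * 4 * (16KB + 4KB) + 4KB == 84KB, laid out as
 * [guard][stack0][guard][stack1]...[guard]: one guard page below each
 * stack plus one above the last.
 */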
#define PPL_DATA_SEGMENT_SECTION_NAME "__PPLDATA,__data"
#define PPL_TEXT_SEGMENT_SECTION_NAME "__PPLTEXT,__text,regular,pure_instructions"
#define PPL_DATACONST_SEGMENT_SECTION_NAME "__PPLDATA,__const"

#define MARK_AS_PMAP_DATA \
	__PLACE_IN_SECTION(PPL_DATA_SEGMENT_SECTION_NAME)
#define MARK_AS_PMAP_TEXT \
	__attribute__((used, section(PPL_TEXT_SEGMENT_SECTION_NAME), noinline))
#define MARK_AS_PMAP_RODATA \
	__PLACE_IN_SECTION(PPL_DATACONST_SEGMENT_SECTION_NAME)
#else /* XNU_MONITOR */

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

#endif /* !XNU_MONITOR */
extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse);

extern lck_grp_t pmap_lck_grp;
#if XNU_MONITOR
extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length);
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz)
#define pmap_simple_lock(l)             simple_lock_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_unlock(l)           simple_unlock_nopreempt(l)
#define pmap_simple_lock_try(l)         simple_lock_try_nopreempt(l, &pmap_lck_grp)
#define pmap_lock_bit(l, i)             hw_lock_bit_nopreempt(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i)           hw_unlock_bit_nopreempt(l, i)
#else
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l)             simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l)           simple_unlock(l)
#define pmap_simple_lock_try(l)         simple_lock_try(l, &pmap_lck_grp)
#define pmap_lock_bit(l, i)             hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i)           hw_unlock_bit(l, i)
#endif
#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */
#endif /* #ifndef ASSEMBLER */
#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
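/*
 * Since ~x + 1 == -x in two's complement, this evaluates to 2^64 minus half
 * the root-table span: the midpoint of the top-of-address-space region
 * covered by the kernel's root translation table.
 */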
#endif /* __ARM_KERNEL_PROTECT__ */
#endif /* #ifndef _ARM_PMAP_H_ */