case APRR_USER_RW_INDEX: return XPRR_USER_RW_PERM;
case APRR_PPL_RX_INDEX: return XPRR_PPL_RX_PERM;
case APRR_KERN_RX_INDEX: return XPRR_KERN_RX_PERM;
- case APRR_PPL_RO_INDEX: return XPRR_PPL_RO_PERM;
+ case APRR_USER_XO_INDEX: return XPRR_USER_XO_PERM;
case APRR_KERN_RO_INDEX: return XPRR_KERN_RO_PERM;
case APRR_KERN0_RX_INDEX: return XPRR_KERN0_RO_PERM;
case APRR_KERN0_RO_INDEX: return XPRR_KERN0_RO_PERM;
case XPRR_USER_RW_PERM: return APRR_USER_RW_INDEX;
case XPRR_PPL_RX_PERM: return APRR_PPL_RX_INDEX;
case XPRR_KERN_RX_PERM: return APRR_KERN_RX_INDEX;
- case XPRR_PPL_RO_PERM: return APRR_PPL_RO_INDEX;
+ case XPRR_USER_XO_PERM: return APRR_USER_XO_INDEX;
case XPRR_KERN_RO_PERM: return APRR_KERN_RO_INDEX;
case XPRR_KERN0_RX_PERM: return APRR_KERN0_RO_INDEX;
case XPRR_KERN0_RO_PERM: return APRR_KERN0_RO_INDEX;
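To make the symmetry of the two lookup tables above concrete, here is a minimal round-trip sketch. The converter names aprr_index_to_xprr_perm() and xprr_perm_to_aprr_index() are assumptions, since the diff does not show the enclosing function signatures; note that the KERN0_RX entries visible in the diff are intentionally lossy (RX degrades to RO), so they are excluded.

/*
 * Hypothetical consistency check over the two tables above. The converter
 * names are assumed; only the non-lossy indices shown in the diff are tested.
 */
static void
assert_aprr_xprr_tables_roundtrip(void)
{
	const unsigned int idx[] = {
		APRR_USER_RW_INDEX, APRR_PPL_RX_INDEX, APRR_KERN_RX_INDEX,
		APRR_USER_XO_INDEX, APRR_KERN_RO_INDEX, APRR_KERN0_RO_INDEX,
	};
	for (size_t i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
		assert(xprr_perm_to_aprr_index(aprr_index_to_xprr_perm(idx[i])) == idx[i]);
	}
}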
monitor_start_pa = BootArgs->topOfKernelData;
monitor_end_pa = BootArgs->topOfKernelData + BOOTSTRAP_TABLE_SIZE;
- /* The bootstrap page tables are mapped RO at boostrap. */
- pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_PPL_RO_PERM);
+ /*
+ * The bootstrap page tables are mapped RO at bootstrap.
+ *
+ * Note that this function call requests switching XPRR permissions from
+ * XPRR_KERN_RO_PERM to XPRR_KERN_RO_PERM. Whilst this may seem redundant,
+ * pa_set_range_xprr_perm() does other things too, such as calling
+ * pa_set_range_monitor() on the requested address range and performing a number
+ * of integrity checks on the PTEs. We should therefore still call this
+ * function for all PPL-owned memory, regardless of whether the permissions
+ * actually need to change.
+ */
+ pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_KERN_RO_PERM);
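The comment above can be reduced to roughly the following shape; this is a simplified sketch of the described behaviour, not the xnu implementation. pa_set_range_monitor() is named in the comment itself, while validate_pte_xprr() and rewrite_range_xprr() are hypothetical stand-ins for the integrity checks and the permission rewrite.

/*
 * Sketch: even a from == to call claims the range for the PPL and checks
 * every PTE; only a genuine permission change rewrites the mappings.
 */
static void
pa_set_range_xprr_perm_sketch(pmap_paddr_t start, pmap_paddr_t end,
    unsigned int expected_perm, unsigned int new_perm)
{
	pa_set_range_monitor(start, end);               /* named in the comment above */
	for (pmap_paddr_t pa = start; pa < end; pa += PAGE_SIZE) {
		validate_pte_xprr(pa, expected_perm);   /* hypothetical integrity check */
	}
	if (expected_perm != new_perm) {
		rewrite_range_xprr(start, end, new_perm);   /* hypothetical */
	}
}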
monitor_start_pa = BootArgs->topOfKernelData + BOOTSTRAP_TABLE_SIZE;
monitor_end_pa = avail_start;
/* The other bootstrap allocations are mapped RW at bootstrap. */
pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM);
- /* The RO page tables are mapped RW at bootstrap. */
+ /*
+ * The RO page tables are mapped RW at bootstrap and remain RW after the call
+ * to pa_set_range_xprr_perm(). We do this, as opposed to using XPRR_PPL_RW_PERM,
+ * to work around a functional issue on H11 devices where CTRR shifts the APRR
+ * lookup table index to USER_XO before APRR is applied, hence causing the hardware
+ * to believe we are dealing with a user XO page upon performing a translation.
+ *
+ * Note that this workaround does not pose a security risk: the RO page
+ * tables remain read-only thanks to KTRR/CTRR, so protecting them further
+ * at the APRR level would be unnecessary.
+ */
monitor_start_pa = kvtophys((vm_offset_t)&ropagetable_begin);
monitor_end_pa = monitor_start_pa + ((vm_offset_t)&ropagetable_end - (vm_offset_t)&ropagetable_begin);
- pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM);
+ pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_KERN_RW_PERM);
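A rough model of why leaving the RO page tables at XPRR_KERN_RW_PERM is safe, assuming the usual behaviour that a KTRR/CTRR region denies writes regardless of the page-level permission; this illustrates the comment above and is not hardware documentation.

#include <stdbool.h>

/* Effective writability is the intersection of what CTRR and APRR allow. */
static bool
ro_pagetable_write_allowed(bool inside_ctrr_region, bool aprr_allows_write)
{
	/* CTRR clamps its region read-only no matter what APRR says. */
	return !inside_ctrr_region && aprr_allows_write;
}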
monitor_start_pa = kvtophys(segPPLDATAB);
monitor_end_pa = monitor_start_pa + segSizePPLDATA;
monitor_start_pa = kvtophys(segPPLDATACONSTB);
monitor_end_pa = monitor_start_pa + segSizePPLDATACONST;
- pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_PPL_RO_PERM);
+ pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_KERN_RO_PERM);
}
/*
* Mark the original physical aperture mapping for the PPL stack pages RO as an additional security
* precaution. The real RW mappings are at a different location with guard pages.
*/
- pa_set_range_xprr_perm(pmap_stacks_start_pa, pmap_stacks_end_pa, XPRR_PPL_RW_PERM, XPRR_PPL_RO_PERM);
+ pa_set_range_xprr_perm(pmap_stacks_start_pa, pmap_stacks_end_pa, XPRR_PPL_RW_PERM, XPRR_KERN_RO_PERM);
}
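The guard-page arrangement the comment alludes to can be pictured as follows; a sketch with hypothetical names and layout, not the actual pmap bootstrap code.

/*
 * Illustrative PPL stack layout: each stack is mapped RW at a dedicated VA
 * with unmapped guard pages on either side, while the physical-aperture
 * alias of the same pages stays RO, so a stray write through the aperture
 * faults instead of silently corrupting a PPL stack.
 *
 *   ... | guard | stack 0 (RW) | guard | stack 1 (RW) | guard | ...
 */
static vm_offset_t
ppl_stack_base_sketch(vm_offset_t stacks_va, unsigned int cpu, vm_size_t stack_size)
{
	/* One leading guard page per stack (hypothetical layout helper). */
	return stacks_va + PAGE_SIZE + cpu * (stack_size + PAGE_SIZE);
}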
if ((p = pmap_alloc_pmap()) == PMAP_NULL) {
return PMAP_NULL;
}
+
+ if (ledger) {
+ pmap_ledger_validate(ledger);
+ pmap_ledger_retain(ledger);
+ }
#else
/*
* Allocate a pmap struct from the pmap_zone. Then allocate
}
#endif
+ p->ledger = ledger;
+
if (flags & PMAP_CREATE_64BIT) {
p->min = MACH_VM_MIN_ADDRESS;
p->max = MACH_VM_MAX_ADDRESS;
}
-#if XNU_MONITOR
- if (ledger) {
- pmap_ledger_validate(ledger);
- pmap_ledger_retain(ledger);
- }
-#endif /* XNU_MONITOR */
-
- p->ledger = ledger;
PMAP_LOCK_INIT(p);
memset((void *) &p->stats, 0, sizeof(p->stats));
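A caller's-eye sketch of the ledger handoff above. pmap_create_options(), ledger_instantiate() and ledger_dereference() are existing xnu entry points, but task_ledger_template and the flag choices here are assumptions for illustration.

/* On XNU_MONITOR kernels the PPL validates and retains the ledger before
 * stashing it in p->ledger; the caller still owns its own reference. */
ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
if (pmap == PMAP_NULL) {
	ledger_dereference(ledger);  /* creation failed; drop our reference */
}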
pte_set_was_writeable(tmplate, false);
#if __APRR_SUPPORTED__
- if (__improbable(is_pte_xprr_protected(spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM))) {
+ if (__improbable(is_pte_xprr_protected(spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
+ && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
/* Only test for PPL protection here; User-JIT and user-XO mappings may be mutated by this function. */
panic("%s: modifying a PPL mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
__func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
}
- if (__improbable(is_pte_xprr_protected(tmplate))) {
+ if (__improbable(is_pte_xprr_protected(tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
__func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
}
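Conceptually, the two panic gates above carve user-JIT and user-XO out of the protected set for modification, and user-XO alone out for creation. The helpers below restate that logic using only names from the diff; the helper names themselves are hypothetical.

/*
 * Sketch of the two gates above: an existing PTE may be modified if it is
 * unprotected, user-JIT, or user-XO; a new PTE may be installed only if it
 * is unprotected or user-XO.
 */
static inline bool
may_modify_existing_pte(pt_entry_t spte)
{
	uint64_t perm = pte_to_xprr_perm(spte);
	return !is_pte_xprr_protected(spte)
	    || (perm == XPRR_USER_JIT_PERM) || (perm == XPRR_USER_XO_PERM);
}

static inline bool
may_install_new_pte(pt_entry_t tmplate)
{
	return !is_pte_xprr_protected(tmplate)
	    || (pte_to_xprr_perm(tmplate) == XPRR_USER_XO_PERM);
}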
#if XNU_MONITOR
if (!pmap_ppl_disable && (wimg_bits & PP_ATTR_MONITOR)) {
uint64_t xprr_perm = pte_to_xprr_perm(pte);
- pte &= ~ARM_PTE_XPRR_MASK;
switch (xprr_perm) {
case XPRR_KERN_RO_PERM:
- pte |= xprr_perm_to_pte(XPRR_PPL_RO_PERM);
break;
case XPRR_KERN_RW_PERM:
+ pte &= ~ARM_PTE_XPRR_MASK;
pte |= xprr_perm_to_pte(XPRR_PPL_RW_PERM);
break;
default: