+#endif
+}
+
+/*
+ * pmap_trim_range(pmap, start, end)
+ *
+ * pmap = pmap to operate on
+ * start = start of the range
+ * end = end of the range
+ *
+ * Attempts to deallocate TTEs covering the given range, which must lie
+ * within the pmap's nested region.
+ */
+MARK_AS_PMAP_TEXT static void
+pmap_trim_range(
+ pmap_t pmap,
+ addr64_t start,
+ addr64_t end)
+{
+ addr64_t cur;
+ addr64_t nested_region_start;
+ addr64_t nested_region_end;
+ addr64_t adjusted_start;
+ addr64_t adjusted_end;
+ addr64_t adjust_offmask;
+ tt_entry_t * tte_p;
+ pt_entry_t * pte_p;
+ __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
+
+ if (__improbable(end < start)) {
+ panic("%s: invalid address range, "
+ "pmap=%p, start=%p, end=%p",
+ __func__,
+ pmap, (void*)start, (void*)end);
+ }
+
+	nested_region_start = pmap->nested ? pmap->nested_region_subord_addr : pmap->nested_region_grand_addr;
+ nested_region_end = nested_region_start + pmap->nested_region_size;
+
+ if (__improbable((start < nested_region_start) || (end > nested_region_end))) {
+ panic("%s: range outside nested region %p-%p, "
+ "pmap=%p, start=%p, end=%p",
+ __func__, (void *)nested_region_start, (void *)nested_region_end,
+ pmap, (void*)start, (void*)end);
+ }
+
+ /* Contract the range to TT page boundaries. */
+ adjust_offmask = pt_attr_leaf_table_offmask(pt_attr);
+ adjusted_start = ((start + adjust_offmask) & ~adjust_offmask);
+ adjusted_end = end & ~adjust_offmask;
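+	/*
+	 * Rounding start up and end down means only table entries whose entire
+	 * VA span lies inside [start, end) are considered; partially covered
+	 * tables at either edge are left in place.
+	 */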
+ bool modified = false;
+
+ /* Iterate over the range, trying to remove TTEs. */
+ for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += pt_attr_twig_size(pt_attr)) {
+ PMAP_LOCK(pmap);
+
+ tte_p = pmap_tte(pmap, cur);
+
+ if (tte_p == (tt_entry_t *) NULL) {
+ goto done;
+ }
+
+ if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
+ pte_p = (pt_entry_t *) ttetokv(*tte_p);
+
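+			/*
+			 * A refcount of zero on the page table descriptor means the
+			 * leaf table behind this TTE no longer holds any mappings, so
+			 * the table itself can be released.
+			 */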
+ if ((ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) &&
+ (pmap != kernel_pmap)) {
+ if (pmap->nested == TRUE) {
+ /* Deallocate for the nested map. */
+ pmap_tte_deallocate(pmap, tte_p, pt_attr_twig_level(pt_attr));
+ } else {
+ /* Just remove for the parent map. */
+ pmap_tte_remove(pmap, tte_p, pt_attr_twig_level(pt_attr));
+ }
+
+ pmap_get_pt_ops(pmap)->flush_tlb_tte_async(cur, pmap);
+ modified = true;
+ }
+ }
+
+done:
+ PMAP_UNLOCK(pmap);
+ }
+
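+	/* Wait for the TLB invalidations issued asynchronously above to complete. */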
+ if (modified) {
+ sync_tlb_flush();
+ }
+
+#if (__ARM_VMSA__ > 7)
+ /* Remove empty L2 TTs. */
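+	/*
+	 * The pass above freed leaf tables; now walk the L1 entries fully
+	 * contained in the range and free any L2 table that is left with no
+	 * populated entries.
+	 */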
+ adjusted_start = ((start + ARM_TT_L1_OFFMASK) & ~ARM_TT_L1_OFFMASK);
+ adjusted_end = end & ~ARM_TT_L1_OFFMASK;
+
+ for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += ARM_TT_L1_SIZE) {
+ /* For each L1 entry in our range... */
+ PMAP_LOCK(pmap);
+
+ bool remove_tt1e = true;
+ tt_entry_t * tt1e_p = pmap_tt1e(pmap, cur);
+ tt_entry_t * tt2e_start;
+ tt_entry_t * tt2e_end;
+ tt_entry_t * tt2e_p;
+ tt_entry_t tt1e;
+
+ if (tt1e_p == NULL) {
+ PMAP_UNLOCK(pmap);
+ continue;
+ }
+
+ tt1e = *tt1e_p;
+
+ if (tt1e == ARM_TTE_TYPE_FAULT) {
+ PMAP_UNLOCK(pmap);
+ continue;
+ }
+
+ tt2e_start = &((tt_entry_t*) phystokv(tt1e & ARM_TTE_TABLE_MASK))[0];
+ tt2e_end = &tt2e_start[TTE_PGENTRIES];
+
+ for (tt2e_p = tt2e_start; tt2e_p < tt2e_end; tt2e_p++) {
+ if (*tt2e_p != ARM_TTE_TYPE_FAULT) {
+ /*
+ * If any TTEs are populated, don't remove the
+ * L1 TT.
+ */
+ remove_tt1e = false;
+ }
+ }
+
+ if (remove_tt1e) {
+ pmap_tte_deallocate(pmap, tt1e_p, PMAP_TT_L1_LEVEL);
+ PMAP_UPDATE_TLBS(pmap, cur, cur + PAGE_SIZE, false);
+ }
+
+ PMAP_UNLOCK(pmap);
+ }
+#endif /* (__ARM_VMSA__ > 7) */
+}
+
+/*
+ * pmap_trim_internal(grand, subord, vstart, nstart, size)
+ *
+ * grand = pmap subord is nested in
+ * subord = nested pmap
+ * vstart = start of the used range in grand
+ * nstart = start of the used range in subord
+ * size = size of the used range
+ *
+ * Attempts to trim the shared region page tables down to only cover the given
+ * range in subord and grand.
+ */
+MARK_AS_PMAP_TEXT static void
+pmap_trim_internal(
+ pmap_t grand,
+ pmap_t subord,
+ addr64_t vstart,
+ addr64_t nstart,
+ uint64_t size)
+{
+ addr64_t vend, nend;
+ addr64_t adjust_offmask;
+
+ if (__improbable(os_add_overflow(vstart, size, &vend))) {
+ panic("%s: grand addr wraps around, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ if (__improbable(os_add_overflow(nstart, size, &nend))) {
+ panic("%s: nested addr wraps around, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ VALIDATE_PMAP(grand);
+ VALIDATE_PMAP(subord);
+
+ __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand);
+
+ PMAP_LOCK(subord);
+
+ if (!subord->nested) {
+ panic("%s: subord is not nestable, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ if (grand->nested) {
+ panic("%s: grand is nestable, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ if (grand->nested_pmap != subord) {
+		panic("%s: grand->nested_pmap != subord, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ if (size != 0) {
+ if ((vstart < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) {
+ panic("%s: grand range not in nested region, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ if ((nstart < grand->nested_region_grand_addr) || (nend > (grand->nested_region_grand_addr + grand->nested_region_size))) {
+ panic("%s: subord range not in nested region, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+ }
+
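+	/*
+	 * If grand does not hold a no-bounds reference on subord, subord's
+	 * bounds were already known when it was nested, so grand only needs to
+	 * inherit them here; there is nothing to trim.
+	 */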
+ if (!grand->nested_has_no_bounds_ref) {
+ assert(subord->nested_bounds_set);
+
+ if (!grand->nested_bounds_set) {
+ /* Inherit the bounds from subord. */
+ grand->nested_region_true_start = (subord->nested_region_true_start - grand->nested_region_subord_addr) + grand->nested_region_grand_addr;
+ grand->nested_region_true_end = (subord->nested_region_true_end - grand->nested_region_subord_addr) + grand->nested_region_grand_addr;
+ grand->nested_bounds_set = true;
+ }
+
+ PMAP_UNLOCK(subord);
+ return;
+ }
+
+ if ((!subord->nested_bounds_set) && size) {
+ adjust_offmask = pt_attr_leaf_table_offmask(pt_attr);
+
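+		/*
+		 * Round the true bounds outward to leaf-table boundaries so that
+		 * the retained region always covers whole page tables.
+		 */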
+ subord->nested_region_true_start = nstart;
+ subord->nested_region_true_end = nend;
+ subord->nested_region_true_start &= ~adjust_offmask;
+
+ if (__improbable(os_add_overflow(subord->nested_region_true_end, adjust_offmask, &subord->nested_region_true_end))) {
+ panic("%s: padded true end wraps around, "
+ "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx",
+ __func__, grand, subord, (void*)vstart, (void*)nstart, size);
+ }
+
+ subord->nested_region_true_end &= ~adjust_offmask;
+ subord->nested_bounds_set = true;
+ }
+
+ if (subord->nested_bounds_set) {
+ /* Inherit the bounds from subord. */
+ grand->nested_region_true_start = (subord->nested_region_true_start - grand->nested_region_subord_addr) + grand->nested_region_grand_addr;
+ grand->nested_region_true_end = (subord->nested_region_true_end - grand->nested_region_subord_addr) + grand->nested_region_grand_addr;
+ grand->nested_bounds_set = true;
+
+ /* If we know the bounds, we can trim the pmap. */
+ grand->nested_has_no_bounds_ref = false;
+ PMAP_UNLOCK(subord);
+ } else {
+ /* Don't trim if we don't know the bounds. */
+ PMAP_UNLOCK(subord);
+ return;
+ }
+
+ /* Trim grand to only cover the given range. */
+ pmap_trim_range(grand, grand->nested_region_grand_addr, grand->nested_region_true_start);
+ pmap_trim_range(grand, grand->nested_region_true_end, (grand->nested_region_grand_addr + grand->nested_region_size));
+
+ /* Try to trim subord. */
+ pmap_trim_subord(subord);
+}
+
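+/*
+ * pmap_trim_self(pmap)
+ *
+ * pmap = pmap to trim
+ *
+ * Drops pmap's no-bounds reference on its nested pmap and, if the nested
+ * bounds are known, trims pmap's view of the nested region.
+ */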
+MARK_AS_PMAP_TEXT static void
+pmap_trim_self(pmap_t pmap)
+{
+ if (pmap->nested_has_no_bounds_ref && pmap->nested_pmap) {
+ /* If we have a no bounds ref, we need to drop it. */
+ PMAP_LOCK(pmap->nested_pmap);
+ pmap->nested_has_no_bounds_ref = false;
+ boolean_t nested_bounds_set = pmap->nested_pmap->nested_bounds_set;
+ vm_map_offset_t nested_region_true_start = (pmap->nested_pmap->nested_region_true_start - pmap->nested_region_subord_addr) + pmap->nested_region_grand_addr;
+ vm_map_offset_t nested_region_true_end = (pmap->nested_pmap->nested_region_true_end - pmap->nested_region_subord_addr) + pmap->nested_region_grand_addr;
+ PMAP_UNLOCK(pmap->nested_pmap);
+
+ if (nested_bounds_set) {
+ pmap_trim_range(pmap, pmap->nested_region_grand_addr, nested_region_true_start);
+ pmap_trim_range(pmap, nested_region_true_end, (pmap->nested_region_grand_addr + pmap->nested_region_size));
+ }
+ /*
+ * Try trimming the nested pmap, in case we had the
+ * last reference.
+ */
+ pmap_trim_subord(pmap->nested_pmap);
+ }
+}
+
+/*
+ * pmap_trim_subord(subord)
+ *
+ * subord = nested pmap we are attempting to trim
+ *
+ * Trims subord if possible.
+ */
+MARK_AS_PMAP_TEXT static void
+pmap_trim_subord(pmap_t subord)
+{
+ bool contract_subord = false;
+
+ PMAP_LOCK(subord);
+
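+	/* Drop the no-bounds reference taken when subord was nested before its bounds were known. */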
+ subord->nested_no_bounds_refcnt--;
+
+ if ((subord->nested_no_bounds_refcnt == 0) && (subord->nested_bounds_set)) {
+ /* If this was the last no bounds reference, trim subord. */
+ contract_subord = true;
+ }
+
+ PMAP_UNLOCK(subord);
+
+ if (contract_subord) {
+ pmap_trim_range(subord, subord->nested_region_subord_addr, subord->nested_region_true_start);
+ pmap_trim_range(subord, subord->nested_region_true_end, subord->nested_region_subord_addr + subord->nested_region_size);
+ }
+}
+
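+/*
+ * pmap_trim(grand, subord, vstart, nstart, size)
+ *
+ * Exported wrapper around pmap_trim_internal(); when XNU_MONITOR is enabled
+ * the trim runs in the PPL via pmap_trim_ppl() and the ledger balances are
+ * checked on return.
+ */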
+void
+pmap_trim(
+ pmap_t grand,
+ pmap_t subord,
+ addr64_t vstart,
+ addr64_t nstart,
+ uint64_t size)
+{
+#if XNU_MONITOR
+ pmap_trim_ppl(grand, subord, vstart, nstart, size);
+
+ pmap_ledger_check_balance(grand);
+ pmap_ledger_check_balance(subord);
+#else
+ pmap_trim_internal(grand, subord, vstart, nstart, size);
+#endif
+}
+
+#if HAS_APPLE_PAC && XNU_MONITOR
+static void *
+pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator)
+{
+ void *res = NULL;
+ boolean_t current_intr_state = ml_set_interrupts_enabled(FALSE);
+
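+	/*
+	 * Disable the kernel key while signing so the pointer is signed as it
+	 * would be in the user's context.
+	 */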
+ ml_set_kernelkey_enabled(FALSE);
+ switch (key) {
+ case ptrauth_key_asia:
+ res = ptrauth_sign_unauthenticated(value, ptrauth_key_asia, discriminator);
+ break;
+ case ptrauth_key_asda:
+ res = ptrauth_sign_unauthenticated(value, ptrauth_key_asda, discriminator);
+ break;
+ default:
+ panic("attempt to sign user pointer without process independent key");
+ }
+ ml_set_kernelkey_enabled(TRUE);
+
+ ml_set_interrupts_enabled(current_intr_state);
+
+ return res;
+}
+
+void *
+pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t discriminator)
+{
+ return pmap_sign_user_ptr_internal(value, key, discriminator);
+}
+
+static void *
+pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator)
+{
+ if ((key != ptrauth_key_asia) && (key != ptrauth_key_asda)) {
+ panic("attempt to auth user pointer without process independent key");
+ }
+
+ void *res = NULL;
+ boolean_t current_intr_state = ml_set_interrupts_enabled(FALSE);
+
+ ml_set_kernelkey_enabled(FALSE);
+ res = ml_auth_ptr_unchecked(value, key, discriminator);
+ ml_set_kernelkey_enabled(TRUE);
+
+ ml_set_interrupts_enabled(current_intr_state);
+
+ return res;
+}
+
+void *
+pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator)
+{
+ return pmap_auth_user_ptr_internal(value, key, discriminator);