+ IdlePML4[KERNEL_DBLMAP_PML4_INDEX] = ((uintptr_t)ID_MAP_VTOP(dblmapL3)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF;
+
+ dblmap_base = KVADDR(KERNEL_DBLMAP_PML4_INDEX, dblmapL3, 0, 0);
+ dblmap_max = dblmap_base + hdescszr;
+ /* Calculate the double-map distance, which accounts for the current
+ * KASLR slide.
+ */
+ dblmap_dist = dblmap_base - hdescb;
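+ /* Illustrative note (an assumption, not taken from this change): DBLMAP()
+ * is presumed to translate a kernel VA to its alias in the double map by
+ * adding this distance, i.e. something of the form:
+ *   #define DBLMAP(x) ((x) + dblmap_dist)
+ */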
+ idt64_hndl_table0[1] = DBLMAP(idt64_hndl_table0[1]);
+ idt64_hndl_table0[6] = (uint64_t)(uintptr_t)&kernel_stack_mask;
+
+ extern cpu_data_t cpshadows[], scdatas[];
+ uintptr_t cd1 = (uintptr_t) &cpshadows[0];
+ uintptr_t cd2 = (uintptr_t) &scdatas[0];
+ /* Record the displacement from the kernel's per-CPU data pointer, eventually
+ * programmed into GSBASE, to the "shadows" in the double-mapped
+ * region. These are not aliases, but separate physical allocations
+ * containing data required in the double-mapped trampolines.
+ */
+ idt64_hndl_table0[2] = dblmap_dist + cd1 - cd2;
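+ /* Worked example (illustrative; assumes GSBASE holds &scdatas[i] on CPU i,
+ * per the comment above): because cpshadows[] and scdatas[] are parallel
+ * cpu_data_t arrays, a trampoline reaches its shadow with one addition:
+ *   &cpshadows[i] + dblmap_dist == GSBASE + (cd1 - cd2) + dblmap_dist
+ *                               == GSBASE + idt64_hndl_table0[2]
+ */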
+
+ DBG("Double map base: 0x%qx\n", dblmap_base);
+ DBG("double map idlepml4[%d]: 0x%llx\n", KERNEL_DBLMAP_PML4_INDEX, IdlePML4[KERNEL_DBLMAP_PML4_INDEX]);
+ assert(LDTSZ > LDTSZ_MIN);
+}
+
+vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t);
+
+#include <i386/pmap_internal.h>
+
+/* Use of this routine is expected to be synchronized by callers.
+ * It creates non-executable aliases.
+ */
+vm_offset_t dyn_dblmap(vm_offset_t cva, vm_offset_t sz) {
+ vm_offset_t ava = dblmap_max;
+
+ assert((sz & PAGE_MASK) == 0);
+ assert(cva != 0);
+
+ pmap_alias(ava, cva, cva + sz, VM_PROT_READ | VM_PROT_WRITE, PMAP_EXPAND_OPTIONS_ALIASMAP);
+ dblmap_max += sz;
+ return (ava - cva);
+}
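+/* Usage sketch (illustrative, not part of this change): a caller holding a
+ * page-aligned kernel region of page-multiple size can alias it and recover
+ * the alias address by adding the returned displacement:
+ *
+ *   vm_offset_t delta = dyn_dblmap(kva, sz);
+ *   vm_offset_t alias = kva + delta;   // == dblmap_max at call time
+ *
+ * The alias is mapped read/write and, as noted above, non-executable.
+ */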
+/* Adjust offsets interior to the bootstrap interrupt descriptor table to redirect
+ * control to the double-mapped interrupt vectors. The IDTR proper will be
+ * programmed via cpu_desc_load().
+ */
+void idt64_remap(void) {
+ for (int i = 0; i < IDTSZ; i++) {
+ master_idt64[i].offset64 = DBLMAP(master_idt64[i].offset64);
+ }
+}
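+/* Illustrative effect (assumes the DBLMAP() form sketched earlier): after this
+ * loop, an IDT entry whose handler lived at kernel VA H now vectors through
+ * H + dblmap_dist, the double-mapped alias of the same code.
+ */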