+
+void doublemap_init(void);
+
+static void
+Idle_PTs_init(void)
+{
+ /*
+  * Build the kernel's own 4-level "idle" page-table hierarchy and then
+  * switch CR3 to it.  This runs while still executing on the bootstrap
+  * identity-mapped page tables, so every physical table address that is
+  * installed into a higher-level entry is first translated with
+  * ID_MAP_VTOP() to the identity-map view.
+  */
+ /* Allocate the "idle" kernel page tables: */
+ KPTphys = ALLOCPAGES(NKPT); /* level 1 */
+ IdlePTD = ALLOCPAGES(NPGPTD); /* level 2 */
+ IdlePDPT = ALLOCPAGES(1); /* level 3 */
+ IdlePML4 = ALLOCPAGES(1); /* level 4 */
+
+ // Fill the lowest level with everything up to physfree
+ /* One 4K PTE per page of physical memory in [0, physfree). */
+ fillkpt(KPTphys,
+ INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));
+
+ /* IdlePTD */
+ /* Level-2 entries point at the NKPT level-1 pages just filled. */
+ fillkpt(IdlePTD,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);
+
+ // IdlePDPT entries
+ fillkpt(IdlePDPT,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);
+
+ // IdlePML4 single entry for kernel space.
+ fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);
+
+ postcode(VSTART_PHYSMAP_INIT);
+
+ /*
+  * Populate the physmap window and the high double-mapped region, and
+  * rebase the 64-bit IDT, before the new tables are made live below
+  * (see physmap_init()/doublemap_init()/idt64_remap() for details).
+  */
+ physmap_init();
+ doublemap_init();
+ idt64_remap();
+
+ postcode(VSTART_SET_CR3);
+
+ // Switch to the page tables..
+ set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
+
+}
+
+/*
+ * Common entry point for all early traps (an assembly label); declared as a
+ * bare symbol only so its address can be taken in BOOT_TRAP_VECTOR below.
+ */
+extern void vstart_trap_handler;
+
+/*
+ * Emit one fake (pre-fixup) 64-bit interrupt-gate descriptor for trap
+ * vector t, targeting the common handler in the kernel code segment.
+ * The entries are converted to hardware format by fix_desc64() in
+ * vstart_idt_init() before the table is loaded.
+ */
+#define BOOT_TRAP_VECTOR(t) \
+ [t] = { \
+ (uintptr_t) &vstart_trap_handler, \
+ KERNEL64_CS, \
+ 0, \
+ ACC_P|ACC_PL_K|ACC_INTR_GATE, \
+ 0 \
+ },
+
+/* Recursive macro to iterate 0..31 */
+/*
+ * Each Lk(x,n) expands x over a run of 2^k consecutive values ending at n,
+ * so FOR_0_TO_31(x) produces x(0) x(1) ... x(31) at preprocessing time.
+ */
+#define L0(x,n) x(n)
+#define L1(x,n) L0(x,n-1) L0(x,n)
+#define L2(x,n) L1(x,n-2) L1(x,n)
+#define L3(x,n) L2(x,n-4) L2(x,n)
+#define L4(x,n) L3(x,n-8) L3(x,n)
+#define L5(x,n) L4(x,n-16) L4(x,n)
+#define FOR_0_TO_31(x) L5(x,31)
+
+/*
+ * Bootstrap IDT. Active only during early startup.
+ * Only the trap vectors are defined since interrupts are masked.
+ * All traps point to a common handler.
+ */
+struct fake_descriptor64 master_boot_idt64[IDTSZ]
+ __attribute__((section("__HIB,__desc")))
+ __attribute__((aligned(PAGE_SIZE))) = {
+ FOR_0_TO_31(BOOT_TRAP_VECTOR)
+};
+
+static void
+vstart_idt_init(void)
+{
+ /*
+  * Install the bootstrap IDT so that any trap taken during early
+  * page-table construction reaches vstart_trap_handler instead of
+  * causing an unhandled fault.
+  */
+ x86_64_desc_register_t vstart_idt = {
+ sizeof(master_boot_idt64),
+ master_boot_idt64 };
+
+ /*
+  * Convert the 32 fake descriptors (vectors 0..31, matching the
+  * FOR_0_TO_31 expansion above) into hardware descriptor format in
+  * place, then load the IDT register.
+  */
+ fix_desc64(master_boot_idt64, 32);
+ lidt((void *)&vstart_idt);
+}
+
+/*
+ * vstart() is called in the natural mode (64bit for K64, 32 for K32)
+ * on a set of bootstrap pagetables which use large, 2MB pages to map
+ * all of physical memory in both. See idle_pt.c for details.
+ *
+ * In K64 this identity mapping is mirrored the top and bottom 512GB
+ * slots of PML4.
+ *
+ * The bootstrap processor called with argument boot_args_start pointing to
+ * the boot-args block. The kernel's (4K page) page tables are allocated and
+ * initialized before switching to these.
+ *
+ * Non-bootstrap processors are called with argument boot_args_start NULL.
+ * These processors switch immediately to the existing kernel page tables.
+ */
+__attribute__((noreturn))
+void
+vstart(vm_offset_t boot_args_start)
+{
+ boolean_t is_boot_cpu = !(boot_args_start == 0);
+ int cpu = 0;
+ uint32_t lphysfree;
+
+ postcode(VSTART_ENTRY);
+
+ if (is_boot_cpu) {
+ /*
+ * Set-up temporary trap handlers during page-table set-up.
+ */
+ vstart_idt_init();
+ postcode(VSTART_IDT_INIT);
+
+ /*
+ * Get startup parameters.
+ */
+ kernelBootArgs = (boot_args *)boot_args_start;
+ lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
+ physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
+
+#if DEVELOPMENT || DEBUG
+ pal_serial_init();
+#endif
+ DBG("revision 0x%x\n", kernelBootArgs->Revision);
+ DBG("version 0x%x\n", kernelBootArgs->Version);
+ DBG("command line %s\n", kernelBootArgs->CommandLine);
+ DBG("memory map 0x%x\n", kernelBootArgs->MemoryMap);
+ DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
+ DBG("kaddr 0x%x\n", kernelBootArgs->kaddr);
+ DBG("ksize 0x%x\n", kernelBootArgs->ksize);
+ DBG("physfree %p\n", physfree);
+ DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
+ kernelBootArgs,
+ &kernelBootArgs->ksize,
+ &kernelBootArgs->kaddr);
+ DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize);
+
+ /*
+ * Setup boot args given the physical start address.
+ * Note: PE_init_platform needs to be called before Idle_PTs_init
+ * because access to the DeviceTree is required to read the
+ * random seed before generating a random physical map slide.
+ */
+ kernelBootArgs = (boot_args *)
+ ml_static_ptovirt(boot_args_start);
+ DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
+ (unsigned long)boot_args_start, kernelBootArgs);
+
+#if KASAN
+ kasan_reserve_memory(kernelBootArgs);
+#endif
+
+ PE_init_platform(FALSE, kernelBootArgs);
+ postcode(PE_INIT_PLATFORM_D);
+
+ Idle_PTs_init();
+ postcode(VSTART_IDLE_PTS_INIT);
+
+#if KASAN
+ /* Init kasan and map whatever was stolen from physfree */
+ kasan_init();
+ kasan_notify_stolen((uintptr_t)ml_static_ptovirt((vm_offset_t)physfree));