+#include <i386/locks.h> /* LcksOpts */
+#if DEBUG
+#include <machine/pal_routines.h>
+#endif
+#if DEBUG
+#define DBG(x...) kprintf(x)
+#else
+#define DBG(x...)
+#endif
+
int debug_task;

/* Boot arguments handed in by the booter — NOTE(review): set elsewhere;
 * only the declaration is visible in this chunk. */
static boot_args *kernelBootArgs;

extern int disableConsoleOutput;
extern const char version[];
extern const char version_variant[];
extern int nx_enabled;

/*
 * Set initial values so that ml_phys_* routines can use the booter's ID mapping
 * to touch physical space before the kernel's physical aperture exists.
 * Both are recomputed by physmap_init() once the physmap is built.
 */
uint64_t physmap_base = 0;
uint64_t physmap_max = 4*GB;

/* The four levels of the "idle" boot page tables, allocated and wired
 * together in Idle_PTs_init(): KPTphys (L1), IdlePTD (L2), IdlePDPT (L3),
 * IdlePML4 (L4 root). */
pd_entry_t *KPTphys;
pd_entry_t *IdlePTD;
pdpt_entry_t *IdlePDPT;
pml4_entry_t *IdlePML4;

/* Next free byte of boot-time physical memory; advanced by ALLOCPAGES(). */
char *physfree;
+
+/*
+ * Note: ALLOCPAGES() can only be used safely within Idle_PTs_init()
+ * due to the mutation of physfree.
+ */
+static void *
+ALLOCPAGES(int npages)
+{
+ uintptr_t tmp = (uintptr_t)physfree;
+ bzero(physfree, npages * PAGE_SIZE);
+ physfree += npages * PAGE_SIZE;
+ tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
+ return (void *)tmp;
+}
+
+static void
+fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
+{
+ int i;
+ for (i=0; i<count; i++) {
+ base[index] = src | prot | INTEL_PTE_VALID;
+ src += PAGE_SIZE;
+ index++;
+ }
+}
+
/* NOTE(review): presumably the first physical address available after the
 * boot allocations — defined elsewhere; confirm against the pmap layer. */
extern pmap_paddr_t first_avail;

/* Debug hook; not referenced in this chunk. */
int break_kprintf = 0;
+
+uint64_t
+x86_64_pre_sleep(void)
+{
+ IdlePML4[0] = IdlePML4[KERNEL_PML4_INDEX];
+ uint64_t oldcr3 = get_cr3_raw();
+ set_cr3_raw((uint32_t) (uintptr_t)ID_MAP_VTOP(IdlePML4));
+ return oldcr3;
+}
+
/*
 * Undo x86_64_pre_sleep(): drop the temporary low alias of the kernel
 * mapping and restore the CR3 value saved before sleep.
 * NOTE(review): new_cr3 is truncated to 32 bits, mirroring pre_sleep's
 * assumption that the sleep-time page tables reside below 4GB — confirm.
 */
void
x86_64_post_sleep(uint64_t new_cr3)
{
	IdlePML4[0] = 0;
	set_cr3_raw((uint32_t) new_cr3);
}
+
+
+
+
// Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address.
// NPHYSMAP is determined by the maximum supported RAM size plus 4GB to account
// for the PCI hole (which is less than 4GB, but never more).
+
/* Compile-time guard: NPHYSMAP is capped at PTE_PER_PAGE/2 so that the
 * randomised L3 slide applied in physmap_init() (an 8-bit offset added to
 * the L3 index) can never run past the end of the 512-entry L3 table.
 * If the bound is violated the array size below becomes -1 and the
 * declaration fails to compile.
 */
extern int maxphymapsupported[NPHYSMAP <= (PTE_PER_PAGE/2) ? 1 : -1];
+
/*
 * Build the kernel physmap: NPHYSMAP GB of physical memory mapped at a
 * high kernel virtual address using 2MB (PS) large pages, writable and
 * NX at every level. The L3 starting index is randomised so the physmap
 * base is slid; the resulting window is published via physmap_base /
 * physmap_max and hooked into IdlePML4.
 */
static void
physmap_init(void)
{
	pt_entry_t *physmapL3 = ALLOCPAGES(1);
	struct {
		pt_entry_t entries[PTE_PER_PAGE];
	} * physmapL2 = ALLOCPAGES(NPHYSMAP);

	uint64_t i;
	/* 8-bit random slide for the L3 index. Combined with the
	 * NPHYSMAP <= PTE_PER_PAGE/2 guard above, i + phys_random_L3 always
	 * stays inside the 512-entry L3 table. */
	uint8_t phys_random_L3 = early_random() & 0xFF;

	/* We assume NX support. Mark all levels of the PHYSMAP NX
	 * to avoid granting executability via a single bit flip.
	 */
#if DEVELOPMENT || DEBUG
	/* Sanity check: verify the CPU actually reports the XD (NX) feature
	 * before relying on the NX bits set below. */
	uint32_t reg[4];
	do_cpuid(0x80000000, reg);
	if (reg[eax] >= 0x80000001) {
		do_cpuid(0x80000001, reg);
		assert(reg[edx] & CPUID_EXTFEATURE_XD);
	}
#endif /* DEVELOPMENT || DEBUG */

	for(i = 0; i < NPHYSMAP; i++) {
		/* L3 entry i (slid by the random offset) points at the i-th L2 page. */
		physmapL3[i + phys_random_L3] =
		    ((uintptr_t)ID_MAP_VTOP(&physmapL2[i]))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;

		uint64_t j;
		for(j = 0; j < PTE_PER_PAGE; j++) {
			/* Each L2 entry is a 2MB large-page mapping of physical
			 * memory, laid out linearly across the whole physmap. */
			physmapL2[i].entries[j] =
			    ((i * PTE_PER_PAGE + j) << PDSHIFT)
			    | INTEL_PTE_PS
			    | INTEL_PTE_VALID
			    | INTEL_PTE_NX
			    | INTEL_PTE_WRITE;
		}
	}

	/* Hook the physmap L3 page into the top-level idle PML4. */
	IdlePML4[KERNEL_PHYSMAP_PML4_INDEX] =
	    ((uintptr_t)ID_MAP_VTOP(physmapL3))
	    | INTEL_PTE_VALID
	    | INTEL_PTE_NX
	    | INTEL_PTE_WRITE;

	/* Publish the (slid) virtual window covering the physmap. */
	physmap_base = KVADDR(KERNEL_PHYSMAP_PML4_INDEX, phys_random_L3, 0, 0);
	physmap_max = physmap_base + NPHYSMAP * GB;
	DBG("Physical address map base: 0x%qx\n", physmap_base);
	DBG("Physical map idlepml4[%d]: 0x%llx\n",
	    KERNEL_PHYSMAP_PML4_INDEX, IdlePML4[KERNEL_PHYSMAP_PML4_INDEX]);
}
+
+static void
+descriptor_alias_init()
+{
+ vm_offset_t master_gdt_phys;
+ vm_offset_t master_gdt_alias_phys;
+ vm_offset_t master_idt_phys;
+ vm_offset_t master_idt_alias_phys;
+
+ assert(((vm_offset_t)master_gdt & PAGE_MASK) == 0);
+ assert(((vm_offset_t)master_idt64 & PAGE_MASK) == 0);
+
+ master_gdt_phys = (vm_offset_t) ID_MAP_VTOP(master_gdt);
+ master_idt_phys = (vm_offset_t) ID_MAP_VTOP(master_idt64);
+ master_gdt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_GDT_ALIAS);
+ master_idt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_IDT_ALIAS);
+
+ DBG("master_gdt_phys: %p\n", (void *) master_gdt_phys);
+ DBG("master_idt_phys: %p\n", (void *) master_idt_phys);
+ DBG("master_gdt_alias_phys: %p\n", (void *) master_gdt_alias_phys);
+ DBG("master_idt_alias_phys: %p\n", (void *) master_idt_alias_phys);
+
+ KPTphys[atop_kernel(master_gdt_alias_phys)] = master_gdt_phys |
+ INTEL_PTE_VALID | INTEL_PTE_NX | INTEL_PTE_WRITE;
+ KPTphys[atop_kernel(master_idt_alias_phys)] = master_idt_phys |
+ INTEL_PTE_VALID | INTEL_PTE_NX; /* read-only */
+}
+
+static void
+Idle_PTs_init(void)
+{
+ /* Allocate the "idle" kernel page tables: */
+ KPTphys = ALLOCPAGES(NKPT); /* level 1 */
+ IdlePTD = ALLOCPAGES(NPGPTD); /* level 2 */
+ IdlePDPT = ALLOCPAGES(1); /* level 3 */
+ IdlePML4 = ALLOCPAGES(1); /* level 4 */
+
+ // Fill the lowest level with everything up to physfree
+ fillkpt(KPTphys,
+ INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));
+
+ /* IdlePTD */
+ fillkpt(IdlePTD,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);
+
+ // IdlePDPT entries
+ fillkpt(IdlePDPT,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);
+
+ // IdlePML4 single entry for kernel space.
+ fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
+ INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);
+
+ postcode(VSTART_PHYSMAP_INIT);
+
+ physmap_init();
+
+ postcode(VSTART_DESC_ALIAS_INIT);
+
+ descriptor_alias_init();
+
+ postcode(VSTART_SET_CR3);
+
+ // Switch to the page tables..
+ set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));