+extern void vstart_trap_handler;
+
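+/*
+ * Each boot IDT entry is a fake descriptor naming the common early trap
+ * handler: its offset, the 64-bit kernel code selector, IST 0, and a
+ * present, kernel-privilege interrupt-gate access byte.
+ */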
+#define BOOT_TRAP_VECTOR(t) \
+ [t] = { \
+ (uintptr_t) &vstart_trap_handler, \
+ KERNEL64_CS, \
+ 0, \
+ ACC_P|ACC_PL_K|ACC_INTR_GATE, \
+ 0 \
+ },
+
+/* Recursive macro to iterate 0..31 */
+#define L0(x, n) x(n)
+#define L1(x, n) L0(x,n-1) L0(x,n)
+#define L2(x, n) L1(x,n-2) L1(x,n)
+#define L3(x, n) L2(x,n-4) L2(x,n)
+#define L4(x, n) L3(x,n-8) L3(x,n)
+#define L5(x, n) L4(x,n-16) L4(x,n)
+#define FOR_0_TO_31(x) L5(x,31)
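+/*
+ * e.g. FOR_0_TO_31(BOOT_TRAP_VECTOR) emits one BOOT_TRAP_VECTOR(n) entry
+ * for each n in 0..31, in ascending order: each Ln expands the lower half
+ * of its range before the upper half.
+ */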
+
+/*
+ * Bootstrap IDT. Active only during early startup.
+ * Only the trap vectors are defined since interrupts are masked.
+ * All traps point to a common handler.
+ */
+struct fake_descriptor64 master_boot_idt64[IDTSZ]
+__attribute__((section("__HIB,__desc")))
+__attribute__((aligned(PAGE_SIZE))) = {
+ FOR_0_TO_31(BOOT_TRAP_VECTOR)
+};
+
+static void
+vstart_idt_init(boolean_t master)
+{
+ x86_64_desc_register_t vstart_idt = {
+ sizeof(master_boot_idt64),
+ master_boot_idt64
+ };
+
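+ /*
+ * On the boot CPU, convert the fake descriptors into real 64-bit
+ * interrupt-gate format in place; this only needs to happen once.
+ */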
+ if (master) {
+ fix_desc64(master_boot_idt64, 32);
+ }
+ lidt((void *)&vstart_idt);
+}
+
+extern void *collection_base_pointers[KCNumKinds];
+
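+/*
+ * Slide one kext within a kernel collection: apply its chained fixups,
+ * then rebase the addresses recorded in its fileset entry by 'slide'.
+ */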
+kern_return_t
+i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide)
+{
+ int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers);
+ if (ret != 0) {
+ printf("Sliding pageable kc was stopped\n");
+ return KERN_FAILURE;
+ }
+
+ kernel_collection_adjust_fileset_entry_addrs(mh, slide);
+ return KERN_SUCCESS;
+}
+
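+/*
+ * Slide a kext collection: apply chained fixups, then adjust the vmaddrs
+ * recorded in the collection's mach-o headers and symbol tables by
+ * 'slide', optionally rewriting the fileset mach-o headers themselves.
+ */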
+kern_return_t
+i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers)
+{
+ int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers);
+ if (ret != KERN_SUCCESS) {
+ printf("Kernel Collection slide was stopped with value %d\n", ret);
+ return KERN_FAILURE;
+ }
+
+ kernel_collection_adjust_mh_addrs(mh, slide, adjust_mach_headers,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ return KERN_SUCCESS;
+}
+
+static void
+i386_slide_and_rebase_image(uintptr_t kstart_addr)
+{
+ extern uintptr_t kc_highest_nonlinkedit_vmaddr;
+ kernel_mach_header_t *k_mh, *kc_mh = NULL;
+ kernel_segment_command_t *seg;
+ uintptr_t slide;
+
+ k_mh = &_mh_execute_header;
+ /*
+ * If we're not booting an MH_FILESET, we don't need to slide
+ * anything because EFI has done that for us. When booting an
+ * MH_FILESET, EFI will slide the kernel proper, but not the kexts.
+ * Below, we infer the slide by comparing the slid address of the
+ * kernel's mach-o header and the unslid vmaddr of the first segment
+ * of the mach-o (which is assumed to always point to the mach-o
+ * header).
+ */
+ if (!kernel_mach_header_is_in_fileset(k_mh)) {
+ DBG("[MH] kcgen-style KC\n");
+ return;
+ }
+
+ /*
+ * The kernel is part of a MH_FILESET kernel collection: determine slide
+ * based on first segment's mach-o vmaddr.
+ */
+ seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
+ assert(seg->cmd == LC_SEGMENT_KERNEL);
+ slide = (uintptr_t)k_mh - seg->vmaddr;
+ DBG("[MH] Sliding new-style KC: %llu\n", (unsigned long long)slide);
+
+ /*
+ * The kernel collection mach-o header should be the start address
+ * passed to us by EFI.
+ */
+ kc_mh = (kernel_mach_header_t *)(kstart_addr);
+ assert(kc_mh->filetype == MH_FILESET);
+
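+ /*
+ * Record the primary kernel collection's header and slide with the
+ * platform expert so later collection lookups see the slid addresses.
+ */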
+ PE_set_kc_header(KCKindPrimary, kc_mh, slide);
+
+ /*
+ * rebase/slide all the kexts in the collection
+ * (EFI should have already rebased the kernel)
+ */
+ kernel_collection_slide(kc_mh, (const void **) (void *)collection_base_pointers);
+
+ /*
+ * Now adjust the vmaddr fields of all mach-o headers
+ * and symbols in this MH_FILESET
+ */
+ kernel_collection_adjust_mh_addrs(kc_mh, slide, false,
+ NULL, NULL, NULL, NULL, NULL, NULL, &kc_highest_nonlinkedit_vmaddr);
+}
+
+/*
+ * vstart() is called in the natural mode (64-bit for K64, 32-bit for K32)
+ * on a set of bootstrap pagetables which use large, 2MB pages to map
+ * all of physical memory in both cases. See idle_pt.c for details.
+ *
+ * In K64 this identity mapping is mirrored in the top and bottom 512GB
+ * slots of the PML4.
+ *
+ * The bootstrap processor is called with argument boot_args_start pointing to
+ * the boot-args block. The kernel's (4K page) page tables are allocated and
+ * initialized before switching to these.
+ *
+ * Non-bootstrap processors are called with argument boot_args_start NULL.
+ * These processors switch immediately to the existing kernel page tables.
+ */
+__attribute__((noreturn))
+void
+vstart(vm_offset_t boot_args_start)
+{
+ boolean_t is_boot_cpu = (boot_args_start != 0);
+ int cpu = 0;
+ uint32_t lphysfree;
+#if DEBUG
+ uint64_t gsbase;
+#endif
+
+ postcode(VSTART_ENTRY);
+
+ /*
+ * Set-up temporary trap handlers during page-table set-up.
+ */
+
+ if (is_boot_cpu) {
+ vstart_idt_init(TRUE);
+ postcode(VSTART_IDT_INIT);
+
+ /*
+ * Ensure that any %gs-relative access results in an immediate fault
+ * until gsbase is properly initialized below
+ */
+ wrmsr64(MSR_IA32_GS_BASE, EARLY_GSBASE_MAGIC);
+
+ /*
+ * Get startup parameters.
+ */
+ kernelBootArgs = (boot_args *)boot_args_start;
+ lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
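+ /* physfree: lphysfree rounded up to a page boundary, i.e. the first free page after the kernel image */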
+ physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
+
+ pal_serial_init();
+
+ DBG("revision 0x%x\n", kernelBootArgs->Revision);
+ DBG("version 0x%x\n", kernelBootArgs->Version);
+ DBG("command line %s\n", kernelBootArgs->CommandLine);
+ DBG("memory map 0x%x\n", kernelBootArgs->MemoryMap);
+ DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
+ DBG("kaddr 0x%x\n", kernelBootArgs->kaddr);
+ DBG("ksize 0x%x\n", kernelBootArgs->ksize);
+ DBG("physfree %p\n", physfree);
+ DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
+ kernelBootArgs,
+ &kernelBootArgs->ksize,
+ &kernelBootArgs->kaddr);
+ DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize);
+ DBG("KC_hdrs_vaddr %p\n", (void *)kernelBootArgs->KC_hdrs_vaddr);
+
+ if (kernelBootArgs->Version >= 2 && kernelBootArgs->Revision >= 1 &&
+ kernelBootArgs->KC_hdrs_vaddr != 0) {
+ /*
+ * slide the header addresses in all mach-o segments and sections, and
+ * perform any new-style chained-fixup sliding for kexts, as necessary.
+ * Note that efiboot has already loaded the kernel and all LC_SEGMENT_64s
+ * that correspond to the kexts present in the primary KC, into slid addresses.
+ */
+ i386_slide_and_rebase_image((uintptr_t)ml_static_ptovirt(kernelBootArgs->KC_hdrs_vaddr));
+ }
+
+ /*
+ * Setup boot args given the physical start address.
+ * Note: PE_init_platform needs to be called before Idle_PTs_init
+ * because access to the DeviceTree is required to read the
+ * random seed before generating a random physical map slide.
+ */
+ kernelBootArgs = (boot_args *)
+ ml_static_ptovirt(boot_args_start);
+ DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
+ (unsigned long)boot_args_start, kernelBootArgs);
+
+#if KASAN
+ kasan_reserve_memory(kernelBootArgs);
+#endif
+
+ PE_init_platform(FALSE, kernelBootArgs);
+ postcode(PE_INIT_PLATFORM_D);
+
+ Idle_PTs_init();
+ postcode(VSTART_IDLE_PTS_INIT);
+
+#if KASAN
+ /* Init kasan and map whatever was stolen from physfree */
+ kasan_init();
+ kasan_notify_stolen((uintptr_t)ml_static_ptovirt((vm_offset_t)physfree));
+#endif
+
+#if MONOTONIC
+ mt_early_init();
+#endif /* MONOTONIC */
+
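+ /*
+ * first_avail: physical address of the first byte not consumed by the
+ * kernel image or early bootstrap allocations.
+ */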
+ first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);
+
+ cpu_data_alloc(TRUE);
+
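+ /* Build and load the boot cpu's descriptor tables (GDT, IDT, LDT, TSS). */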
+ cpu_desc_init(cpu_datap(0));
+ postcode(VSTART_CPU_DESC_INIT);
+ cpu_desc_load(cpu_datap(0));
+
+ postcode(VSTART_CPU_MODE_INIT);
+ cpu_syscall_init(cpu_datap(0)); /* cpu_syscall_init() will be
+ * invoked on the APs
+ * via i386_init_slave()
+ */
+ } else {
+ /* Slave CPUs should use the basic IDT until i386_init_slave() */
+ vstart_idt_init(FALSE);
+
+ /* Switch to kernel's page tables (from the Boot PTs) */
+ set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
+
+ /* Find our logical cpu number */
+ cpu = lapic_to_cpu[lapic_safe_apicid()];
+#if DEBUG
+ gsbase = rdmsr64(MSR_IA32_GS_BASE);
+#endif
+ cpu_desc_load(cpu_datap(cpu));
+#if DEBUG
+ DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, (unsigned long long)gsbase);
+#endif
+
+ /*
+ * Before we can discover our local APIC ID, we need to potentially
+ * initialize X2APIC, if it's enabled and firmware started us with
+ * the APIC in legacy mode.
+ */
+ lapic_init_slave();
+ }
+
+ early_boot = 0;
+ postcode(VSTART_EXIT);
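+ /*
+ * Switch onto this cpu's interrupt stack and continue in i386_init()
+ * (boot cpu) or i386_init_slave() (APs); x86_init_wrapper() does not
+ * return.
+ */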
+ x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init
+ : (uintptr_t) i386_init_slave,
+ cpu_datap(cpu)->cpu_int_stack_top);
+}
+
+void
+pstate_trace(void)
+{
+}