/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/i386/vm_param.h>

#include <string.h>
#include <stdint.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/pms.h>
#include <kern/cpu_data.h>
#include <kern/processor.h>
#include <sys/kdebug.h>
#include <console/serial_protos.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <machine/pal_routines.h>
#include <i386/fpu.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#include <i386/machine_routines.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/ucode.h>
#include <i386/postcode.h>
#include <i386/Diagnostics.h>
#include <i386/pmCPU.h>
#include <i386/tsc.h>
#include <i386/locks.h> /* LcksOpts */
#if DEBUG
#include <machine/pal_routines.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <san/kasan.h>

#if DEBUG
#define DBG(x ...)      kprintf(x)
#else
#define DBG(x ...)
#endif

int debug_task;

int early_boot = 1;

static boot_args *kernelBootArgs;

extern int disableConsoleOutput;
extern const char version[];
extern const char version_variant[];
extern int nx_enabled;

/*
 * Set initial values so that ml_phys_* routines can use the booter's ID mapping
 * to touch physical space before the kernel's physical aperture exists.
 */
uint64_t physmap_base = 0;
uint64_t physmap_max = 4 * GB;

pd_entry_t *KPTphys;
pd_entry_t *IdlePTD;
pdpt_entry_t *IdlePDPT;
pml4_entry_t *IdlePML4;

int kernPhysPML4Index;
int kernPhysPML4EntryCount;

/*
 * These are 4K mapping page table pages from KPTphys[] that we wound
 * up not using. They get ml_static_mfree()'d once the VM is initialized.
 */
ppnum_t released_PT_ppn = 0;
uint32_t released_PT_cnt = 0;

char *physfree;
void idt64_remap(void);

/*
 * Note: ALLOCPAGES() can only be used safely within Idle_PTs_init()
 * due to the mutation of physfree.
 */
static void *
ALLOCPAGES(int npages)
{
	uintptr_t tmp = (uintptr_t)physfree;
	bzero(physfree, npages * PAGE_SIZE);
	physfree += npages * PAGE_SIZE;
	tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
	return (void *)tmp;
}

static void
fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
{
	int i;
	for (i = 0; i < count; i++) {
		base[index] = src | prot | INTEL_PTE_VALID;
		src += PAGE_SIZE;
		index++;
	}
}
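
/*
 * Worked example, assuming 4KiB pages: fillkpt(KPTphys, INTEL_PTE_WRITE, 0, 0, 4)
 * fills KPTphys[0..3] with valid, writable entries mapping physical addresses
 * 0x0000, 0x1000, 0x2000 and 0x3000. Note that 'src' must already be a
 * physical address (callers use ID_MAP_VTOP), since it is OR'd directly
 * into the entry along with the flag bits.
 */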

extern pmap_paddr_t first_avail;

int break_kprintf = 0;

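/*
 * x86_64_pre_sleep() below aliases PML4 slot 0 to the kernel's slot so that
 * identity-mapped low addresses remain valid, then points CR3 at the
 * bootstrap IdlePML4 (by physical address) for the trip through ACPI sleep;
 * x86_64_post_sleep() undoes the alias once the saved CR3 is restored.
 */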
uint64_t
x86_64_pre_sleep(void)
{
	IdlePML4[0] = IdlePML4[KERNEL_PML4_INDEX];
	uint64_t oldcr3 = get_cr3_raw();
	set_cr3_raw((uint32_t) (uintptr_t)ID_MAP_VTOP(IdlePML4));
	return oldcr3;
}

void
x86_64_post_sleep(uint64_t new_cr3)
{
	IdlePML4[0] = 0;
	set_cr3_raw((uint32_t) new_cr3);
}

// Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address
// NPHYSMAP is determined by the maximum supported RAM size plus 4GB to account
// for the PCI hole (which is less than 4GB but not more).

static int
physmap_init_L2(uint64_t *physStart, pt_entry_t **l2ptep)
{
	unsigned i;
	pt_entry_t *physmapL2 = ALLOCPAGES(1);

	if (physmapL2 == NULL) {
		DBG("physmap_init_L2 page alloc failed when initting L2 for physAddr 0x%llx.\n", *physStart);
		*l2ptep = NULL;
		return -1;
	}

	for (i = 0; i < NPDPG; i++) {
		physmapL2[i] = *physStart
		    | INTEL_PTE_PS
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;

		*physStart += NBPD;
	}
	*l2ptep = physmapL2;
	return 0;
}
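
/*
 * Each L2 page filled above holds NPDPG large-page (INTEL_PTE_PS, 2MiB)
 * entries, i.e. 1GiB of physical space per L3 slot, with *physStart
 * advancing by NBPD per entry. physmap_init_L3() below hangs one such
 * L2 page off each PDPT entry it populates.
 */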

static int
physmap_init_L3(int startIndex, uint64_t highest_phys, uint64_t *physStart, pt_entry_t **l3ptep)
{
	unsigned i;
	int ret;
	pt_entry_t *l2pte;
	pt_entry_t *physmapL3 = ALLOCPAGES(1); /* ALLOCPAGES bzeroes the memory */

	if (physmapL3 == NULL) {
		DBG("physmap_init_L3 page alloc failed when initting L3 for physAddr 0x%llx.\n", *physStart);
		*l3ptep = NULL;
		return -1;
	}

	for (i = startIndex; i < NPDPTPG && *physStart < highest_phys; i++) {
		if ((ret = physmap_init_L2(physStart, &l2pte)) < 0) {
			return ret;
		}

		physmapL3[i] = ((uintptr_t)ID_MAP_VTOP(l2pte))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;
	}

	*l3ptep = physmapL3;

	return 0;
}

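/*
 * The entropy byte phys_random_L3 slides the physmap in whole 1GiB PDPT
 * slots: it inflates pdpte_count, selects the first populated L3 index,
 * and is folded into KVADDR() when physmap_base is derived below.
 */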
static void
physmap_init(uint8_t phys_random_L3)
{
	pt_entry_t *l3pte;
	int pml4_index, i;
	int L3_start_index;
	uint64_t physAddr = 0;
	uint64_t highest_physaddr;
	unsigned pdpte_count;

#if DEVELOPMENT || DEBUG
	if (kernelBootArgs->PhysicalMemorySize > K64_MAXMEM) {
		panic("Installed physical memory exceeds configured maximum.");
	}
#endif

	/*
	 * Add 4GB to the loader-provided physical memory size to account for MMIO space
	 * XXX in a perfect world, we'd scan PCI buses and count the max memory requested in BARs by
	 * XXX all enumerated devices, then add more for hot-pluggable devices.
	 */
	highest_physaddr = kernelBootArgs->PhysicalMemorySize + 4 * GB;

	/*
	 * Calculate the number of PML4 entries we'll need. The total number of entries is
	 *   pdpte_count = (highest_physaddr >> PDPTSHIFT) + phys_random_L3
	 *                 + ((highest_physaddr & PDPTMASK) == 0 ? 0 : 1)
	 *   pml4e_count = pdpte_count >> (PML4SHIFT - PDPTSHIFT), rounded up
	 */
	assert(highest_physaddr < (UINT64_MAX - PDPTMASK));
	pdpte_count = (unsigned) (((highest_physaddr + PDPTMASK) >> PDPTSHIFT) + phys_random_L3);
	kernPhysPML4EntryCount = (pdpte_count + ((1U << (PML4SHIFT - PDPTSHIFT)) - 1)) >> (PML4SHIFT - PDPTSHIFT);
	if (kernPhysPML4EntryCount == 0) {
		kernPhysPML4EntryCount = 1;
	}
	if (kernPhysPML4EntryCount > KERNEL_PHYSMAP_PML4_COUNT_MAX) {
#if DEVELOPMENT || DEBUG
		panic("physmap too large");
#else
		kprintf("[pmap] Limiting physmap to %d PML4s (was %d)\n", KERNEL_PHYSMAP_PML4_COUNT_MAX,
		    kernPhysPML4EntryCount);
		kernPhysPML4EntryCount = KERNEL_PHYSMAP_PML4_COUNT_MAX;
#endif
	}

	kernPhysPML4Index = KERNEL_KEXTS_INDEX - kernPhysPML4EntryCount; /* utb: KERNEL_PHYSMAP_PML4_INDEX */

	/*
	 * XXX: Make sure that the addresses returned for physmapL3 and physmapL2 plus their extents
	 * are in the system-available memory range
	 */

	/*
	 * We assume NX support. Mark all levels of the PHYSMAP NX
	 * to avoid granting executability via a single bit flip.
	 */
#if DEVELOPMENT || DEBUG
	uint32_t reg[4];
	do_cpuid(0x80000000, reg);
	if (reg[eax] >= 0x80000001) {
		do_cpuid(0x80000001, reg);
		assert(reg[edx] & CPUID_EXTFEATURE_XD);
	}
#endif /* DEVELOPMENT || DEBUG */

	L3_start_index = phys_random_L3;

	for (pml4_index = kernPhysPML4Index;
	    pml4_index < (kernPhysPML4Index + kernPhysPML4EntryCount) && physAddr < highest_physaddr;
	    pml4_index++) {
		if (physmap_init_L3(L3_start_index, highest_physaddr, &physAddr, &l3pte) < 0) {
			panic("Physmap page table initialization failed");
			/* NOTREACHED */
		}

		L3_start_index = 0;

		IdlePML4[pml4_index] = ((uintptr_t)ID_MAP_VTOP(l3pte))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;
	}

	physmap_base = KVADDR(kernPhysPML4Index, phys_random_L3, 0, 0);
	/*
	 * physAddr contains the last-mapped physical address, so that's what we
	 * add to physmap_base to derive the ending VA for the physmap.
	 */
	physmap_max = physmap_base + physAddr;

	DBG("Physical address map base: 0x%qx\n", physmap_base);
	for (i = kernPhysPML4Index; i < (kernPhysPML4Index + kernPhysPML4EntryCount); i++) {
		DBG("Physical map idlepml4[%d]: 0x%llx\n", i, IdlePML4[i]);
	}
}
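
/*
 * Worked sizing sketch, for a hypothetical 16GiB machine and entropy byte r:
 * highest_physaddr = 16GiB + 4GiB = 20GiB, so pdpte_count = 20 + r 1GiB
 * slots and kernPhysPML4EntryCount = ceil((20 + r) / 512), i.e. 1 for
 * r <= 492. The physmap then begins at PML4 slot kernPhysPML4Index, with
 * its first populated L3 slot at index r.
 */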

void doublemap_init(uint8_t);

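/*
 * Idle_PTs_init() builds the kernel's own 4-level bootstrap hierarchy
 * (KPTphys -> IdlePTD -> IdlePDPT -> IdlePML4), linking the levels by
 * physical address via ID_MAP_VTOP(), then lays out the randomized physmap
 * and double map before pointing CR3 at IdlePML4.
 */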
static void
Idle_PTs_init(void)
{
	uint64_t rand64;

	/* Allocate the "idle" kernel page tables: */
	KPTphys  = ALLOCPAGES(NKPT);            /* level 1 */
	IdlePTD  = ALLOCPAGES(NPGPTD);          /* level 2 */
	IdlePDPT = ALLOCPAGES(1);               /* level 3 */
	IdlePML4 = ALLOCPAGES(1);               /* level 4 */

	// Fill the lowest level with everything up to physfree
	fillkpt(KPTphys,
	    INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));

	/* IdlePTD */
	fillkpt(IdlePTD,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);

	// IdlePDPT entries
	fillkpt(IdlePDPT,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);

	// IdlePML4 single entry for kernel space.
	fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
	    INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);

	postcode(VSTART_PHYSMAP_INIT);

	/*
	 * early_random() cannot be called more than one time before the cpu's
	 * gsbase is initialized, so use the full 64-bit value to extract the
	 * two 8-bit entropy values needed for address randomization.
	 */
	rand64 = early_random();
	physmap_init(rand64 & 0xFF);
	doublemap_init((rand64 >> 8) & 0xFF);
	idt64_remap();

	postcode(VSTART_SET_CR3);

	// Switch to the page tables.
	set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
}

/*
 * Release any still unused, preallocated boot kernel page tables.
 * start..end is the VA range currently unused.
 */
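/*
 * Index math, assuming 4KiB pages: a VA corresponds to KPTphys slot
 * (va - KERNEL_BASE) >> PAGE_SHIFT; shifting that slot right by PTPGSHIFT
 * (the log2 of the PTEs per page table page) gives the owning IdlePTD slot.
 */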
void
Idle_PTs_release(vm_offset_t start, vm_offset_t end)
{
	uint32_t i;
	uint32_t index_start;
	uint32_t index_limit;
	ppnum_t pn_first;
	ppnum_t pn;
	uint32_t cnt;

	/*
	 * Align start to the next large page boundary
	 */
	start = ((start + I386_LPGMASK) & ~I386_LPGMASK);

	/*
	 * convert start into an index in KPTphys[]
	 */
	index_start = (uint32_t)((start - KERNEL_BASE) >> PAGE_SHIFT);

	/*
	 * Find the ending index in KPTphys[]
	 */
	index_limit = (uint32_t)((end - KERNEL_BASE) >> PAGE_SHIFT);

	if (index_limit > NKPT * PTE_PER_PAGE) {
		index_limit = NKPT * PTE_PER_PAGE;
	}

	/*
	 * Make sure all the 4K page tables are empty.
	 * If not, panic a development/debug kernel.
	 * On a production kernel, since this would stop us from booting,
	 * just abort the operation.
	 */
	for (i = index_start; i < index_limit; ++i) {
		assert(KPTphys[i] == 0);
		if (KPTphys[i] != 0) {
			return;
		}
	}

	/*
	 * Now figure out the indices into the 2nd level page tables, IdlePTD[].
	 */
	index_start >>= PTPGSHIFT;
	index_limit >>= PTPGSHIFT;
	if (index_limit > NPGPTD * PTE_PER_PAGE) {
		index_limit = NPGPTD * PTE_PER_PAGE;
	}

	if (index_limit <= index_start) {
		return;
	}

	/*
	 * Now check the pages referenced from Level 2 tables.
	 * They should be contiguous, assert fail if not on development/debug.
	 * In production, just fail the removal to allow the system to boot.
	 */
	pn_first = 0;
	cnt = 0;
	for (i = index_start; i < index_limit; ++i) {
		assert(IdlePTD[i] != 0);
		if (IdlePTD[i] == 0) {
			return;
		}

		pn = (ppnum_t)((PG_FRAME & IdlePTD[i]) >> PTSHIFT);
		if (cnt == 0) {
			pn_first = pn;
		} else {
			assert(pn == pn_first + cnt);
			if (pn != pn_first + cnt) {
				return;
			}
		}
		++cnt;
	}

	/*
	 * Good to go, clear the level 2 entries and invalidate the TLB
	 */
	for (i = index_start; i < index_limit; ++i) {
		IdlePTD[i] = 0;
	}
	set_cr3_raw(get_cr3_raw());

	/*
	 * Remember these PFNs to be released later in pmap_lowmem_finalize()
	 */
	released_PT_ppn = pn_first;
	released_PT_cnt = cnt;
#if DEVELOPMENT || DEBUG
	printf("Idle_PTs_release %d pages from PFN 0x%x\n", released_PT_cnt, released_PT_ppn);
#endif
}

extern void vstart_trap_handler;

#define BOOT_TRAP_VECTOR(t)                           \
	[t] = {                                       \
		(uintptr_t) &vstart_trap_handler,     \
		KERNEL64_CS,                          \
		0,                                    \
		ACC_P|ACC_PL_K|ACC_INTR_GATE,         \
		0                                     \
	},

/* Recursive macro to iterate 0..31 */
#define L0(x, n)  x(n)
#define L1(x, n)  L0(x,n-1)  L0(x,n)
#define L2(x, n)  L1(x,n-2)  L1(x,n)
#define L3(x, n)  L2(x,n-4)  L2(x,n)
#define L4(x, n)  L3(x,n-8)  L3(x,n)
#define L5(x, n)  L4(x,n-16) L4(x,n)
#define FOR_0_TO_31(x) L5(x,31)

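/*
 * Expansion sketch: L5(x,31) -> L4(x,15) L4(x,31) -> ... -> x(0) x(1) ... x(31),
 * so FOR_0_TO_31(BOOT_TRAP_VECTOR) emits initializers for all 32 trap gates
 * in ascending order.
 */
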
/*
 * Bootstrap IDT. Active only during early startup.
 * Only the trap vectors are defined since interrupts are masked.
 * All traps point to a common handler.
 */
struct fake_descriptor64 master_boot_idt64[IDTSZ]
__attribute__((section("__HIB,__desc")))
__attribute__((aligned(PAGE_SIZE))) = {
	FOR_0_TO_31(BOOT_TRAP_VECTOR)
};

static void
vstart_idt_init(void)
{
	x86_64_desc_register_t vstart_idt = {
		sizeof(master_boot_idt64),
		master_boot_idt64
	};

	fix_desc64(master_boot_idt64, 32);
	lidt((void *)&vstart_idt);
}

/*
 * vstart() is called in the natural mode (64bit for K64, 32 for K32)
 * on a set of bootstrap pagetables which use large, 2MB pages to map
 * all of physical memory in both modes. See idle_pt.c for details.
 *
 * In K64 this identity mapping is mirrored in the top and bottom 512GB
 * slots of PML4.
 *
 * The bootstrap processor is called with argument boot_args_start pointing
 * to the boot-args block. The kernel's (4K page) page tables are allocated
 * and initialized before switching to these.
 *
 * Non-bootstrap processors are called with argument boot_args_start NULL.
 * These processors switch immediately to the existing kernel page tables.
 */
__attribute__((noreturn))
void
vstart(vm_offset_t boot_args_start)
{
	boolean_t is_boot_cpu = !(boot_args_start == 0);
	int cpu = 0;
	uint32_t lphysfree;

	postcode(VSTART_ENTRY);

	if (is_boot_cpu) {
		/*
		 * Set-up temporary trap handlers during page-table set-up.
		 */
		vstart_idt_init();
		postcode(VSTART_IDT_INIT);

		/*
		 * Ensure that any %gs-relative access results in an immediate fault
		 * until gsbase is properly initialized below
		 */
		wrmsr64(MSR_IA32_GS_BASE, EARLY_GSBASE_MAGIC);

		/*
		 * Get startup parameters.
		 */
		kernelBootArgs = (boot_args *)boot_args_start;
		lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
		physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));

		pal_serial_init();

		DBG("revision      0x%x\n", kernelBootArgs->Revision);
		DBG("version       0x%x\n", kernelBootArgs->Version);
		DBG("command line  %s\n", kernelBootArgs->CommandLine);
		DBG("memory map    0x%x\n", kernelBootArgs->MemoryMap);
		DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
		DBG("kaddr         0x%x\n", kernelBootArgs->kaddr);
		DBG("ksize         0x%x\n", kernelBootArgs->ksize);
		DBG("physfree      %p\n", physfree);
		DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
		    kernelBootArgs,
		    &kernelBootArgs->ksize,
		    &kernelBootArgs->kaddr);
		DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize);

		/*
		 * Setup boot args given the physical start address.
		 * Note: PE_init_platform needs to be called before Idle_PTs_init
		 * because access to the DeviceTree is required to read the
		 * random seed before generating a random physical map slide.
		 */
		kernelBootArgs = (boot_args *)
		    ml_static_ptovirt(boot_args_start);
		DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
		    (unsigned long)boot_args_start, kernelBootArgs);

#if KASAN
		kasan_reserve_memory(kernelBootArgs);
#endif

		PE_init_platform(FALSE, kernelBootArgs);
		postcode(PE_INIT_PLATFORM_D);

		Idle_PTs_init();
		postcode(VSTART_IDLE_PTS_INIT);

#if KASAN
		/* Init kasan and map whatever was stolen from physfree */
		kasan_init();
		kasan_notify_stolen((uintptr_t)ml_static_ptovirt((vm_offset_t)physfree));
#endif

#if MONOTONIC
		mt_early_init();
#endif /* MONOTONIC */

		first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);

		cpu_data_alloc(TRUE);

		cpu_desc_init(cpu_datap(0));
		postcode(VSTART_CPU_DESC_INIT);
		cpu_desc_load(cpu_datap(0));

		postcode(VSTART_CPU_MODE_INIT);
		cpu_syscall_init(cpu_datap(0)); /* cpu_syscall_init() will be
		                                 * invoked on the APs
		                                 * via i386_init_slave()
		                                 */
	} else {
		/* Switch to kernel's page tables (from the Boot PTs) */
		set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
		/* Find our logical cpu number */
		cpu = lapic_to_cpu[(LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK];
		DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, rdmsr64(MSR_IA32_GS_BASE));
		cpu_desc_load(cpu_datap(cpu));
	}

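	/*
	 * x86_init_wrapper() hands off on this CPU's interrupt stack to
	 * i386_init() (boot CPU) or i386_init_slave() (APs); vstart() itself
	 * never returns, per the noreturn attribute above.
	 */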
	early_boot = 0;
	postcode(VSTART_EXIT);
	x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init
	    : (uintptr_t) i386_init_slave,
	    cpu_datap(cpu)->cpu_int_stack_top);
}

void
pstate_trace(void)
{
}

/*
 * Cpu initialization. Running virtual, but without MACH VM
 * set up.
 */
void
i386_init(void)
{
	unsigned int maxmem;
	uint64_t maxmemtouse;
	unsigned int cpus = 0;
	boolean_t fidn;
	boolean_t IA32e = TRUE;

	postcode(I386_INIT_ENTRY);

	pal_i386_init();
	tsc_init();
	rtclock_early_init();   /* mach_absolute_time() now functional */

	kernel_debug_string_early("i386_init");
	pstate_trace();

#if CONFIG_MCA
	/* Initialize machine-check handling */
	mca_cpu_init();
#endif

	master_cpu = 0;

	lck_mod_init();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	cpu_init();

	postcode(CPU_INIT_D);

	printf_init();  /* Init this in case we need debugger */
	panic_init();   /* Init this in case we need debugger */

	/* setup debugging output if one has been chosen */
	kernel_debug_string_early("PE_init_kprintf");
	PE_init_kprintf(FALSE);

	kernel_debug_string_early("kernel_early_bootstrap");
	kernel_early_bootstrap();

	if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof(dgWork.dgFlags))) {
		dgWork.dgFlags = 0;
	}

	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* We want a serial keyboard and/or console */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
					"WARNING: Forcing uart driver to output synchronously. "
					"printf()s/IOLogs will impact kernel performance.\n"
					"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
	}
	if (serialmode & SERIALMODE_OUTPUT) {
		(void)switch_to_serial_console();
		disableConsoleOutput = FALSE; /* Allow printfs to happen */
	}

	/* setup console output */
	kernel_debug_string_early("PE_init_printf");
	PE_init_printf(FALSE);

	kprintf("version_variant = %s\n", version_variant);
	kprintf("version         = %s\n", version);

	if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		maxmemtouse = 0;
	} else {
		maxmemtouse = ((uint64_t)maxmem) * MB;
	}

	if (PE_parse_boot_argn("cpus", &cpus, sizeof(cpus))) {
		if ((0 < cpus) && (cpus < max_ncpus)) {
			max_ncpus = cpus;
		}
	}

	/*
	 * debug support for > 4G systems
	 */
	PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof(vm_himemory_mode));
	if (!vm_himemory_mode) {
		kprintf("himemory_mode disabled\n");
	}

	if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof(fidn))) {
		force_immediate_debugger_NMI = FALSE;
	} else {
		force_immediate_debugger_NMI = fidn;
	}

#if DEBUG
	nanoseconds_to_absolutetime(URGENCY_NOTIFICATION_ASSERT_NS, &urgency_notification_assert_abstime_threshold);
#endif
	PE_parse_boot_argn("urgency_notification_abstime",
	    &urgency_notification_assert_abstime_threshold,
	    sizeof(urgency_notification_assert_abstime_threshold));

	if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) {
		nx_enabled = 0;
	}

	/*
	 * VM initialization, after this we're using page tables...
	 * The maximum number of cpus must be set beforehand.
	 */
	kernel_debug_string_early("i386_vm_init");
	i386_vm_init(maxmemtouse, IA32e, kernelBootArgs);

	/* create the console for verbose or pretty mode */
	/* Note: doing this prior to tsc_init() allows for graceful panic! */
	PE_init_platform(TRUE, kernelBootArgs);
	PE_create_console();

	kernel_debug_string_early("power_management_init");
	power_management_init();

#if MONOTONIC
	mt_cpu_up(cpu_datap(0));
#endif /* MONOTONIC */

	processor_bootstrap();
	thread_bootstrap();

	pstate_trace();
	kernel_debug_string_early("machine_startup");
	machine_startup();
	pstate_trace();
}

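/*
 * fast_restart distinguishes a CPU re-awakening with architectural state
 * intact (FAST_SLAVE_INIT, the pmCPUHalt path) from a cold slave start,
 * which must redo descriptor, LAPIC, FPU, MTRR and microcode setup.
 */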
static void __dead2
do_init_slave(boolean_t fast_restart)
{
	void *init_param = FULL_SLAVE_INIT;

	postcode(I386_INIT_SLAVE);

	if (!fast_restart) {
		/* Ensure that caching and write-through are enabled */
		set_cr0(get_cr0() & ~(CR0_NW | CR0_CD));

		DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		    get_cpu_number(), get_cpu_phys_number());

		assert(!ml_get_interrupts_enabled());

		cpu_syscall_init(current_cpu_datap());
		pmap_cpu_init();

#if CONFIG_MCA
		mca_cpu_init();
#endif

		LAPIC_INIT();
		lapic_configure();
		LAPIC_DUMP();
		LAPIC_CPU_MAP_DUMP();

		init_fpu();

#if CONFIG_MTRR
		mtrr_update_cpu();
#endif
		/* update CPU microcode */
		ucode_update_wake();

		/* Do CPU workarounds after the microcode update */
		cpuid_do_was();
	} else {
		init_param = FAST_SLAVE_INIT;
	}

#if CONFIG_VMX
	/* resume VT operation */
	vmx_resume(FALSE);
#endif

#if CONFIG_MTRR
	if (!fast_restart) {
		pat_init();
	}
#endif

	cpu_thread_init();      /* not strictly necessary */

	cpu_init();     /* Sets cpu_running which starter cpu waits for */

#if MONOTONIC
	mt_cpu_up(current_cpu_datap());
#endif /* MONOTONIC */

	slave_main(init_param);

	panic("do_init_slave() returned from slave_main()");
}

/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	do_init_slave(FALSE);
}

/*
 * i386_init_slave_fast() is called from pmCPUHalt.
 * We're running on the idle thread and need to fix up
 * some accounting and get it so that the scheduler sees this
 * CPU again.
 */
void
i386_init_slave_fast(void)
{
	do_init_slave(TRUE);
}

#include <libkern/kernel_mach_header.h>

/* TODO: Evaluate global PTEs for the double-mapped translations */

uint64_t dblmap_base, dblmap_max;
kernel_segment_command_t *hdescseg;

pt_entry_t *dblmapL3;
unsigned int dblallocs;
uint64_t dblmap_dist;
extern uint64_t idt64_hndl_table0[];

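/*
 * doublemap_init() aliases the __HIB segment (trampoline text and data that
 * must stay mapped across pagetable switches) at a second, randomized VA.
 * dblmap_dist, the offset from the primary mapping to its alias, is what
 * DBLMAP() applies when relocating pointers such as the idt64 handler-table
 * entries below.
 */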
void
doublemap_init(uint8_t randL3)
{
	dblmapL3 = ALLOCPAGES(1); // for 512 1GiB entries
	dblallocs++;

	struct {
		pt_entry_t entries[PTE_PER_PAGE];
	} * dblmapL2 = ALLOCPAGES(1); // for 512 2MiB entries
	dblallocs++;

	dblmapL3[randL3] = ((uintptr_t)ID_MAP_VTOP(&dblmapL2[0]))
	    | INTEL_PTE_VALID
	    | INTEL_PTE_WRITE;

	hdescseg = getsegbynamefromheader(&_mh_execute_header, "__HIB");

	vm_offset_t hdescb = hdescseg->vmaddr;
	unsigned long hdescsz = hdescseg->vmsize;
	unsigned long hdescszr = round_page_64(hdescsz);
	vm_offset_t hdescc = hdescb, hdesce = hdescb + hdescszr;

	kernel_section_t *thdescsect = getsectbynamefromheader(&_mh_execute_header, "__HIB", "__text");
	vm_offset_t thdescb = thdescsect->addr;
	unsigned long thdescsz = thdescsect->size;
	unsigned long thdescszr = round_page_64(thdescsz);
	vm_offset_t thdesce = thdescb + thdescszr;

	assert((hdescb & 0xFFF) == 0);
	/* Mirror HIB translations into the double-mapped pagetable subtree */
	for (int i = 0; hdescc < hdesce; i++) {
		struct {
			pt_entry_t entries[PTE_PER_PAGE];
		} * dblmapL1 = ALLOCPAGES(1);
		dblallocs++;
		dblmapL2[0].entries[i] = ((uintptr_t)ID_MAP_VTOP(&dblmapL1[0])) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF;
		int hdescn = (int) ((hdesce - hdescc) / PAGE_SIZE);
		for (int j = 0; j < MIN(PTE_PER_PAGE, hdescn); j++) {
			uint64_t template = INTEL_PTE_VALID;
			if ((hdescc >= thdescb) && (hdescc < thdesce)) {
				/* executable */
			} else {
				template |= INTEL_PTE_WRITE | INTEL_PTE_NX; /* Writeable, NX */
			}
			dblmapL1[0].entries[j] = ((uintptr_t)ID_MAP_VTOP(hdescc)) | template;
			hdescc += PAGE_SIZE;
		}
	}

	IdlePML4[KERNEL_DBLMAP_PML4_INDEX] = ((uintptr_t)ID_MAP_VTOP(dblmapL3)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF;

	dblmap_base = KVADDR(KERNEL_DBLMAP_PML4_INDEX, randL3, 0, 0);
	dblmap_max = dblmap_base + hdescszr;
	/*
	 * Calculate the double-map distance, which accounts for the current
	 * KASLR slide
	 */
	dblmap_dist = dblmap_base - hdescb;
	idt64_hndl_table0[1] = DBLMAP(idt64_hndl_table0[1]); /* 64-bit exit trampoline */
	idt64_hndl_table0[3] = DBLMAP(idt64_hndl_table0[3]); /* 32-bit exit trampoline */
	idt64_hndl_table0[6] = (uint64_t)(uintptr_t)&kernel_stack_mask;

	extern cpu_data_t cpshadows[], scdatas[];
	uintptr_t cd1 = (uintptr_t) &cpshadows[0];
	uintptr_t cd2 = (uintptr_t) &scdatas[0];
	/*
	 * Record the displacement from the kernel's per-CPU data pointer,
	 * eventually programmed into GSBASE, to the "shadows" in the
	 * doublemapped region. These are not aliases, but separate physical
	 * allocations containing data required in the doublemapped trampolines.
	 */
	idt64_hndl_table0[2] = dblmap_dist + cd1 - cd2;

	DBG("Double map base: 0x%qx\n", dblmap_base);
	DBG("double map idlepml4[%d]: 0x%llx\n", KERNEL_DBLMAP_PML4_INDEX, IdlePML4[KERNEL_DBLMAP_PML4_INDEX]);
	assert(LDTSZ > LDTSZ_MIN);
}

vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t);

#include <i386/pmap_internal.h>

/*
 * Use of this routine is expected to be synchronized by callers.
 * Creates non-executable aliases.
 */
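/*
 * The value returned is the delta (ava - cva): adding it to any VA in
 * [cva, cva + sz) yields that page's double-mapped alias.
 */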
vm_offset_t
dyn_dblmap(vm_offset_t cva, vm_offset_t sz)
{
	vm_offset_t ava = dblmap_max;

	assert((sz & PAGE_MASK) == 0);
	assert(cva != 0);

	pmap_alias(ava, cva, cva + sz, VM_PROT_READ | VM_PROT_WRITE, PMAP_EXPAND_OPTIONS_ALIASMAP);
	dblmap_max += sz;
	return ava - cva;
}
1022/* Adjust offsets interior to the bootstrap interrupt descriptor table to redirect
1023 * control to the double-mapped interrupt vectors. The IDTR proper will be
1024 * programmed via cpu_desc_load()
1025 */
0a7de745
A
1026void
1027idt64_remap(void)
1028{
5c9f4661
A
1029 for (int i = 0; i < IDTSZ; i++) {
1030 master_idt64[i].offset64 = DBLMAP(master_idt64[i].offset64);
1031 }
1032}