/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991, 1990, 1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <platforms.h>

#include <mach/i386/vm_param.h>

#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/processor.h>
#include <console/serial_protos.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <machine/pal_routines.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/lapic.h>
#include <i386/mp_desc.h>
#include <i386/mtrr.h>
#include <i386/machine_routines.h>
#include <i386/machine_check.h>
#include <i386/ucode.h>
#include <i386/postcode.h>
#include <i386/Diagnostics.h>
#include <i386/pmCPU.h>
#include <i386/tsc.h>
#include <i386/locks.h>	/* LcksOpts */
#include <i386/cpu_capabilities.h>
#include <machine/db_machdep.h>
#include <machine/pal_routines.h>

#define DBG(x...)	kprintf(x)

#if MACH_KDB
#include <ddb/db_aout.h>
#endif /* MACH_KDB */
static boot_args	*kernelBootArgs;

extern int		disableConsoleOutput;
extern const char	version[];
extern const char	version_variant[];
extern int		nx_enabled;

extern void		*low_eintstack;

pd_entry_t		*IdlePDPT64;
/*
 * Note: ALLOCPAGES() can only be used safely within Idle_PTs_init()
 * due to the mutation of physfree.
 */
static void *
ALLOCPAGES(int npages)
{
	uintptr_t tmp = (uintptr_t)physfree;
	bzero(physfree, npages * PAGE_SIZE);
	physfree += npages * PAGE_SIZE;
	tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
	return (void *)tmp;
}
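
/*
 * ALLOCPAGES() is a simple bump allocator: it hands back 'npages' zeroed,
 * page-aligned pages by advancing physfree, the cursor just past the memory
 * the booter loaded the kernel into.  The returned pointer is offset into the
 * kernel's high virtual alias (VM_MIN_KERNEL_ADDRESS outside the low 4GB), so
 * it remains usable under the idle kernel page tables built below.
 */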
static void
fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
{
	int i;
	for (i = 0; i < count; i++) {
		base[index] = src | prot | INTEL_PTE_VALID;
		src += PAGE_SIZE;
		index++;
	}
}
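
/*
 * fillkpt() writes 'count' consecutive entries of a paging structure starting
 * at 'index', mapping successive physical pages from 'src' with 'prot' plus
 * INTEL_PTE_VALID.  Idle_PTs_init() below uses it both to map physical pages
 * at the lowest level and to link table pages into the higher levels.
 */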
extern pmap_paddr_t first_avail;

int break_kprintf = 0;
uint64_t
x86_64_pre_sleep(void)
{
	IdlePML4[0] = IdlePML4[KERNEL_PML4_INDEX];
	uint64_t oldcr3 = get_cr3_raw();
	set_cr3_raw((uint32_t) (uintptr_t)ID_MAP_VTOP(IdlePML4));
	return oldcr3;
}

void
x86_64_post_sleep(uint64_t new_cr3)
{
	set_cr3_raw((uint32_t) new_cr3);
}

#define ID_MAP_VTOP(x)	x
// Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address
// NPHYSMAP is determined by the maximum supported RAM size plus 4GB to account
// for the PCI hole (which is less than 4GB, but not more).
#define NPHYSMAP MAX(K64_MAXMEM/GB + 4, 4)
// Compile-time guard: NPHYSMAP must fit in a single L3 page.
extern int maxphymapsupported[NPHYSMAP <= PTE_PER_PAGE ? 1 : -1];
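
/*
 * Worked example (illustrative value): with K64_MAXMEM of 96GB, NPHYSMAP is
 * 96 + 4 = 100 slots.  Each slot is one level-2 page mapping 1GB as 512 2MB
 * entries, and all slots must fit in the single level-3 page allocated in
 * physmap_init(), hence the guard above: a negative array size stops the
 * build if NPHYSMAP ever exceeds PTE_PER_PAGE.
 */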
static void
physmap_init(void)
{
	pt_entry_t *physmapL3 = ALLOCPAGES(1);
	struct {
		pt_entry_t entries[PTE_PER_PAGE];
	} *physmapL2 = ALLOCPAGES(NPHYSMAP);

	uintptr_t i;
	for (i = 0; i < NPHYSMAP; i++) {
		physmapL3[i] = ((uintptr_t)ID_MAP_VTOP(&physmapL2[i]))
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		uintptr_t j;
		for (j = 0; j < PTE_PER_PAGE; j++) {
			physmapL2[i].entries[j] = (((i * PTE_PER_PAGE + j) << PDSHIFT)
					| INTEL_PTE_PS
					| INTEL_PTE_VALID
					| INTEL_PTE_WRITE);
		}
	}

	IdlePML4[KERNEL_PHYSMAP_INDEX] = ((uintptr_t)ID_MAP_VTOP(physmapL3))
					| INTEL_PTE_VALID
					| INTEL_PTE_WRITE;
	if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
		IdlePML4[KERNEL_PHYSMAP_INDEX] |= INTEL_PTE_NX;
	}

	DBG("physical map idlepml4[%d]: 0x%llx\n",
		KERNEL_PHYSMAP_INDEX, IdlePML4[KERNEL_PHYSMAP_INDEX]);
}
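
/*
 * The net effect is a linear "physmap" alias of physical memory hanging off
 * one PML4 slot (KERNEL_PHYSMAP_INDEX): a single L3 page whose first NPHYSMAP
 * entries point at L2 pages, each L2 entry mapping the next 2MB of physical
 * memory.  The window is writable and, when XD is available, non-executable.
 */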
static void
Idle_PTs_init(void)
{
	/* Allocate the "idle" kernel page tables: */
	KPTphys = ALLOCPAGES(NKPT);		/* level 1 */
	IdlePTD = ALLOCPAGES(NPGPTD);		/* level 2 */

	IdlePDPT64 = ALLOCPAGES(1);

	// Recursive mapping of PTEs
	fillkpt(IdlePTD, INTEL_PTE_WRITE, (uintptr_t)IdlePTD, PTDPTDI, NPGPTD);
	// commpage
	fillkpt(IdlePTD, INTEL_PTE_WRITE|INTEL_PTE_USER, (uintptr_t)ALLOCPAGES(1), _COMM_PAGE32_BASE_ADDRESS >> PDESHIFT, 1);

	// Fill the lowest level with everything up to physfree
	fillkpt(KPTphys,
		INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));

	// Rewrite the 2nd-lowest level to point to pages of KPTphys.
	// This was previously filled statically by idle_pt.c, and thus
	// must be done after the KPTphys fill since IdlePTD is in use.
	fillkpt(IdlePTD,
		INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);

	// IdlePDPT entries
	fillkpt(IdlePDPT, 0, (uintptr_t)IdlePTD, 0, NPGPTD);
	fillkpt(IdlePDPT64, INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);

	// Flush the TLB now that we're done rewriting the page tables.
	set_cr3_raw(get_cr3_raw());
}
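
/*
 * Note on the final set_cr3_raw(get_cr3_raw()): reloading CR3 with its own
 * value flushes the non-global TLB entries, so the processor immediately
 * observes the page-table rewrites performed above.
 */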
/*
 * vstart() is called in the natural mode (64bit for K64, 32 for K32)
 * on a set of bootstrap pagetables which use large, 2MB pages to map
 * all of physical memory in both. See idle_pt.c for details.
 *
 * In K64 this identity mapping is mirrored in the top and bottom 512GB
 * slots of the PML4.
 *
 * The bootstrap processor is called with argument boot_args_start pointing to
 * the boot-args block. The kernel's (4K page) page tables are allocated and
 * initialized before switching to these.
 *
 * Non-bootstrap processors are called with argument boot_args_start NULL.
 * These processors switch immediately to the existing kernel page tables.
 */
void
vstart(vm_offset_t boot_args_start)
{
	boolean_t	is_boot_cpu = !(boot_args_start == 0);
	int		cpu;
	uint32_t	lphysfree;

	postcode(VSTART_ENTRY);

	if (is_boot_cpu) {
		/*
		 * Get startup parameters.
		 */
		kernelBootArgs = (boot_args *)boot_args_start;
		lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
		physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));

		DBG("revision 0x%x\n", kernelBootArgs->Revision);
		DBG("version 0x%x\n", kernelBootArgs->Version);
		DBG("command line %s\n", kernelBootArgs->CommandLine);
		DBG("memory map 0x%x\n", kernelBootArgs->MemoryMap);
		DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
		DBG("kaddr 0x%x\n", kernelBootArgs->kaddr);
		DBG("ksize 0x%x\n", kernelBootArgs->ksize);
		DBG("physfree %p\n", physfree);
		DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
			kernelBootArgs,
			&kernelBootArgs->ksize,
			&kernelBootArgs->kaddr);

		/* enable NX/XD, boot processor */
		if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
			wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
			DBG("vstart() NX/XD enabled\n");
		}

		postcode(PSTART_PAGE_TABLES);

		Idle_PTs_init();

		first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);
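
		/*
		 * first_avail records the lowest physical address not consumed by
		 * the bootstrap allocations above; the VM/pmap layer presumably
		 * takes over physical page allocation from this point.
		 */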
		cpu = 0;
		cpu_data_alloc(TRUE);
	} else {
		/* Find our logical cpu number */
		cpu = lapic_to_cpu[(LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK];
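
		/*
		 * The local APIC ID register read above identifies this processor;
		 * shifting and masking extracts the APIC ID, and lapic_to_cpu[]
		 * translates it to the logical cpu number assigned earlier.
		 */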
		if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
			wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
			DBG("vstart() NX/XD enabled, non-boot\n");
		}
	}

#ifdef __x86_64__
	cpu_desc_init64(cpu_datap(cpu));
	cpu_desc_load64(cpu_datap(cpu));
#else
	cpu_desc_init(cpu_datap(cpu));
	cpu_desc_load(cpu_datap(cpu));
#endif

	cpu_mode_init(current_cpu_datap());	/* cpu_mode_init() will be
						 * invoked on the APs
						 * via i386_init_slave()
						 */

	/* Done with identity mapping */
	IdlePML4[0] = 0;

	postcode(VSTART_EXIT);

#ifdef __i386__
	if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
		wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
		DBG("vstart() NX/XD enabled, i386\n");
	}

	i386_init(boot_args_start);
#else
	/* We need to switch to a new per-cpu stack, but we must do this atomically
	 * with the call so that the compiler cannot assume anything about the stack
	 * beforehand (e.g. due to tail-call optimisations).
	 */
	if (is_boot_cpu) {
		asm volatile(
		    "mov %1, %%rdi;"
		    "mov %0, %%rsp;"
		    "call _i386_init;" : : "r"
			(cpu_datap(cpu)->cpu_int_stack_top), "r" (boot_args_start));
	} else {
		asm volatile(
		    "mov %0, %%rsp;"
		    "call _i386_init_slave;" : : "r"
			(cpu_datap(cpu)->cpu_int_stack_top));
	}
#endif
}
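
/*
 * Whichever path is taken above, control leaves vstart() on the cpu's
 * interrupt stack: the boot cpu continues in i386_init() with boot_args_start
 * as its argument, while application processors continue in i386_init_slave().
 */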
/*
 *	Cpu initialization.  Running virtual, but without MACH VM
 *	set up.
 */
void
i386_init(vm_offset_t boot_args_start)
{
	unsigned int	maxmem;
	uint64_t	maxmemtouse;
	unsigned int	cpus = 0;
	boolean_t	fidn;
	boolean_t	IA32e = TRUE;

	postcode(I386_INIT_ENTRY);

	/* Initialize machine-check handling */
	mca_cpu_init();

	/*
	 * Setup boot args given the physical start address.
	 */
	kernelBootArgs = (boot_args *)
		ml_static_ptovirt(boot_args_start);
	DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
		(unsigned long)boot_args_start, kernelBootArgs);

	PE_init_platform(FALSE, kernelBootArgs);
	postcode(PE_INIT_PLATFORM_D);

	kernel_early_bootstrap();

	postcode(CPU_INIT_D);

	printf_init();	/* Init this in case we need debugger */
	panic_init();	/* Init this in case we need debugger */

	/* setup debugging output if one has been chosen */
	PE_init_kprintf(FALSE);

	if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof (dgWork.dgFlags)))
		dgWork.dgFlags = 0;

	if (PE_parse_boot_argn("serial", &serialmode, sizeof (serialmode))) {
		/* We want a serial keyboard and/or console */
		kprintf("Serial mode specified: %08X\n", serialmode);
	}
	if (serialmode & 1) {
		(void)switch_to_serial_console();
		disableConsoleOutput = FALSE;	/* Allow printfs to happen */
	}
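
	/*
	 * Example (hypothetical boot-args): "diag=0x1 serial=1 maxmem=2048 cpus=2"
	 * would set the diagnostics flags, route the console to the serial port,
	 * cap usable memory at 2GB and limit the machine to two cpus via the
	 * boot-arg checks above and below.
	 */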
	/* setup console output */
	PE_init_printf(FALSE);

	kprintf("version_variant = %s\n", version_variant);
	kprintf("version = %s\n", version);

	if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof (maxmem)))
		maxmemtouse = 0;
	else
		maxmemtouse = ((uint64_t)maxmem) * MB;

	if (PE_parse_boot_argn("cpus", &cpus, sizeof (cpus))) {
		if ((0 < cpus) && (cpus < max_ncpus))
			max_ncpus = cpus;
	}

	/*
	 * debug support for > 4G systems
	 */
	if (!PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof (vm_himemory_mode)))
		vm_himemory_mode = 0;

	if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof (fidn)))
		force_immediate_debugger_NMI = FALSE;
	else
		force_immediate_debugger_NMI = fidn;
	nanoseconds_to_absolutetime(URGENCY_NOTIFICATION_ASSERT_NS, &urgency_notification_assert_abstime_threshold);

	PE_parse_boot_argn("urgency_notification_abstime",
		&urgency_notification_assert_abstime_threshold,
		sizeof(urgency_notification_assert_abstime_threshold));

	/*
	 * At this point we check whether we are a 64-bit processor
	 * and that we're not restricted to legacy mode, 32-bit operation.
	 */
	if (cpuid_extfeatures() & CPUID_EXTFEATURE_EM64T) {
		boolean_t	legacy_mode;
		kprintf("EM64T supported");
		if (PE_parse_boot_argn("-legacy", &legacy_mode, sizeof (legacy_mode))) {
			kprintf(" but legacy mode forced\n");
			IA32e = FALSE;
		} else {
			kprintf(" and will be enabled\n");
		}
	} else
		IA32e = FALSE;

	if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
		nx_enabled = 0;
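
	/*
	 * IA32e now records whether the kernel will run in long (64-bit) mode and
	 * is passed to i386_vm_init() below; nx_enabled is cleared when the XD/NX
	 * bit is unavailable and is presumably consulted later by the pmap layer.
	 */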
	/*
	 * VM initialization, after this we're using page tables...
	 * The maximum number of cpus must be set beforehand.
	 */
	i386_vm_init(maxmemtouse, IA32e, kernelBootArgs);

	/* create the console for verbose or pretty mode */
	/* Note: doing this prior to tsc_init() allows for graceful panic! */
	PE_init_platform(TRUE, kernelBootArgs);

	power_management_init();

	processor_bootstrap();
static void
do_init_slave(boolean_t fast_restart)
{
	void	*init_param = FULL_SLAVE_INIT;

	postcode(I386_INIT_SLAVE);

	if (!fast_restart) {
		/* Ensure that caching and write-through are enabled */
		set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));
		DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
			get_cpu_number(), get_cpu_phys_number());

		assert(!ml_get_interrupts_enabled());

		cpu_mode_init(current_cpu_datap());

		LAPIC_CPU_MAP_DUMP();
	} else
		init_param = FAST_SLAVE_INIT;

	/* update CPU microcode */
	ucode_update_wake();

	/* resume VT operation */

	cpu_thread_init();	/* not strictly necessary */

	/* Re-zero the identity-map for the idle PTs. This MUST be done before
	 * cpu_running is set so that other slaves can set up their own
	 * identity map.
	 */
	if (!fast_restart)
		Idle_PTs_init();

	cpu_init();	/* Sets cpu_running which the starter cpu waits for */
	slave_main(init_param);

	panic("do_init_slave() returned from slave_main()");
}
/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	do_init_slave(FALSE);
}

/*
 * i386_init_slave_fast() is called from pmCPUHalt.
 * We're running on the idle thread and need to fix up
 * some accounting and get it so that the scheduler sees this
 * CPU again.
 */
void
i386_init_slave_fast(void)
{
	do_init_slave(TRUE);
}