#include <i386/mp.h>
#include <i386/cpu_number.h>
#include <i386/machine_cpu.h>
-#include <i386/mp_slave_boot.h>
#include <i386/seg.h>
#include <vm/vm_protos.h>
#include <i386/postcode.h>
+#ifdef __i386__
void
cpu_IA32e_enable(cpu_data_t *cdp)
{
- uint32_t cr0 = get_cr0();
- uint64_t efer = rdmsr64(MSR_IA32_EFER);
-
assert(!ml_get_interrupts_enabled());
- postcode(CPU_IA32_ENABLE_ENTRY);
-
- /* Turn paging off - works because we're identity mapped */
- set_cr0(cr0 & ~CR0_PG);
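+ /* Nothing to do unless this cpu is 64-bit capable and long mode is not already active */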
+ if (!cdp->cpu_is64bit ||
+ (rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) != 0)
+ return;
- /* pop in new top level phys pg addr */
- set_cr3((vm_offset_t) kernel64_cr3);
-
- wrmsr64(MSR_IA32_EFER, efer | MSR_IA32_EFER_LME); /* set mode */
-
- /* Turn paging on */
- set_cr0(cr0 | CR0_PG);
+ postcode(CPU_IA32_ENABLE_ENTRY);
- /* this call is required to re-activate paging */
+ /*
+ * The following steps are performed with inline assembly so that
+ * we can be sure we don't touch the stack or any other
+ * non-identity-mapped data while paging is turned off...
+ */
+ /* Turn paging off */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "andl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (~CR0_PG)
+ : "eax" );
+
+ /* Pop new top level phys pg addr into CR3 */
+ asm volatile(
+ "mov %%eax, %%cr3 \n\t"
+ :
+ : "a" ((uint32_t) kernel64_cr3));
+
+ /* Turn on the 64-bit mode bit */
+ asm volatile(
+ "rdmsr \n\t"
+ "orl %1, %%eax \n\t"
+ "wrmsr \n\t"
+ :
+ : "c" (MSR_IA32_EFER), "i" (MSR_IA32_EFER_LME)
+ : "eax", "edx");
+
+ /* Turn paging on again */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "orl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (CR0_PG)
+ : "eax" );
+
+#if ONLY_SAFE_FOR_LINDA_SERIAL
kprintf("cpu_IA32e_enable(%p)\n", cdp);
+#endif
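+ /* The processor sets EFER.LMA once paging is re-enabled with LME set; verify the switch took effect */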
if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
panic("cpu_IA32e_enable() MSR_IA32_EFER_LMA not asserted");
void
cpu_IA32e_disable(cpu_data_t *cdp)
{
- uint32_t cr0 = get_cr0();
- uint64_t efer = rdmsr64(MSR_IA32_EFER);
-
assert(!ml_get_interrupts_enabled());
postcode(CPU_IA32_DISABLE_ENTRY);
- if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
- panic("cpu_IA32e_disable() MSR_IA32_EFER_LMA clear on entry");
-
- /* Turn paging off - works because we're identity mapped */
- set_cr0(cr0 & ~CR0_PG);
-
- /* pop in legacy top level phys pg addr */
- set_cr3((vm_offset_t) lo_kernel_cr3);
-
- wrmsr64(MSR_IA32_EFER, efer & ~MSR_IA32_EFER_LME); /* reset mode */
-
- /* Turn paging on */
- set_cr0(cr0 | CR0_PG);
-
- /* this call is required to re-activate paging */
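+ /* Nothing to do unless this cpu is 64-bit capable and long mode is currently active */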
+ if (!cdp->cpu_is64bit ||
+ (rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
+ return;
+
+ /*
+ * The following steps are performed with inline assembly so that
+ * we can be sure we don't touch the stack or any other
+ * non-identity-mapped data while paging is turned off...
+ */
+ /* Turn paging off */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "andl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (~CR0_PG)
+ : "eax" );
+
+ /* Pop legacy top level phys pg addr into CR3 */
+ asm volatile(
+ "mov %%eax, %%cr3 \n\t"
+ :
+ : "a" ((uint32_t) lo_kernel_cr3));
+
+ /* Turn off the 64-bit mode bit */
+ asm volatile(
+ "rdmsr \n\t"
+ "andl %1, %%eax \n\t"
+ "wrmsr \n\t"
+ :
+ : "c" (MSR_IA32_EFER), "i" (~MSR_IA32_EFER_LME)
+ : "eax", "edx");
+
+ /* Turn paging on again */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "orl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (CR0_PG)
+ : "eax" );
+
kprintf("cpu_IA32e_disable(%p)\n", cdp);
if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) != 0)
panic("cpu_IA32e_disable() MSR_IA32_EFER_LMA not cleared");

postcode(CPU_IA32_DISABLE_EXIT);
}
-
-void
-fix_desc64(void *descp, int count)
-{
- struct fake_descriptor64 *fakep;
- union {
- struct real_gate64 gate;
- struct real_descriptor64 desc;
- } real;
- int i;
-
- fakep = (struct fake_descriptor64 *) descp;
-
- for (i = 0; i < count; i++, fakep++) {
- /*
- * Construct the real decriptor locally.
- */
-
- bzero((void *) &real, sizeof(real));
-
- switch (fakep->access & ACC_TYPE) {
- case 0:
- break;
- case ACC_CALL_GATE:
- case ACC_INTR_GATE:
- case ACC_TRAP_GATE:
- real.gate.offset_low16 = fakep->offset[0] & 0xFFFF;
- real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
- real.gate.IST = fakep->size_or_IST & 0x7;
- real.gate.access8 = fakep->access;
- real.gate.offset_high16 = (fakep->offset[0]>>16)&0xFFFF;
- real.gate.offset_top32 = (uint32_t)fakep->offset[1];
- break;
- default: /* Otherwise */
- real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
- real.desc.base_low16 = fakep->offset[0] & 0xFFFF;
- real.desc.base_med8 = (fakep->offset[0] >> 16) & 0xFF;
- real.desc.access8 = fakep->access;
- real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF;
- real.desc.granularity4 = fakep->size_or_IST;
- real.desc.base_high8 = (fakep->offset[0] >> 24) & 0xFF;
- real.desc.base_top32 = (uint32_t) fakep->offset[1];
- }
-
- /*
- * Now copy back over the fake structure.
- */
- bcopy((void *) &real, (void *) fakep, sizeof(real));
- }
-}
+#endif /* __i386__ */
#if DEBUG
extern void dump_gdt(void *);
unsigned int i;
uint32_t *ip = (uint32_t *) scp;
- kprintf("dump_frame32(0x%08x):\n", scp);
+ kprintf("dump_frame32(%p):\n", scp);
for (i = 0;
i < sizeof(x86_saved_state_compat32_t)/sizeof(uint32_t);
i++, ip++)
- kprintf("0x%08x: 0x%08x\n", ip, *ip);
+ kprintf("%p: 0x%08x\n", ip, *ip);
kprintf("scp->isf64.err: 0x%016llx\n", scp->isf64.err);
kprintf("scp->isf64.rip: 0x%016llx\n", scp->isf64.rip);
for (i = 0;
i < sizeof(x86_saved_state64_t)/sizeof(uint64_t);
i++, ip++)
- kprintf("0x%08x: 0x%016x\n", ip, *ip);
+ kprintf("%p: 0x%016llx\n", ip, *ip);
kprintf("sp->isf.trapno: 0x%08x\n", sp->isf.trapno);
- kprintf("sp->isf.trapfn: 0x%08x\n", sp->isf.trapfn);
+ kprintf("sp->isf.trapfn: 0x%016llx\n", sp->isf.trapfn);
kprintf("sp->isf.err: 0x%016llx\n", sp->isf.err);
kprintf("sp->isf.rip: 0x%016llx\n", sp->isf.rip);
kprintf("sp->isf.cs: 0x%016llx\n", sp->isf.cs);
unsigned int i;
uint32_t *ip = (uint32_t *) gdtp;
- kprintf("GDT:\n", ip);
+ kprintf("GDT:\n");
for (i = 0; i < GDTSZ; i++, ip += 2) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) ldtp;
- kprintf("LDT:\n", ip);
+ kprintf("LDT:\n");
for (i = 0; i < LDTSZ_MIN; i++, ip += 2) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) idtp;
- kprintf("IDT64:\n", ip);
+ kprintf("IDT64:\n");
for (i = 0; i < 16; i++, ip += 4) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) tssp;
- kprintf("TSS64:\n", ip);
+ kprintf("TSS64:\n");
for (i = 0; i < sizeof(master_ktss64)/sizeof(uint32_t); i++, ip++) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
}