/*
* Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <string.h>
#include <i386/mp.h>
#include <i386/cpu_number.h>
#include <i386/machine_cpu.h>
-#include <i386/mp_slave_boot.h>
#include <i386/seg.h>
#include <vm/vm_protos.h>
#include <i386/postcode.h>
+#ifdef __i386__
void
cpu_IA32e_enable(cpu_data_t *cdp)
{
- uint32_t cr0 = get_cr0();
- uint64_t efer = rdmsr64(MSR_IA32_EFER);
-
assert(!ml_get_interrupts_enabled());
- postcode(CPU_IA32_ENABLE_ENTRY);
-
- /* Turn paging off - works because we're identity mapped */
- set_cr0(cr0 & ~CR0_PG);
+ if (!cdp->cpu_is64bit ||
+ (rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) != 0)
+ return;
- /* pop in new top level phys pg addr */
- set_cr3((vm_offset_t) kernel64_cr3);
-
- wrmsr64(MSR_IA32_EFER, efer | MSR_IA32_EFER_LME); /* set mode */
-
- /* Turn paging on */
- set_cr0(cr0 | CR0_PG);
+ postcode(CPU_IA32_ENABLE_ENTRY);
- /* this call is required to re-activate paging */
+ /*
+	 * The following steps are performed with inline assembly so that
+	 * we can be sure we don't touch the stack or any other
+	 * non-identity-mapped data while paging is turned off.
+ */
+ /* Turn paging off */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "andl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (~CR0_PG)
+ : "eax" );
+
+ /* Pop new top level phys pg addr into CR3 */
+ asm volatile(
+ "mov %%eax, %%cr3 \n\t"
+ :
+ : "a" ((uint32_t) kernel64_cr3));
+
+ /* Turn on the 64-bit mode bit */
+ asm volatile(
+ "rdmsr \n\t"
+ "orl %1, %%eax \n\t"
+ "wrmsr \n\t"
+ :
+ : "c" (MSR_IA32_EFER), "i" (MSR_IA32_EFER_LME)
+ : "eax", "edx");
+
+ /* Turn paging on again */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "orl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (CR0_PG)
+ : "eax" );
+
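+	/*
+	 * For reference, the sequence above is the standard IA-32e
+	 * activation recipe.  A sketch of the same steps using the C
+	 * accessors from the code this replaces (illustration only; the
+	 * inline-asm form above is what must actually run, since nothing
+	 * that is not identity-mapped may be touched while CR0.PG is clear):
+	 *
+	 *	set_cr0(get_cr0() & ~CR0_PG);		// paging off
+	 *	set_cr3((vm_offset_t) kernel64_cr3);	// 64-bit top-level page table
+	 *	wrmsr64(MSR_IA32_EFER,
+	 *	    rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_LME);
+	 *	set_cr0(get_cr0() | CR0_PG);		// paging on, LMA becomes set
+	 */
+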
+#if ONLY_SAFE_FOR_LINDA_SERIAL
kprintf("cpu_IA32e_enable(%p)\n", cdp);
+#endif
if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
panic("cpu_IA32e_enable() MSR_IA32_EFER_LMA not asserted");
void
cpu_IA32e_disable(cpu_data_t *cdp)
{
- uint32_t cr0 = get_cr0();
- uint64_t efer = rdmsr64(MSR_IA32_EFER);
-
assert(!ml_get_interrupts_enabled());
postcode(CPU_IA32_DISABLE_ENTRY);
- if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
- panic("cpu_IA32e_disable() MSR_IA32_EFER_LMA clear on entry");
-
- /* Turn paging off - works because we're identity mapped */
- set_cr0(cr0 & ~CR0_PG);
-
- /* pop in legacy top level phys pg addr */
- set_cr3((vm_offset_t) lo_kernel_cr3);
-
- wrmsr64(MSR_IA32_EFER, efer & ~MSR_IA32_EFER_LME); /* reset mode */
-
- /* Turn paging on */
- set_cr0(cr0 | CR0_PG);
-
- /* this call is required to re-activate paging */
+ if (!cdp->cpu_is64bit ||
+ (rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) == 0)
+ return;
+
+ /*
+	 * The following steps are performed with inline assembly so that
+	 * we can be sure we don't touch the stack or any other
+	 * non-identity-mapped data while paging is turned off.
+ */
+ /* Turn paging off */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "andl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (~CR0_PG)
+ : "eax" );
+
+ /* Pop legacy top level phys pg addr into CR3 */
+ asm volatile(
+ "mov %%eax, %%cr3 \n\t"
+ :
+ : "a" ((uint32_t) lo_kernel_cr3));
+
+ /* Turn off the 64-bit mode bit */
+ asm volatile(
+ "rdmsr \n\t"
+ "andl %1, %%eax \n\t"
+ "wrmsr \n\t"
+ :
+ : "c" (MSR_IA32_EFER), "i" (~MSR_IA32_EFER_LME)
+ : "eax", "edx");
+
+ /* Turn paging on again */
+ asm volatile(
+ "mov %%cr0, %%eax \n\t"
+ "orl %0, %%eax \n\t"
+ "mov %%eax, %%cr0 \n\t"
+ :
+ : "i" (CR0_PG)
+ : "eax" );
+
kprintf("cpu_IA32e_disable(%p)\n", cdp);
if ((rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_LMA) != 0)
postcode(CPU_IA32_DISABLE_EXIT);
}
-
-void
-fix_desc64(void *descp, int count)
-{
- struct fake_descriptor64 *fakep;
- union {
- struct real_gate64 gate;
- struct real_descriptor64 desc;
- } real;
- int i;
-
- fakep = (struct fake_descriptor64 *) descp;
-
- for (i = 0; i < count; i++, fakep++) {
- /*
- * Construct the real decriptor locally.
- */
-
- bzero((void *) &real, sizeof(real));
-
- switch (fakep->access & ACC_TYPE) {
- case 0:
- break;
- case ACC_CALL_GATE:
- case ACC_INTR_GATE:
- case ACC_TRAP_GATE:
- real.gate.offset_low16 = fakep->offset[0] & 0xFFFF;
- real.gate.selector16 = fakep->lim_or_seg & 0xFFFF;
- real.gate.IST = fakep->size_or_IST & 0x7;
- real.gate.access8 = fakep->access;
- real.gate.offset_high16 = (fakep->offset[0]>>16)&0xFFFF;
- real.gate.offset_top32 = (uint32_t)fakep->offset[1];
- break;
- default: /* Otherwise */
- real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF;
- real.desc.base_low16 = fakep->offset[0] & 0xFFFF;
- real.desc.base_med8 = (fakep->offset[0] >> 16) & 0xFF;
- real.desc.access8 = fakep->access;
- real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF;
- real.desc.granularity4 = fakep->size_or_IST;
- real.desc.base_high8 = (fakep->offset[0] >> 24) & 0xFF;
- real.desc.base_top32 = (uint32_t) fakep->offset[1];
- }
-
- /*
- * Now copy back over the fake structure.
- */
- bcopy((void *) &real, (void *) fakep, sizeof(real));
- }
-}
+#endif
#if DEBUG
+extern void dump_regs64(void);
extern void dump_gdt(void *);
extern void dump_ldt(void *);
extern void dump_idt(void *);
extern void dump_tss(void *);
extern void dump_frame32(x86_saved_state_compat32_t *scp);
-extern void dump_frame64(x86_saved_state64_t *scp);
+extern void dump_frame64(x86_saved_state64_t *sp);
+extern void dump_frame(x86_saved_state_t *sp);
+
+void
+dump_frame(x86_saved_state_t *sp)
+{
+ if (is_saved_state32(sp))
+ dump_frame32((x86_saved_state_compat32_t *) sp);
+ else if (is_saved_state64(sp))
+ dump_frame64(&sp->ss_64);
+ else
+ kprintf("dump_frame(%p) unknown type %d\n", sp, sp->flavor);
+}
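+
+/*
+ * Usage sketch (hypothetical, not part of this change): a DEBUG-only
+ * caller holding an x86_saved_state_t, e.g. a trap or panic path, can
+ * log the frame without knowing which flavor was saved:
+ *
+ *	void
+ *	debug_dump_state(x86_saved_state_t *state)	// hypothetical helper
+ *	{
+ *		dump_frame(state);	// picks dump_frame32() or dump_frame64()
+ *	}
+ */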
void
dump_frame32(x86_saved_state_compat32_t *scp)
unsigned int i;
uint32_t *ip = (uint32_t *) scp;
- kprintf("dump_frame32(0x%08x):\n", scp);
+ kprintf("dump_frame32(%p):\n", scp);
for (i = 0;
i < sizeof(x86_saved_state_compat32_t)/sizeof(uint32_t);
i++, ip++)
- kprintf("0x%08x: 0x%08x\n", ip, *ip);
+ kprintf("%p: 0x%08x\n", ip, *ip);
kprintf("scp->isf64.err: 0x%016llx\n", scp->isf64.err);
kprintf("scp->isf64.rip: 0x%016llx\n", scp->isf64.rip);
}
void
-dump_frame64(x86_saved_state64_t *scp)
+dump_frame64(x86_saved_state64_t *sp)
{
unsigned int i;
- uint64_t *ip = (uint64_t *) scp;
+ uint64_t *ip = (uint64_t *) sp;
- kprintf("dump_frame64(0x%08x):\n", scp);
+ kprintf("dump_frame64(%p):\n", sp);
for (i = 0;
i < sizeof(x86_saved_state64_t)/sizeof(uint64_t);
i++, ip++)
- kprintf("0x%08x: 0x%016x\n", ip, *ip);
-
- kprintf("scp->isf.trapno: 0x%08x\n", scp->isf.trapno);
- kprintf("scp->isf.trapfn: 0x%08x\n", scp->isf.trapfn);
- kprintf("scp->isf.err: 0x%016llx\n", scp->isf.err);
- kprintf("scp->isf.rip: 0x%016llx\n", scp->isf.rip);
- kprintf("scp->isf.cs: 0x%016llx\n", scp->isf.cs);
- kprintf("scp->isf.rflags: 0x%016llx\n", scp->isf.rflags);
- kprintf("scp->isf.rsp: 0x%016llx\n", scp->isf.rsp);
- kprintf("scp->isf.ss: 0x%016llx\n", scp->isf.ss);
-
- kprintf("scp->fs: 0x%016llx\n", scp->fs);
- kprintf("scp->gs: 0x%016llx\n", scp->gs);
- kprintf("scp->rax: 0x%016llx\n", scp->rax);
- kprintf("scp->rcx: 0x%016llx\n", scp->rcx);
- kprintf("scp->rbx: 0x%016llx\n", scp->rbx);
- kprintf("scp->rbp: 0x%016llx\n", scp->rbp);
- kprintf("scp->r11: 0x%016llx\n", scp->r11);
- kprintf("scp->r12: 0x%016llx\n", scp->r12);
- kprintf("scp->r13: 0x%016llx\n", scp->r13);
- kprintf("scp->r14: 0x%016llx\n", scp->r14);
- kprintf("scp->r15: 0x%016llx\n", scp->r15);
- kprintf("scp->cr2: 0x%016llx\n", scp->cr2);
- kprintf("scp->v_arg8: 0x%016llx\n", scp->v_arg8);
- kprintf("scp->v_arg7: 0x%016llx\n", scp->v_arg7);
- kprintf("scp->v_arg6: 0x%016llx\n", scp->v_arg6);
- kprintf("scp->r9: 0x%016llx\n", scp->r9);
- kprintf("scp->r8: 0x%016llx\n", scp->r8);
- kprintf("scp->r10: 0x%016llx\n", scp->r10);
- kprintf("scp->rdx: 0x%016llx\n", scp->rdx);
- kprintf("scp->rsi: 0x%016llx\n", scp->rsi);
- kprintf("scp->rdi: 0x%016llx\n", scp->rdi);
+ kprintf("%p: 0x%016llx\n", ip, *ip);
+
+ kprintf("sp->isf.trapno: 0x%08x\n", sp->isf.trapno);
+ kprintf("sp->isf.trapfn: 0x%016llx\n", sp->isf.trapfn);
+ kprintf("sp->isf.err: 0x%016llx\n", sp->isf.err);
+ kprintf("sp->isf.rip: 0x%016llx\n", sp->isf.rip);
+ kprintf("sp->isf.cs: 0x%016llx\n", sp->isf.cs);
+ kprintf("sp->isf.rflags: 0x%016llx\n", sp->isf.rflags);
+ kprintf("sp->isf.rsp: 0x%016llx\n", sp->isf.rsp);
+ kprintf("sp->isf.ss: 0x%016llx\n", sp->isf.ss);
+
+ kprintf("sp->fs: 0x%016x\n", sp->fs);
+ kprintf("sp->gs: 0x%016x\n", sp->gs);
+ kprintf("sp->rax: 0x%016llx\n", sp->rax);
+ kprintf("sp->rcx: 0x%016llx\n", sp->rcx);
+ kprintf("sp->rbx: 0x%016llx\n", sp->rbx);
+ kprintf("sp->rbp: 0x%016llx\n", sp->rbp);
+ kprintf("sp->r11: 0x%016llx\n", sp->r11);
+ kprintf("sp->r12: 0x%016llx\n", sp->r12);
+ kprintf("sp->r13: 0x%016llx\n", sp->r13);
+ kprintf("sp->r14: 0x%016llx\n", sp->r14);
+ kprintf("sp->r15: 0x%016llx\n", sp->r15);
+ kprintf("sp->cr2: 0x%016llx\n", sp->cr2);
+ kprintf("sp->v_arg8: 0x%016llx\n", sp->v_arg8);
+ kprintf("sp->v_arg7: 0x%016llx\n", sp->v_arg7);
+ kprintf("sp->v_arg6: 0x%016llx\n", sp->v_arg6);
+ kprintf("sp->r9: 0x%016llx\n", sp->r9);
+ kprintf("sp->r8: 0x%016llx\n", sp->r8);
+ kprintf("sp->r10: 0x%016llx\n", sp->r10);
+ kprintf("sp->rdx: 0x%016llx\n", sp->rdx);
+ kprintf("sp->rsi: 0x%016llx\n", sp->rsi);
+ kprintf("sp->rdi: 0x%016llx\n", sp->rdi);
postcode(0x98);
}
unsigned int i;
uint32_t *ip = (uint32_t *) gdtp;
- kprintf("GDT:\n", ip);
+ kprintf("GDT:\n");
for (i = 0; i < GDTSZ; i++, ip += 2) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) ldtp;
- kprintf("LDT:\n", ip);
+ kprintf("LDT:\n");
for (i = 0; i < LDTSZ_MIN; i++, ip += 2) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) idtp;
- kprintf("IDT64:\n", ip);
+ kprintf("IDT64:\n");
for (i = 0; i < 16; i++, ip += 4) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
kprintf("%p: 0x%08x\n", ip+1, *(ip+1));
unsigned int i;
uint32_t *ip = (uint32_t *) tssp;
- kprintf("TSS64:\n", ip);
+ kprintf("TSS64:\n");
for (i = 0; i < sizeof(master_ktss64)/sizeof(uint32_t); i++, ip++) {
kprintf("%p: 0x%08x\n", ip+0, *(ip+0));
}
}
+
+#if defined(__x86_64__)
+void dump_regs64(void)
+{
+
+#define SNAP_REG(reg) \
+ uint64_t reg; \
+ __asm__ volatile("mov %%" #reg ", %0" : "=m" (reg))
+
+#define KPRINT_REG(reg) \
+ kprintf("%3s: %p\n", #reg, (void *) reg)
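+
+	/*
+	 * Note on the macros above: a SNAP_REG(rsp); statement below
+	 * expands to
+	 *
+	 *	uint64_t rsp;
+	 *	__asm__ volatile("mov %%rsp, %0" : "=m" (rsp));
+	 *
+	 * i.e. each use declares a local and snapshots the named register
+	 * into it; KPRINT_REG() then prints that snapshot via kprintf().
+	 */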
+
+ SNAP_REG(rsp);
+ SNAP_REG(rbp);
+ SNAP_REG(rax);
+ SNAP_REG(rbx);
+ SNAP_REG(rcx);
+ SNAP_REG(rdx);
+ SNAP_REG(rsi);
+ SNAP_REG(rdi);
+ SNAP_REG(r8);
+ SNAP_REG(r9);
+ SNAP_REG(r10);
+ SNAP_REG(r11);
+ SNAP_REG(r12);
+ SNAP_REG(r13);
+ SNAP_REG(r14);
+
+ KPRINT_REG(rsp);
+ KPRINT_REG(rbp);
+ KPRINT_REG(rax);
+ KPRINT_REG(rbx);
+ KPRINT_REG(rcx);
+ KPRINT_REG(rdx);
+ KPRINT_REG(rsi);
+ KPRINT_REG(rdi);
+ KPRINT_REG(r8);
+ KPRINT_REG(r9);
+ KPRINT_REG(r10);
+ KPRINT_REG(r11);
+ KPRINT_REG(r12);
+ KPRINT_REG(r13);
+ KPRINT_REG(r14);
+}
+#endif /* __x86_64__ */
#endif /* DEBUG */