* the rights to redistribute these changes.
*/
-#include <mach_rt.h>
-#include <platforms.h>
+#include <debug.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
+#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
-#include <i386/mp.h>
-
/*
* Fault recovery.
*/
#define RECOVERY_SECTION .section __VECTORS, __recover
#else
#define RECOVERY_SECTION .text
-#define RECOVERY_SECTION .text
#endif
#define RECOVER_TABLE_START \
rdmsr_fail:
movq $1, %rax
ret
+/*
+ * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
+ */
+
+ENTRY(rdmsr64_carefully)
+	movl	%edi, %ecx			/* MSR number -> %ecx for rdmsr */
+	RECOVERY_SECTION
+	RECOVER(rdmsr64_carefully_fail)		/* recover here if rdmsr faults */
+	rdmsr
+	movl	%eax, (%rsi)			/* store low 32 bits of value */
+	movl	%edx, 4(%rsi)			/* store high 32 bits of value */
+	xorl	%eax, %eax			/* return 0 for success */
+	ret
+rdmsr64_carefully_fail:
+	movl	$1, %eax			/* return 1 on fault */
+	ret
+/*
+ * int wrmsr_carefully(uint32_t msr, uint64_t val);
+ */
+
+ENTRY(wrmsr_carefully)
+	movl	%edi, %ecx			/* MSR number -> %ecx for wrmsr */
+	movl	%esi, %eax			/* low 32 bits of value */
+	shr	$32, %rsi
+	movl	%esi, %edx			/* high 32 bits of value */
+	RECOVERY_SECTION
+	RECOVER(wrmsr_fail)			/* recover here if wrmsr faults */
+	wrmsr
+	xorl	%eax, %eax			/* return 0 for success */
+	ret
+wrmsr_fail:
+	movl	$1, %eax			/* return 1 on fault */
+	ret
+
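+/*
+ * Usage sketch (illustrative only, not part of this change): the two
+ * accessors above let a caller probe an MSR before relying on it, e.g.
+ *
+ *	uint64_t val;
+ *	if (rdmsr64_carefully(SOME_MSR, &val) == 0) {	// SOME_MSR: placeholder
+ *		val |= SOME_BIT;			// SOME_BIT: placeholder
+ *		(void) wrmsr_carefully(SOME_MSR, val);
+ *	}
+ *
+ * A non-zero return means the access faulted (typically #GP for an
+ * unimplemented MSR) and was absorbed by the RECOVER() handler.
+ */
+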
+#if DEBUG
+#ifndef TERI
+#define TERI 1
+#endif
+#endif
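+/*
+ * Note (assumption, not stated in this change): publishing the label as
+ * thread_exception_return_internal under DEBUG presumably allows a C-level
+ * thread_exception_return wrapper defined elsewhere to perform extra checks
+ * before jumping here.
+ */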
+#if TERI
+.globl EXT(thread_exception_return_internal)
+#else
.globl EXT(thread_exception_return)
+#endif
.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
call EXT(dtrace_thread_bootstrap)
#endif
+#if TERI
+LEXT(thread_exception_return_internal)
+#else
LEXT(thread_exception_return)
+#endif
cli
xorl %ecx, %ecx /* don't check if we're in the PFZ */
jmp EXT(return_from_trap)
* Copyin/out from user/kernel address space.
* rdi: source address
* rsi: destination address
- * rdx: byte count
+ * rdx: byte count (in fact, always < 64MB -- see copyio)
*/
Entry(_bcopy)
-// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
- xchgq %rdi, %rsi /* source %rsi, dest %rdi */
+ xchg %rdi, %rsi /* source %rsi, dest %rdi */
cld /* count up */
- movl %edx,%ecx /* move by longwords first */
- shrl $3,%ecx
+ mov %rdx, %rcx /* move by longwords first */
+ shr $3, %rcx
RECOVERY_SECTION
RECOVER(_bcopy_fail)
rep
movsq /* move longwords */
- movl %edx,%ecx /* now move remaining bytes */
- andl $7,%ecx
+ movl %edx, %ecx /* now move remaining bytes */
+ andl $7, %ecx
RECOVERY_SECTION
RECOVER(_bcopy_fail)
rep
xor %eax, %eax
ret
+/*
+ * 2-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy2)
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+ movw (%rdi), %cx
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+ movw %cx, (%rsi)
+
+ xorl %eax,%eax /* return 0 for success */
+ ret /* and return */
+
+/*
+ * 4-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy4)
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+ movl (%rdi), %ecx
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+	movl	%ecx, (%rsi)
+
+ xorl %eax,%eax /* return 0 for success */
+ ret /* and return */
+
+/*
+ * 8-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy8)
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+ movq (%rdi), %rcx
+ RECOVERY_SECTION
+ RECOVER(_bcopy_fail)
+	movq	%rcx, (%rsi)
+
+ xorl %eax,%eax /* return 0 for success */
+ ret /* and return */
+
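+/*
+ * Illustrative only (not part of this change): ml_copy_phys() is expected
+ * to pick one of the fixed-size helpers above based on the requested
+ * length, roughly:
+ *
+ *	switch (bytes) {
+ *	case 2:  err = _bcopy2(src, dst);		break;
+ *	case 4:  err = _bcopy4(src, dst);		break;
+ *	case 8:  err = _bcopy8(src, dst);		break;
+ *	default: err = _bcopy(src, dst, bytes);		break;
+ *	}
+ */
+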
/*
movl $(EFAULT),%eax /* return error for failure */
ret
+/*
+ * Copyin 32 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic32)
+ pushq %rbp /* Save registers */
+ movq %rsp, %rbp
+ RECOVERY_SECTION
+ RECOVER(L_copyin_atomic32_fail) /* Set up recovery handler for next instruction */
+ movl (%rdi), %eax /* Load long from user */
+ movl %eax, (%rsi) /* Store to kernel */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyin_atomic32_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyin 64 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic64)
+ pushq %rbp /* Save registers */
+ movq %rsp, %rbp
+ RECOVERY_SECTION
+	RECOVER(L_copyin_atomic64_fail)	/* Set up recovery handler for next instruction */
+ movq (%rdi), %rax /* Load quad from user */
+ movq %rax, (%rsi) /* Store to kernel */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyin_atomic64_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyout 32 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic32)
+ pushq %rbp /* Save registers */
+ movq %rsp, %rbp
+ movl (%rdi), %eax /* Load long from kernel */
+ RECOVERY_SECTION
+	RECOVER(L_copyout_atomic32_fail)	/* Set up recovery handler for next instruction */
+ movl %eax, (%rsi) /* Store long to user */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyout_atomic32_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyout 64 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic64)
+ pushq %rbp /* Save registers */
+ movq %rsp, %rbp
+ movq (%rdi), %rax /* Load quad from kernel */
+ RECOVERY_SECTION
+	RECOVER(L_copyout_atomic64_fail)	/* Set up recovery handler for next instruction */
+ movq %rax, (%rsi) /* Store quad to user */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyout_atomic64_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
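+/*
+ * Illustrative only (not part of this change): callers are expected to pass
+ * naturally aligned user addresses, since a single 32- or 64-bit access is
+ * what makes these copies atomic.  A hypothetical caller-side check:
+ *
+ *	if (user_addr & 3)			// & 7 for the 64-bit variants
+ *		return EINVAL;
+ *	return _copyin_atomic32(user_addr, &kernel_val);
+ */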
+
/*
* Done with recovery table.
*/
RECOVERY_SECTION
RECOVER_TABLE_END
+
+/*
+ * Vector here on any exception at startup prior to switching to
+ * the kernel's idle page-tables and installing the kernel master IDT.
+ */
+Entry(vstart_trap_handler)
+	POSTCODE(BOOT_TRAP_HLT)		/* leave a progress code for early-boot debugging */
+	hlt				/* no IDT or recovery path yet; just stop */
+