* the rights to redistribute these changes.
*/
-#include <mach_rt.h>
+#include <debug.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
+#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#define RECOVERY_SECTION .section __VECTORS, __recover
#else
#define RECOVERY_SECTION .text
-#define RECOVERY_SECTION .text
#endif
#define RECOVER_TABLE_START \
movl $1, %eax
ret
+#if DEBUG
+#ifndef TERI
+#define TERI 1
+#endif
+#endif
+
+#if TERI
+.globl EXT(thread_exception_return_internal)
+#else
.globl EXT(thread_exception_return)
+#endif
.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
call EXT(dtrace_thread_bootstrap)
#endif
+#if TERI
+LEXT(thread_exception_return_internal)
+#else
LEXT(thread_exception_return)
+#endif
cli
xorl %ecx, %ecx /* don't check if we're in the PFZ */
jmp EXT(return_from_trap)
movl $(EFAULT),%eax /* return error for failure */
ret
+/*
+ * Copyin 32 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic32)
+ pushq %rbp /* Save registers */
+	movq %rsp, %rbp /* Set up frame pointer */
+ RECOVERY_SECTION
+ RECOVER(L_copyin_atomic32_fail) /* Set up recovery handler for next instruction */
+ movl (%rdi), %eax /* Load long from user */
+ movl %eax, (%rsi) /* Store to kernel */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyin_atomic32_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyin 64 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic64)
+ pushq %rbp /* Save registers */
+	movq %rsp, %rbp /* Set up frame pointer */
+ RECOVERY_SECTION
+	RECOVER(L_copyin_atomic64_fail) /* Set up recovery handler for next instruction */
+ movq (%rdi), %rax /* Load quad from user */
+ movq %rax, (%rsi) /* Store to kernel */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyin_atomic64_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyout 32 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic32)
+ pushq %rbp /* Save registers */
+	movq %rsp, %rbp /* Set up frame pointer */
+ movl (%rdi), %eax /* Load long from kernel */
+ RECOVERY_SECTION
+	RECOVER(L_copyout_atomic32_fail) /* Set up recovery handler for next instruction */
+ movl %eax, (%rsi) /* Store long to user */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyout_atomic32_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+/*
+ * Copyout 64 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic64)
+ pushq %rbp /* Save registers */
+	movq %rsp, %rbp /* Set up frame pointer */
+ movq (%rdi), %rax /* Load quad from kernel */
+ RECOVERY_SECTION
+	RECOVER(L_copyout_atomic64_fail) /* Set up recovery handler for next instruction */
+ movq %rax, (%rsi) /* Store quad to user */
+ xorl %eax, %eax /* Return success */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+L_copyout_atomic64_fail:
+ movl $(EFAULT), %eax /* Return error for failure */
+ popq %rbp /* Restore registers */
+ retq /* Return */
+
+
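+/*
+ * For reference, the C-level contract the four entry points above are
+ * assumed to satisfy, following the SysV AMD64 calling convention used
+ * throughout this file (%rdi = first argument, %rsi = second argument,
+ * result in %eax). The argument types here are an assumption; the
+ * authoritative declarations live with the copyio callers and may differ:
+ *
+ *	int _copyin_atomic32(const char *user_src, uint32_t *kernel_dst);
+ *	int _copyin_atomic64(const char *user_src, uint64_t *kernel_dst);
+ *	int _copyout_atomic32(const char *kernel_src, char *user_dst);
+ *	int _copyout_atomic64(const char *kernel_src, char *user_dst);
+ *
+ * Each performs the user-space access as a single aligned load or store,
+ * so a concurrent writer can never observe a torn value, and returns 0 on
+ * success or EFAULT if the access faults (via the RECOVER table entry).
+ */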
/*
* Done with recovery table.
*/
RECOVERY_SECTION
RECOVER_TABLE_END
+
+/*
+ * Vector here on any exception at startup prior to switching to
+ * the kernel's idle page-tables and installing the kernel master IDT.
+ */
+Entry(vstart_trap_handler)
+ POSTCODE(BOOT_TRAP_HLT)
+ hlt
+