apple/xnu.git blobdiff: osfmk/x86_64/locore.s (xnu-6153.11.26)
index 8ca0c92a437f15d9c5f03dad9b97511bce851fb0..4c71fd46186194cd2ebbe8f2896b94be30a5a86c 100644
--- a/osfmk/x86_64/locore.s
+++ b/osfmk/x86_64/locore.s
@@ -54,8 +54,7 @@
  * the rights to redistribute these changes.
  */
 
-#include <mach_rt.h>
-#include <platforms.h>
+#include <debug.h>
 #include <mach_kdp.h>
 #include <mach_assert.h>
 
@@ -63,6 +62,7 @@
 #include <i386/asm.h>
 #include <i386/cpuid.h>
 #include <i386/eflags.h>
+#include <i386/postcode.h>
 #include <i386/proc_reg.h>
 #include <i386/trap.h>
 #include <assym.s>
@@ -72,8 +72,6 @@
 #define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
 #include <mach/i386/syscall_sw.h>
 
-#include <i386/mp.h>
-
 /*
  * Fault recovery.
  */
@@ -82,7 +80,6 @@
 #define        RECOVERY_SECTION        .section        __VECTORS, __recover 
 #else
 #define        RECOVERY_SECTION        .text
-#define        RECOVERY_SECTION        .text
 #endif
 
 #define        RECOVER_TABLE_START     \
@@ -127,7 +124,6 @@ ENTRY(rdmsr_carefully)
 rdmsr_fail:
        movq    $1, %rax
        ret
-
 /*
  * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
  */
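
The RECOVER()/RECOVER_TABLE_START machinery above pairs each instruction that is allowed to fault (such as the rdmsr in rdmsr_carefully, or the user accesses in the copy routines further down) with a recovery label. The C sketch below shows how a trap handler typically consumes such a table; the structure layout and names are illustrative and not copied from xnu's trap code.

    #include <stdint.h>

    /* Illustrative layout: one entry per RECOVER() site, emitted into the
     * __VECTORS,__recover section by the assembler macros above. */
    struct recovery {
            uintptr_t fault_addr;     /* address of the instruction allowed to fault */
            uintptr_t recover_addr;   /* label to resume at if it does */
    };

    extern struct recovery recover_table[];      /* RECOVER_TABLE_START */
    extern struct recovery recover_table_end[];  /* RECOVER_TABLE_END   */

    /* On a kernel fault, return the recovery label if the faulting
     * instruction is listed; 0 means no handler, so the fault is fatal. */
    static uintptr_t
    recovery_lookup_sketch(uintptr_t fault_rip)
    {
            for (struct recovery *rp = recover_table; rp < recover_table_end; rp++) {
                    if (rp->fault_addr == fault_rip) {
                            return rp->recover_addr;
                    }
            }
            return 0;
    }
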
@@ -162,14 +158,28 @@ wrmsr_fail:
        movl    $1, %eax
        ret
 
+#if DEBUG
+#ifndef TERI
+#define TERI 1
+#endif
+#endif
+
+#if TERI
+.globl EXT(thread_exception_return_internal)
+#else
 .globl EXT(thread_exception_return)
+#endif
 .globl EXT(thread_bootstrap_return)
 LEXT(thread_bootstrap_return)
 #if CONFIG_DTRACE
        call EXT(dtrace_thread_bootstrap)
 #endif
 
+#if TERI
+LEXT(thread_exception_return_internal)
+#else
 LEXT(thread_exception_return)
+#endif
        cli
        xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)
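
The TERI conditional above renames the assembly entry point to thread_exception_return_internal on DEBUG kernels, presumably so that a C-level wrapper elsewhere in the kernel can claim the thread_exception_return name and run extra checks before falling through to the assembly path. A minimal sketch of that pattern follows; the wrapper body is invented for illustration, only the two symbol names come from the diff.

    #if DEBUG
    /* Sketch only: the real wrapper lives in C elsewhere in the kernel. */
    extern void thread_exception_return_internal(void) __attribute__((noreturn));

    void
    thread_exception_return(void)
    {
            /* debug-only validation of the return-to-user state would go here */
            thread_exception_return_internal();
    }
    #endif
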
@@ -178,22 +188,21 @@ LEXT(thread_exception_return)
  * Copyin/out from user/kernel address space.
  * rdi:        source address
  * rsi:        destination address
- * rdx:        byte count
+ * rdx:        byte count (in fact, always < 64MB -- see copyio)
  */
 Entry(_bcopy)
-// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
-       xchgq   %rdi, %rsi              /* source %rsi, dest %rdi */
+       xchg    %rdi, %rsi              /* source %rsi, dest %rdi */
 
        cld                             /* count up */
-       movl    %edx,%ecx               /* move by longwords first */
-       shrl    $3,%ecx
+       mov     %rdx, %rcx              /* move by longwords first */
+       shr     $3, %rcx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
        movsq                           /* move longwords */
 
-       movl    %edx,%ecx               /* now move remaining bytes */
-       andl    $7,%ecx
+       movl    %edx, %ecx              /* now move remaining bytes */
+       andl    $7, %ecx
        RECOVERY_SECTION
        RECOVER(_bcopy_fail)
        rep
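
The _bcopy changes above widen the longword count into a full 64-bit register (%rcx), even though, per the updated comment, callers keep counts under 64MB. In C terms the routine amounts to the following split between 8-byte and single-byte copies; this is a sketch of the algorithm only, not a replacement for the rep movsq / rep movsb implementation.

    #include <stddef.h>
    #include <stdint.h>

    /* Equivalent of _bcopy's two rep loops: quadwords first, then the
     * 0-7 remaining bytes.  Assumes non-overlapping regions, as the
     * callers guarantee. */
    static void
    bcopy_sketch(const uint8_t *src, uint8_t *dst, size_t len)
    {
            size_t quads = len >> 3;   /* shr $3, %rcx ; rep movsq */
            size_t tail  = len & 7;    /* andl $7, %ecx ; rep movsb */
            size_t i;

            for (i = 0; i < quads * 8; i += 8) {
                    uint64_t q;
                    __builtin_memcpy(&q, src + i, 8);   /* avoids alignment assumptions */
                    __builtin_memcpy(dst + i, &q, 8);
            }
            for (i = 0; i < tail; i++) {
                    dst[quads * 8 + i] = src[quads * 8 + i];
            }
    }
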
@@ -217,6 +226,54 @@ _pmap_safe_read_fail:
        xor     %eax, %eax
        ret
 
+/*
+ * 2-byte copy used by ml_copy_phys().
+ * rdi:        source address
+ * rsi:        destination address
+ */
+Entry(_bcopy2)
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       movw    (%rdi), %cx
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       movw    %cx, (%rsi)
+
+       xorl    %eax,%eax               /* return 0 for success */
+       ret                             /* and return */
+
+/*
+ * 4-byte copy used by ml_copy_phys().
+ * rdi:        source address
+ * rsi:        destination address
+ */
+Entry(_bcopy4)
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       movl    (%rdi), %ecx
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       mov     %ecx, (%rsi)
+
+       xorl    %eax,%eax               /* return 0 for success */
+       ret                             /* and return */
+
+/*
+ * 8-byte copy used by ml_copy_phys().
+ * rdi:        source address
+ * rsi:        destination address
+ */
+Entry(_bcopy8)
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       movq    (%rdi), %rcx
+       RECOVERY_SECTION
+       RECOVER(_bcopy_fail)
+       mov     %rcx, (%rsi)
+
+       xorl    %eax,%eax               /* return 0 for success */
+       ret                             /* and return */
+
 
        
 /*
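
The new _bcopy2/_bcopy4/_bcopy8 entries let ml_copy_phys() perform small power-of-two copies with a single load and a single store, so 2-, 4- and 8-byte physical copies are never torn across two accesses (which matters for device registers and words shared with other CPUs). The dispatch below is a hedged sketch of how a caller would select them; the prototypes and the name copy_phys_sketch are illustrative, not xnu's actual ml_copy_phys.

    #include <stddef.h>

    /* Each routine returns 0 on success, or EFAULT if the RECOVER handler ran. */
    extern int _bcopy2(const void *src, void *dst);
    extern int _bcopy4(const void *src, void *dst);
    extern int _bcopy8(const void *src, void *dst);
    extern int _bcopy(const void *src, void *dst, size_t len);

    static int
    copy_phys_sketch(const void *src, void *dst, size_t bytes)
    {
            switch (bytes) {
            case 2:  return _bcopy2(src, dst);
            case 4:  return _bcopy4(src, dst);
            case 8:  return _bcopy8(src, dst);
            default: return _bcopy(src, dst, bytes);   /* arbitrary length */
            }
    }
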
@@ -261,9 +318,103 @@ _bcopystr_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        ret
 
+/*
+ * Copyin 32 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic32)
+       pushq   %rbp                    /* Save registers */
+       movq    %rsp, %rbp
+       RECOVERY_SECTION
+       RECOVER(L_copyin_atomic32_fail) /* Set up recovery handler for next instruction */
+       movl    (%rdi), %eax            /* Load long from user */
+       movl    %eax, (%rsi)            /* Store to kernel */
+       xorl    %eax, %eax              /* Return success */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+L_copyin_atomic32_fail:
+       movl    $(EFAULT), %eax         /* Return error for failure */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+/*
+ * Copyin 64 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic64)
+       pushq   %rbp                    /* Save registers */
+       movq    %rsp, %rbp
+       RECOVERY_SECTION
+       RECOVER(L_copyin_atomic64_fail) /* Set up recovery handler for next instruction*/
+       movq    (%rdi), %rax            /* Load quad from user */
+       movq    %rax, (%rsi)            /* Store to kernel */
+       xorl    %eax, %eax              /* Return success */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+L_copyin_atomic64_fail:
+       movl    $(EFAULT), %eax         /* Return error for failure */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+/*
+ * Copyout 32 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic32)
+       pushq   %rbp                    /* Save registers */
+       movq    %rsp, %rbp
+       movl    (%rdi), %eax            /* Load long from kernel */
+       RECOVERY_SECTION
+       RECOVER(L_copyout_atomic32_fail)        /* Set up recovery handler for next instruction*/
+       movl    %eax, (%rsi)            /* Store long to user */
+       xorl    %eax, %eax              /* Return success */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+L_copyout_atomic32_fail:
+       movl    $(EFAULT), %eax         /* Return error for failure */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+/*
+ * Copyout 64 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic64)
+       pushq   %rbp                    /* Save registers */
+       movq    %rsp, %rbp
+       movq    (%rdi), %rax            /* Load quad from kernel */
+       RECOVERY_SECTION
+       RECOVER(L_copyout_atomic64_fail)        /* Set up recovery handler for next instruction*/
+       movq    %rax, (%rsi)            /* Store quad to user */
+       xorl    %eax, %eax              /* Return success */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+L_copyout_atomic64_fail:
+       movl    $(EFAULT), %eax         /* Return error for failure */
+       popq    %rbp                    /* Restore registers */
+       retq                            /* Return */
+
+
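
Each of the four routines above performs the user-facing access as one naturally aligned load or store, so a concurrent observer sees either the old or the new value, never a mix. At the C level they behave like the prototypes and error convention sketched below; the exact declarations used by xnu's copyio layer may differ.

    #include <stdint.h>

    /* Each returns 0 on success or EFAULT if the user access faulted; the
     * user address must be naturally aligned for the access to be atomic. */
    extern int _copyin_atomic32(const char *user_src, uint32_t *kernel_dst);
    extern int _copyin_atomic64(const char *user_src, uint64_t *kernel_dst);
    extern int _copyout_atomic32(const uint32_t *kernel_src, char *user_dst);
    extern int _copyout_atomic64(const uint64_t *kernel_src, char *user_dst);

    /* Example: publish a 64-bit flag word to user space in one store. */
    static int
    publish_flag_sketch(uint64_t value, char *user_dst)
    {
            return _copyout_atomic64(&value, user_dst);
    }
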
 /*
  * Done with recovery table.
  */
        RECOVERY_SECTION
        RECOVER_TABLE_END
 
+
+/*
+ * Vector here on any exception at startup prior to switching to
+ * the kernel's idle page-tables and installing the kernel master IDT.
+ */
+Entry(vstart_trap_handler)
+       POSTCODE(BOOT_TRAP_HLT)
+       hlt
+
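
The vstart_trap_handler stub gives the early boot path somewhere safe to land if an exception fires before the kernel master IDT is installed: it emits a postcode and halts instead of triple-faulting through a stale descriptor. Purely as an illustration of that idea (this is not xnu's boot code, and every name except vstart_trap_handler is invented), an early IDT can be filled so that all 256 vectors point at one catch-all stub:

    #include <stdint.h>

    /* 16-byte long-mode IDT gate descriptor (standard x86-64 layout). */
    struct idt64_gate {
            uint16_t offset_lo;
            uint16_t selector;
            uint16_t ist_and_type;   /* low byte: IST index, high byte: P/DPL/type */
            uint16_t offset_mid;
            uint32_t offset_hi;
            uint32_t reserved;
    } __attribute__((packed));

    extern void vstart_trap_handler(void);

    /* Point every vector at the catch-all stub (illustrative only). */
    static void
    install_catchall_idt_sketch(struct idt64_gate idt[256], uint16_t kernel_cs)
    {
            uintptr_t handler = (uintptr_t)&vstart_trap_handler;

            for (int v = 0; v < 256; v++) {
                    idt[v] = (struct idt64_gate){
                            .offset_lo    = (uint16_t)handler,
                            .selector     = kernel_cs,
                            .ist_and_type = 0x8E00,   /* present, DPL 0, 64-bit interrupt gate */
                            .offset_mid   = (uint16_t)(handler >> 16),
                            .offset_hi    = (uint32_t)(handler >> 32),
                    };
            }
    }
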