X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/bd504ef0e0b883cdd7917b73b3574eb9ce669905..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/x86_64/locore.s?ds=sidebyside

diff --git a/osfmk/x86_64/locore.s b/osfmk/x86_64/locore.s
index 8ca0c92a4..4c71fd461 100644
--- a/osfmk/x86_64/locore.s
+++ b/osfmk/x86_64/locore.s
@@ -54,8 +54,7 @@
  * the rights to redistribute these changes.
  */
 
-#include
-#include
+#include
 #include
 #include
@@ -63,6 +62,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -72,8 +72,6 @@
 #define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
 #include
 
-#include
-
 /*
  * Fault recovery.
  */
@@ -82,7 +80,6 @@
 #define RECOVERY_SECTION	.section	__VECTORS, __recover
 #else
 #define RECOVERY_SECTION	.text
-#define RECOVERY_SECTION	.text
 #endif
 
 #define RECOVER_TABLE_START	\
@@ -127,7 +124,6 @@ ENTRY(rdmsr_carefully)
 rdmsr_fail:
 	movq	$1, %rax
 	ret
-
 /*
  * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
  */
@@ -162,14 +158,28 @@ wrmsr_fail:
 	movl	$1, %eax
 	ret
 
+#if DEBUG
+#ifndef TERI
+#define TERI 1
+#endif
+#endif
+
+#if TERI
+.globl EXT(thread_exception_return_internal)
+#else
 .globl EXT(thread_exception_return)
+#endif
 .globl EXT(thread_bootstrap_return)
 LEXT(thread_bootstrap_return)
 #if CONFIG_DTRACE
 	call EXT(dtrace_thread_bootstrap)
 #endif
 
+#if TERI
+LEXT(thread_exception_return_internal)
+#else
 LEXT(thread_exception_return)
+#endif
 	cli
 	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
 	jmp	EXT(return_from_trap)
@@ -178,22 +188,21 @@
  * Copyin/out from user/kernel address space.
  * rdi: source address
  * rsi: destination address
- * rdx: byte count
+ * rdx: byte count (in fact, always < 64MB -- see copyio)
  */
 Entry(_bcopy)
-// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
-	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */
+	xchg	%rdi, %rsi		/* source %rsi, dest %rdi */
 
 	cld				/* count up */
-	movl	%edx,%ecx		/* move by longwords first */
-	shrl	$3,%ecx
+	mov	%rdx, %rcx		/* move by longwords first */
+	shr	$3, %rcx
 	RECOVERY_SECTION
 	RECOVER(_bcopy_fail)
 	rep
 	movsq				/* move longwords */
-	movl	%edx,%ecx		/* now move remaining bytes */
-	andl	$7,%ecx
+	movl	%edx, %ecx		/* now move remaining bytes */
+	andl	$7, %ecx
 	RECOVERY_SECTION
 	RECOVER(_bcopy_fail)
 	rep
@@ -217,6 +226,54 @@ _pmap_safe_read_fail:
 	xor	%eax, %eax
 	ret
 
+/*
+ * 2-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy2)
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	movw	(%rdi), %cx
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	movw	%cx, (%rsi)
+
+	xorl	%eax,%eax		/* return 0 for success */
+	ret				/* and return */
+
+/*
+ * 4-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy4)
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	movl	(%rdi), %ecx
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	mov	%ecx, (%rsi)
+
+	xorl	%eax,%eax		/* return 0 for success */
+	ret				/* and return */
+
+/*
+ * 8-byte copy used by ml_copy_phys().
+ * rdi: source address
+ * rsi: destination address
+ */
+Entry(_bcopy8)
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	movq	(%rdi), %rcx
+	RECOVERY_SECTION
+	RECOVER(_bcopy_fail)
+	mov	%rcx, (%rsi)
+
+	xorl	%eax,%eax		/* return 0 for success */
+	ret				/* and return */
+
 /*
@@ -261,9 +318,103 @@ _bcopystr_fail:
 	movl	$(EFAULT),%eax		/* return error for failure */
 	ret
 
+/*
+ * Copyin 32 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic32)
+	pushq	%rbp			/* Save registers */
+	movq	%rsp, %rbp
+	RECOVERY_SECTION
+	RECOVER(L_copyin_atomic32_fail)	/* Set up recovery handler for next instruction */
+	movl	(%rdi), %eax		/* Load long from user */
+	movl	%eax, (%rsi)		/* Store to kernel */
+	xorl	%eax, %eax		/* Return success */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+L_copyin_atomic32_fail:
+	movl	$(EFAULT), %eax		/* Return error for failure */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+/*
+ * Copyin 64 bit aligned word as a single transaction
+ * rdi: source address (user)
+ * rsi: destination address (kernel)
+ */
+Entry(_copyin_atomic64)
+	pushq	%rbp			/* Save registers */
+	movq	%rsp, %rbp
+	RECOVERY_SECTION
+	RECOVER(L_copyin_atomic64_fail)	/* Set up recovery handler for next instruction */
+	movq	(%rdi), %rax		/* Load quad from user */
+	movq	%rax, (%rsi)		/* Store to kernel */
+	xorl	%eax, %eax		/* Return success */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+L_copyin_atomic64_fail:
+	movl	$(EFAULT), %eax		/* Return error for failure */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+/*
+ * Copyout 32 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic32)
+	pushq	%rbp			/* Save registers */
+	movq	%rsp, %rbp
+	movl	(%rdi), %eax		/* Load long from kernel */
+	RECOVERY_SECTION
+	RECOVER(L_copyout_atomic32_fail)	/* Set up recovery handler for next instruction */
+	movl	%eax, (%rsi)		/* Store long to user */
+	xorl	%eax, %eax		/* Return success */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+L_copyout_atomic32_fail:
+	movl	$(EFAULT), %eax		/* Return error for failure */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+/*
+ * Copyout 64 bit aligned word as a single transaction
+ * rdi: source address (kernel)
+ * rsi: destination address (user)
+ */
+Entry(_copyout_atomic64)
+	pushq	%rbp			/* Save registers */
+	movq	%rsp, %rbp
+	movq	(%rdi), %rax		/* Load quad from kernel */
+	RECOVERY_SECTION
+	RECOVER(L_copyout_atomic64_fail)	/* Set up recovery handler for next instruction */
+	movq	%rax, (%rsi)		/* Store quad to user */
+	xorl	%eax, %eax		/* Return success */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+L_copyout_atomic64_fail:
+	movl	$(EFAULT), %eax		/* Return error for failure */
+	popq	%rbp			/* Restore registers */
+	retq				/* Return */
+
+
 /*
  * Done with recovery table.
  */
 	RECOVERY_SECTION
 	RECOVER_TABLE_END
+
+/*
+ * Vector here on any exception at startup prior to switching to
+ * the kernel's idle page-tables and installing the kernel master IDT.
+ */
+Entry(vstart_trap_handler)
+	POSTCODE(BOOT_TRAP_HLT)
+	hlt
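
A note on the fault-recovery plumbing used throughout this file: each RECOVERY_SECTION / RECOVER(handler) pair emits a (faulting-instruction address, recovery address) entry into the __VECTORS,__recover section, bracketed by RECOVER_TABLE_START / RECOVER_TABLE_END, so that when the instruction following the macro faults, the kernel's trap path can look the faulting address up and resume at the recovery label instead of panicking. Below is a minimal userspace sketch of that table-plus-lookup shape (GNU as, Linux x86-64, built with `as` and `ld -e _start`); the .recover section and the probe_*/handler_* labels are invented for illustration, and the "fault" is simulated by calling the lookup directly, since real fault interception lives in the trap handler.

	.section .recover, "a", @progbits	# stand-in for __VECTORS,__recover
recover_table:					# (fault address, recovery address) pairs
	.quad	probe_a, handler_a
	.quad	probe_b, handler_b
recover_table_end:

	.text
recover_lookup:				# %rdi = faulting address; handler or 0 in %rax
	lea	recover_table(%rip), %rcx
	lea	recover_table_end(%rip), %rdx
1:	cmp	%rdx, %rcx		# ran off the end of the table?
	jae	2f
	cmp	(%rcx), %rdi		# is this the faulting instruction's entry?
	je	3f
	add	$16, %rcx		# no: advance to the next pair
	jmp	1b
2:	xor	%eax, %eax		# no match: return NULL
	ret
3:	mov	8(%rcx), %rax		# match: return the recovery address
	ret

	.globl	_start
_start:
	lea	probe_b(%rip), %rdi	# pretend probe_b just faulted
	call	recover_lookup
	test	%rax, %rax
	jz	2f
	jmp	*%rax			# resume at the recovery handler
2:	mov	$2, %edi		# exit(2): lookup failed
	jmp	3f

probe_a:	nop			# stand-ins for instructions that may fault
probe_b:	nop

handler_a:
	mov	$1, %edi		# exit(1): wrong handler
	jmp	3f
handler_b:
	xor	%edi, %edi		# exit(0): recovered via handler_b
3:	mov	$60, %eax		# SYS_exit
	syscall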
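
On the _bcopy hunk itself: the longword-count arithmetic is widened from %edx/%ecx to the full %rdx/%rcx, so the byte count is no longer silently truncated to 32 bits before the shift (the new comment records that copyio bounds the count below 64MB in any case), while the 0-7 byte remainder still fits comfortably in 32-bit operations. Here is a runnable userspace sketch of the same words-then-bytes pattern (GNU as, Linux x86-64, `ld -e _start`); bcopy_sketch, msg, buf and msglen are invented names, and the fault-recovery markers are omitted.

	.text
bcopy_sketch:				# %rdi = source, %rsi = destination, %rdx = byte count
	xchg	%rdi, %rsi		# rep mov* wants dest in %rdi, source in %rsi
	cld				# count up
	mov	%rdx, %rcx		# full 64-bit count, as in the new code
	shr	$3, %rcx		# move by 8-byte longwords first...
	rep	movsq
	movl	%edx, %ecx
	andl	$7, %ecx		# ...then the 0-7 remaining bytes
	rep	movsb
	ret

	.globl	_start
_start:
	lea	msg(%rip), %rdi		# source
	lea	buf(%rip), %rsi		# destination
	mov	$msglen, %rdx		# byte count
	call	bcopy_sketch
	mov	$1, %eax		# write(1, buf, msglen) to show the copy
	mov	$1, %edi
	lea	buf(%rip), %rsi
	mov	$msglen, %rdx
	syscall
	mov	$60, %eax		# exit(0)
	xor	%edi, %edi
	syscall

	.data
msg:	.ascii	"copied by bcopy_sketch\n"
	.set	msglen, . - msg
buf:	.space	msglen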
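
Likewise for the new copyin/copyout entries: _copyin_atomic32/64 and _copyout_atomic32/64 each touch the user address with exactly one naturally aligned load or store, so the transfer happens as a single transaction that cannot tear against a concurrent reader or writer, and only that single user-touching instruction sits under a RECOVER entry. A userspace sketch of the 32-bit shape, with the same frame-pointer prologue but without the recovery machinery (GNU as, Linux x86-64; all names invented):

	.text
copy_atomic32_sketch:			# %rdi = source, %rsi = destination, both 4-byte aligned
	pushq	%rbp			# keep the frame-pointer discipline of the kernel version
	movq	%rsp, %rbp
	movl	(%rdi), %eax		# the transfer is this one aligned load...
	movl	%eax, (%rsi)		# ...and this one aligned store, nothing partial
	xorl	%eax, %eax		# return 0 for success
	popq	%rbp
	retq

	.globl	_start
_start:
	lea	src32(%rip), %rdi
	lea	dst32(%rip), %rsi
	call	copy_atomic32_sketch
	cmpl	$0x12345678, dst32(%rip)	# did the word arrive intact?
	jne	1f
	xor	%edi, %edi		# exit(0): success
	jmp	2f
1:	mov	$1, %edi		# exit(1): mismatch
2:	mov	$60, %eax		# SYS_exit
	syscall

	.data
	.p2align 2			# keep both words 4-byte aligned
src32:	.long	0x12345678
dst32:	.long	0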