X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..143464d58d2bd6378e74eec636961ceb0d32fb91:/osfmk/x86_64/machine_routines_asm.s

diff --git a/osfmk/x86_64/machine_routines_asm.s b/osfmk/x86_64/machine_routines_asm.s
index 1c74f9fc8..0d304f26a 100644
--- a/osfmk/x86_64/machine_routines_asm.s
+++ b/osfmk/x86_64/machine_routines_asm.s
@@ -33,6 +33,7 @@
 #include 
 #include 
+#include 
 #include 
 
 /*
@@ -103,28 +104,36 @@ ENTRY(_rtc_nanotime_adjust)
 	ret
 
 /*
- * unint64_t _rtc_nanotime_read(rtc_nanotime_t *rntp, int slow);
+ * uint64_t _rtc_nanotime_read(rtc_nanotime_t *rntp);
  *
  * This is the same as the commpage nanotime routine, except that it uses the
  * kernel internal "rtc_nanotime_info" data instead of the commpage data.
  * These two copies of data are kept in sync by rtc_clock_napped().
  *
- * Warning!  There is another copy of this code in osfmk/x86_64/idt64.s.
- * These are kept in sync by both using the RTC_NANOTIME_READ() macro.
+ * Warning!  There are several copies of this code in the trampolines found in
+ * osfmk/x86_64/idt64.s, coming from the various TIMER macros in rtclock_asm.h.
+ * They're all kept in sync by using the RTC_NANOTIME_READ() macro.
  *
- * There are two versions of this algorithm, for "slow" and "fast" processors.
- * The more common "fast" algorithm is:
+ * The algorithm we use is:
  *
- * ns = (((rdtsc - rnt_tsc_base)*rnt_tsc_scale) / 2**32) + rnt_ns_base;
+ * ns = ((((rdtsc - rnt_tsc_base)<<rnt_shift)*rnt_tsc_scale) / 2**32) + rnt_ns_base;
  *
- * Of course, the divide by 2**32 is a nop.  rnt_tsc_scale is a constant
- * computed during initialization:
+ * rnt_shift, a constant computed during initialization, is the smallest value for
+ * which:
  *
- * rnt_tsc_scale = (10e9 * 2**32) / tscFreq;
+ *	(tscFreq << rnt_shift) > SLOW_TSC_THRESHOLD
  *
- * The "slow" algorithm uses long division:
+ * Where SLOW_TSC_THRESHOLD is about 10e9.  Since most processor's tscFreqs are greater
+ * than 1GHz, rnt_shift is usually 0.  rnt_tsc_scale is also a 32-bit constant:
  *
- * ns = (((rdtsc - rnt_tsc_base) * 10e9) / tscFreq) + rnt_ns_base;
+ *	rnt_tsc_scale = (10e9 * 2**32) / (tscFreq << rnt_shift);
+ *
+ * On 64-bit processors this algorithm could be simplified by doing a 64x64 bit
+ * multiply of rdtsc by tscFCvtt2n:
+ *
+ *	ns = (((rdtsc - rnt_tsc_base) * tscFCvtt2n) / 2**32) + rnt_ns_base;
+ *
+ * We don't do so in order to use the same algorithm in 32- and 64-bit mode.
+ * When U32 goes away, we should reconsider.
  *
  * Since this routine is not synchronized and can be called in any context, 
  * we use a generation count to guard against seeing partially updated data.
@@ -135,33 +144,36 @@ ENTRY(_rtc_nanotime_adjust)
  * the generation is zero.
  *
  * unint64_t _rtc_nanotime_read(
- *			rtc_nanotime_t *rntp,		// %rdi
- *			int            slow);		// %rsi
+ *			rtc_nanotime_t *rntp);		// %rdi
  *
  */
 ENTRY(_rtc_nanotime_read)
-	test		%rsi,%rsi
-	jnz		Lslow
-
-	/*
-	 * Processor whose TSC frequency is faster than SLOW_TSC_THRESHOLD
-	 */
+	PAL_RTC_NANOTIME_READ_FAST()
 	ret
+
+/*
+ * extern uint64_t _rtc_tsc_to_nanoseconds(
+ *          uint64_t value,              // %rdi
+ *          pal_rtc_nanotime_t *rntp);   // %rsi
+ *
+ * Converts TSC units to nanoseconds, using an abbreviated form of the above
+ * algorithm.  Note that while we could have simply used tmrCvt(value,tscFCvtt2n),
+ * which would avoid the need for this asm, doing so is a bit more risky since
+ * we'd be using a different algorithm with possibly different rounding etc.
+ */
-	/*
-	 * Processor whose TSC frequency is not faster than SLOW_TSC_THRESHOLD
-	 * But K64 doesn't support this...
-	 */
-Lslow:
-	lea	1f(%rip),%rdi
-	xorb	%al,%al
-	call	EXT(panic)
-	hlt
-	.data
-1:	String	"_rtc_nanotime_read() - slow algorithm not supported"
-
+ENTRY(_rtc_tsc_to_nanoseconds)
+	movq	%rdi,%rax			/* copy value (in TSC units) to convert */
+	movl	RNT_SHIFT(%rsi),%ecx
+	movl	RNT_SCALE(%rsi),%edx
+	shlq	%cl,%rax			/* tscUnits << shift */
+	mulq	%rdx				/* (tscUnits << shift) * scale */
+	shrdq	$32,%rdx,%rax			/* %rdx:%rax >>= 32 */
+	ret
+
+
 Entry(call_continuation)
 	movq	%rdi,%rcx			/* get continuation */
@@ -173,3 +185,97 @@ Entry(call_continuation)
 	movq	%gs:CPU_ACTIVE_THREAD,%rdi
 	call	EXT(thread_terminate)
 
+Entry(x86_init_wrapper)
+	xor	%rbp, %rbp
+	movq	%rsi, %rsp
+	callq	*%rdi
+
+	/*
+	 * Generate a 64-bit quantity with possibly random characteristics, intended for use
+	 * before the kernel entropy pool is available. The processor's RNG is used if
+	 * available, and a value derived from the Time Stamp Counter is returned if not.
+	 * Multiple invocations may result in well-correlated values if sourced from the TSC.
+	 */
+Entry(ml_early_random)
+	mov	%rbx, %rsi
+	mov	$1, %eax
+	cpuid
+	mov	%rsi, %rbx
+	test	$(1 << 30), %ecx
+	jz	Lnon_rdrand
+	RDRAND_RAX		/* RAX := 64 bits of DRBG entropy */
+	jnc	Lnon_rdrand
+	ret
+Lnon_rdrand:
+	rdtsc			/* EDX:EAX := TSC */
+	/* Distribute low order bits */
+	mov	%eax, %ecx
+	xor	%al, %ah
+	shl	$16, %rcx
+	xor	%rcx, %rax
+	xor	%eax, %edx
+
+	/* Incorporate ASLR entropy, if any */
+	lea	(%rip), %rcx
+	shr	$21, %rcx
+	movzbl	%cl, %ecx
+	shl	$16, %ecx
+	xor	%ecx, %edx
+
+	mov	%ah, %cl
+	ror	%cl, %edx	/* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
+	shl	$32, %rdx
+	xor	%rdx, %rax
+	mov	%cl, %al
+	ret
+
+#if CONFIG_VMX
+
+/*
+ *	__vmxon -- Enter VMX Operation
+ *	int __vmxon(addr64_t v);
+ */
+Entry(__vmxon)
+	FRAME
+	push	%rdi
+
+	mov	$(VMX_FAIL_INVALID), %ecx
+	mov	$(VMX_FAIL_VALID), %edx
+	mov	$(VMX_SUCCEED), %eax
+	vmxon	(%rsp)
+	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
+	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */
+
+	pop	%rdi
+	EMARF
+	ret
+
+/*
+ *	__vmxoff -- Leave VMX Operation
+ *	int __vmxoff(void);
+ */
+Entry(__vmxoff)
+	FRAME
+
+	mov	$(VMX_FAIL_INVALID), %ecx
+	mov	$(VMX_FAIL_VALID), %edx
+	mov	$(VMX_SUCCEED), %eax
+	vmxoff
+	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
+	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */
+
+	EMARF
+	ret
+
+#endif /* CONFIG_VMX */
+
+/*
+ *	mfence -- Memory Barrier
+ *	Use out-of-line assembly to get
+ *	standard x86-64 ABI guarantees
+ *	about what the caller's codegen
+ *	has in registers vs. memory
+ */
+Entry(do_mfence)
+	mfence
+	ret
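
For readers following the comment block above, here is a minimal C sketch of the fixed-point conversion that the new _rtc_tsc_to_nanoseconds routine performs and of the generation-guarded read that RTC_NANOTIME_READ() describes. This is illustrative only: the struct layout and field names (nanotime_sketch_t, rnt_scale, rnt_shift, etc.) are assumptions standing in for the real pal_rtc_nanotime_t and the RNT_SHIFT/RNT_SCALE assembly offsets, unsigned __int128 stands in for the %rdx:%rax product formed by mulq/shrdq, and the load-ordering/barrier details the real code relies on are omitted.

    #include <stdint.h>

    /* Hypothetical mirror of the fields the assembly reads; the real
     * pal_rtc_nanotime_t in xnu has a different, larger layout. */
    typedef struct {
        volatile uint64_t rnt_generation;  /* 0 while an update is in progress */
        uint64_t          rnt_tsc_base;    /* TSC value at the last update */
        uint64_t          rnt_ns_base;     /* nanoseconds at the last update */
        uint32_t          rnt_scale;       /* (10e9 * 2**32) / (tscFreq << rnt_shift) */
        uint32_t          rnt_shift;       /* smallest shift making (tscFreq << shift) > ~10e9 */
    } nanotime_sketch_t;

    /* What _rtc_tsc_to_nanoseconds computes:
     *     ns = ((tsc_units << rnt_shift) * rnt_scale) >> 32
     * The asm forms the wide intermediate in %rdx:%rax with mulq and extracts
     * the >>32 result with shrdq; unsigned __int128 plays that role here. */
    static uint64_t
    tsc_to_ns_sketch(uint64_t tsc_units, const nanotime_sketch_t *rntp)
    {
        uint64_t shifted = tsc_units << rntp->rnt_shift;
        return (uint64_t)(((unsigned __int128)shifted * rntp->rnt_scale) >> 32);
    }

    /* Generation-guarded read, per the comment block: retry if the generation
     * was zero (update in progress) or changed while we were reading. */
    static uint64_t
    rtc_nanotime_read_sketch(const nanotime_sketch_t *rntp)
    {
        uint64_t gen, ns;

        do {
            gen = rntp->rnt_generation;
            ns  = tsc_to_ns_sketch(__builtin_ia32_rdtsc() - rntp->rnt_tsc_base, rntp)
                  + rntp->rnt_ns_base;
        } while (gen == 0 || gen != rntp->rnt_generation);

        return ns;
    }

Keeping rnt_scale a 32-bit fixed-point factor (rather than a full 64x64 multiply by tscFCvtt2n) is what the comment means by using the same algorithm in 32- and 64-bit mode: the 64x32 product can exceed 64 bits, which is why the assembly needs mulq plus shrdq instead of a plain multiply.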