+ PAL_RTC_NANOTIME_READ_FAST()
+
+ ret
+
+/*
+ * extern uint64_t _rtc_tsc_to_nanoseconds(
+ * uint64_t value, // %rdi
+ * pal_rtc_nanotime_t *rntp); // %rsi
+ *
+ * Converts TSC units to nanoseconds, using an abbreviated form of the above
+ * algorithm. Note that while we could simply have used tmrCvt(value, tscFCvtt2n),
+ * which would avoid the need for this asm, doing so would be riskier since we'd
+ * be using a different algorithm with possibly different rounding behavior.
+ */
+
+ENTRY(_rtc_tsc_to_nanoseconds)
+ movq %rdi,%rax /* copy value (in TSC units) to convert */
+ movl RNT_SHIFT(%rsi),%ecx /* %cl := conversion shift */
+ movl RNT_SCALE(%rsi),%edx /* %edx := conversion scale factor */
+ shlq %cl,%rax /* tscUnits << shift */
+ mulq %rdx /* (tscUnits << shift) * scale */
+ shrdq $32,%rdx,%rax /* %rdx:%rax >>= 32 */
+ ret
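+
+/*
+ * For reference, the conversion above in C (an illustrative sketch only, not
+ * part of the build; assumes a compiler with __uint128_t, with shift and
+ * scale standing for the pal_rtc_nanotime_t fields read via RNT_SHIFT and
+ * RNT_SCALE):
+ *
+ *	uint64_t
+ *	tsc_to_ns(uint64_t value, uint32_t shift, uint32_t scale)
+ *	{
+ *		uint64_t t = value << shift;            // shlq %cl,%rax
+ *		__uint128_t p = (__uint128_t)t * scale; // mulq %rdx
+ *		return (uint64_t)(p >> 32);             // shrdq $32,%rdx,%rax
+ *	}
+ */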
+
+
+/*
+ * typedef void (*thread_continue_t)(void *param, wait_result_t result);
+ *
+ * void call_continuation( thread_continue_t continuation,
+ *                         void *param,
+ *                         wait_result_t wresult,
+ *                         bool enable_interrupts)
+ */
+
+Entry(call_continuation)
+
+ movq %rdi, %r12 /* continuation */
+ movq %rsi, %r13 /* continuation param */
+ movq %rdx, %r14 /* wait result */
+
+ movq %gs:CPU_KERNEL_STACK,%rsp /* set the stack */
+ xorq %rbp,%rbp /* zero frame pointer */
+
+ test %ecx, %ecx /* enable_interrupts? */
+ jz 1f /* no: leave interrupt state alone */
+ mov $1, %edi /* TRUE */
+ call _ml_set_interrupts_enabled
+1:
+
+ movq %r12,%rcx /* continuation */
+ movq %r13,%rdi /* continuation param */
+ movq %r14,%rsi /* wait result */
+
+ call *%rcx /* call continuation */
+ movq %gs:CPU_ACTIVE_THREAD,%rdi /* current thread */
+ call EXT(thread_terminate) /* terminate the thread if the continuation returns */
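+
+/*
+ * In C terms, the routine above behaves roughly as follows (illustrative
+ * sketch; current_thread() stands in for the %gs:CPU_ACTIVE_THREAD load,
+ * and the switch of %rsp onto the kernel stack has no C equivalent):
+ *
+ *	void
+ *	call_continuation(thread_continue_t continuation, void *param,
+ *	    wait_result_t wresult, bool enable_interrupts)
+ *	{
+ *		if (enable_interrupts)
+ *			(void) ml_set_interrupts_enabled(TRUE);
+ *		continuation(param, wresult);
+ *		// a continuation normally never returns; if it does,
+ *		// the thread self-terminates
+ *		thread_terminate(current_thread());
+ *	}
+ */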
+
+
+Entry(x86_init_wrapper)
+ xor %rbp, %rbp /* zero frame pointer: backtraces stop here */
+ movq %rsi, %rsp /* switch to the supplied stack */
+ callq *%rdi /* call the init routine, which must not return */
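+
+/*
+ * Illustrative C-level view (a hypothetical prototype, not a declaration
+ * taken from this file):
+ *
+ *	void x86_init_wrapper(void (*init)(void), uintptr_t stack_top)
+ *	    __attribute__((noreturn));
+ *
+ * The wrapper zeroes the frame pointer so backtraces terminate here,
+ * switches to the supplied stack, and calls init, which never returns.
+ */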
+
+#if CONFIG_VMX
+
+/*
+ * __vmxon -- Enter VMX Operation
+ * int __vmxon(addr64_t v);
+ */
+Entry(__vmxon)
+ FRAME
+ push %rdi /* vmxon takes an m64 operand: spill v to the stack */
+
+ mov $(VMX_FAIL_INVALID), %ecx
+ mov $(VMX_FAIL_VALID), %edx
+ mov $(VMX_SUCCEED), %eax
+ vmxon (%rsp)
+ cmovcl %ecx, %eax /* CF = 1, ZF = 0 */
+ cmovzl %edx, %eax /* CF = 0, ZF = 1 */
+
+ pop %rdi
+ EMARF
+ ret
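+
+/*
+ * The two cmov instructions implement the VMX status convention from the
+ * Intel SDM; in C terms (illustrative, with CF and ZF standing for the
+ * flags left behind by vmxon):
+ *
+ *	int status = VMX_SUCCEED;
+ *	if (CF)          // VMfailInvalid
+ *		status = VMX_FAIL_INVALID;
+ *	else if (ZF)     // VMfailValid
+ *		status = VMX_FAIL_VALID;
+ *	return status;
+ *
+ * __vmxoff below returns its status the same way.
+ */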
+
+/*
+ * __vmxoff -- Leave VMX Operation
+ * int __vmxoff(void);
+ */
+Entry(__vmxoff)
+ FRAME
+
+ mov $(VMX_FAIL_INVALID), %ecx
+ mov $(VMX_FAIL_VALID), %edx
+ mov $(VMX_SUCCEED), %eax
+ vmxoff
+ cmovcl %ecx, %eax /* CF = 1, ZF = 0 */
+ cmovzl %edx, %eax /* CF = 0, ZF = 1 */
+
+ EMARF
+ ret
+
+#endif /* CONFIG_VMX */