+{
+ return(current_thread_fast());
+}
+
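+/*
+ * Out-of-line current_thread(): the macro is #undef'd so that a real
+ * function definition can be exported for callers that need one.
+ */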
+#undef current_thread
+extern thread_t current_thread(void);
+thread_t
+current_thread(void)
+{
+ return(current_thread_fast());
+}
+
+
+/* Return TRUE if the processor is running in 64-bit mode. */
+boolean_t ml_is64bit(void) {
+ return (cpu_mode_is64bit());
+}
+
+
+/* Return TRUE if the given thread is a 64-bit thread. */
+boolean_t ml_thread_is64bit(thread_t thread) {
+ return (thread_is_64bit(thread));
+}
+
+
+/* Return TRUE if the saved interrupt state is a 64-bit state. */
+boolean_t ml_state_is64bit(void *saved_state) {
+ return is_saved_state64(saved_state);
+}
+
+void ml_cpu_set_ldt(int selector)
+{
+ /*
+ * Avoid loading the LDT
+ * if we're setting the KERNEL LDT and it's already set.
+ */
+ if (selector == KERNEL_LDT &&
+ current_cpu_datap()->cpu_ldt == KERNEL_LDT)
+ return;
+
+ lldt(selector);
+ current_cpu_datap()->cpu_ldt = selector;
+}
+
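+/* Mark the current thread's floating-point state valid or invalid. */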
+void ml_fp_setvalid(boolean_t value)
+{
+ fp_setvalid(value);
+}
+
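+/* Return the timestamp of the most recent interrupt event on this CPU. */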
+uint64_t ml_cpu_int_event_time(void)
+{
+ return current_cpu_datap()->cpu_int_event_time;
+}
+
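+/*
+ * Return the number of bytes of stack remaining, approximating the
+ * current stack pointer by the address of a local variable and using
+ * the interrupt stack bounds when at interrupt context.
+ */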
+vm_offset_t ml_stack_remaining(void)
+{
+ uintptr_t local = (uintptr_t) &local;
+
+ if (ml_at_interrupt_context() != 0) {
+ return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE));
+ } else {
+ return (local - current_thread()->kernel_stack);
+ }
+}
+
+void
+kernel_preempt_check(void)
+{
+ boolean_t intr;
+ unsigned long flags;
+
+ assert(get_preemption_level() == 0);
+
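+ /* Read EFLAGS so we can test whether interrupts are enabled. */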
+ __asm__ volatile("pushf; pop %0" : "=r" (flags));
+
+ intr = ((flags & EFL_IF) != 0);
+
+ if ((*ast_pending() & AST_URGENT) && intr == TRUE) {
+ /*
+ * Interrupts are enabled and an urgent AST is pending:
+ * it is safe to take the preemption trap now.
+ */
+ __asm__ volatile ("int %0" :: "N" (T_PREEMPT));
+ }
+}
+
+/*
+ * Return TRUE if machine timeouts should be suspended, e.g. when
+ * virtualized, timing out on TLB flushes or spinlocks, panicking,
+ * or shortly after debugger activity or a wake from sleep.
+ */
+boolean_t machine_timeout_suspended(void) {
+ return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out ||
+ panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
+}
+
+/* Eagerly evaluate all pending timer and thread callouts. */
+void ml_timer_evaluate(void) {
+ KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ uint64_t te_end, te_start = mach_absolute_time();
+ simple_lock(&ml_timer_evaluation_slock);
+ ml_timer_evaluation_in_progress = TRUE;
+ thread_call_delayed_timer_rescan_all();
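+ /* Have each CPU rescan its local timer queue (asynchronously). */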
+ mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
+ ml_timer_evaluation_in_progress = FALSE;
+ ml_timer_eager_evaluations++;
+ te_end = mach_absolute_time();
+ ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
+ simple_unlock(&ml_timer_evaluation_slock);
+
+ KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
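+/* Return TRUE while an eager timer evaluation is in progress. */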
+boolean_t
+ml_timer_forced_evaluation(void) {
+ return ml_timer_evaluation_in_progress;
+}
+
+/* 32-bit right-rotate n bits */
+static inline uint32_t ror32(uint32_t val, const unsigned int n)
+{
+ __asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
+ return val;
+}
+
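+/*
+ * Accumulate entropy from timestamp-counter readings into a circular
+ * buffer; expected to run only on the master CPU.
+ */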
+void
+ml_entropy_collect(void)
+{
+ uint32_t tsc_lo, tsc_hi;
+ uint32_t *ep;
+
+ assert(cpu_number() == master_cpu);
+
+ /* update buffer pointer cyclically */
+ if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE)
+ ep = EntropyData.index_ptr = EntropyData.buffer;
+ else
+ ep = EntropyData.index_ptr++;
+
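+ /* Mix the low-order TSC bits into the current buffer slot. */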
+ rdtsc_nofence(tsc_lo, tsc_hi);
+ *ep = ror32(*ep, 9) ^ tsc_lo;
+}
+
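+/* Charge GPU time (in nanoseconds) to the current thread. */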
+void
+ml_gpu_stat_update(uint64_t gpu_ns_delta) {
+ current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
+}
+
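+/* Return the GPU time accumulated by the given thread. */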
+uint64_t
+ml_gpu_stat(thread_t t) {
+ return t->machine.thread_gpu_ns;
+}