+boolean_t
+machine_timeout_suspended(void)
+{
+ return pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake();
+}
+
+/* Eagerly evaluate all pending timer and thread callouts
+ */
+void
+ml_timer_evaluate(void)
+{
+ KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ uint64_t te_end, te_start = mach_absolute_time();
+ simple_lock(&ml_timer_evaluation_slock, LCK_GRP_NULL);
+ ml_timer_evaluation_in_progress = TRUE;
+ thread_call_delayed_timer_rescan_all();
+ mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
+ ml_timer_evaluation_in_progress = FALSE;
+ ml_timer_eager_evaluations++;
+ te_end = mach_absolute_time();
+ ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
+ simple_unlock(&ml_timer_evaluation_slock);
+
+ KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
/*
 * Return TRUE while ml_timer_evaluate() is rescanning callouts, so
 * timer code can distinguish a forced rescan from a normal expiry.
 */
boolean_t
ml_timer_forced_evaluation(void)
{
	return ml_timer_evaluation_in_progress;
}
+
/*
 * Per-thread energy statistic.  Not maintained on this platform;
 * always reports zero.
 */
uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}
+
+void
+ml_gpu_stat_update(uint64_t gpu_ns_delta)
+{
+ current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
+}
+
+uint64_t
+ml_gpu_stat(thread_t t)
+{
+ return t->machine.thread_gpu_ns;
+}
+
/* Enable flag for plctrace; cleared by plctrace_disable() below. */
int plctrace_enabled = 0;
+
/* Out-of-line entry point; defers to disable_preemption_internal(). */
void
_disable_preemption(void)
{
	disable_preemption_internal();
}
+
/* Out-of-line entry point; defers to enable_preemption_internal(). */
void
_enable_preemption(void)
{
	enable_preemption_internal();
}
+
/* Turn off plctrace by clearing its enable flag. */
void
plctrace_disable(void)
{
	plctrace_enabled = 0;
}
+
/* Latched by ml_set_is_quiescing(); queried by ml_is_quiescing(). */
static boolean_t ml_quiescing;
+
/* Record the machine quiescing state for later query via ml_is_quiescing(). */
void
ml_set_is_quiescing(boolean_t quiescing)
{
	ml_quiescing = quiescing;
}
+
/* Return the quiescing state last recorded by ml_set_is_quiescing(). */
boolean_t
ml_is_quiescing(void)
{
	return ml_quiescing;
}
+
/* Booter-reported memory size is not tracked on this platform; always 0. */
uint64_t
ml_get_booter_memory_size(void)
{
	return 0;
}
+
/*
 * Machine-dependent lockdown step: protect const data mappings
 * (see x86_64_protect_data_const()).
 */
void
machine_lockdown(void)
{
	x86_64_protect_data_const();
}
+
/* Whether the given CPU may be taken offline; unconditionally true here. */
bool
ml_cpu_can_exit(__unused int cpu_id)
{
	return true;
}
+
/* CPU state-transition begin hook; no machine-dependent work needed here. */
void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}
+
/* CPU state-transition end hook; no machine-dependent work needed here. */
void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}
+
/* CPU-iteration begin hook; no machine-dependent work needed here. */
void
ml_cpu_begin_loop(void)
{
}
+
/* CPU-iteration end hook; no machine-dependent work needed here. */
void
ml_cpu_end_loop(void)
{
}
+
/*
 * Report machine-reserved VM regions.  None are defined on this
 * platform: the out-parameter is cleared and the count is zero.
 */
size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
	(void)vm_is64bit;
	assert(regions != NULL);

	*regions = NULL;
	return 0;
}