+ml_get_max_cpus(void)
+{
+ boolean_t current_state;
+
+ current_state = ml_set_interrupts_enabled(FALSE);
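+ /*
+  * If the maximum CPU count has not been published yet, sleep until
+  * the initialization path sets it and issues a wakeup.
+  */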
+ if (max_cpus_initialized != MAX_CPUS_SET) {
+ max_cpus_initialized = MAX_CPUS_WAIT;
+ assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
+ (void)thread_block(THREAD_CONTINUE_NULL);
+ }
+ (void) ml_set_interrupts_enabled(current_state);
+ return(machine_info.max_cpus);
+}
+
+/*
+ * Routine: ml_init_lock_timeout
+ * Function:
+ *	Initialize the spinlock, TLB-flush and mutex-spin timeout values,
+ *	applying any overrides supplied via boot-args.
+ */
+void
+ml_init_lock_timeout(void)
+{
+ uint64_t abstime;
+ uint32_t mtxspin;
+#if DEVELOPMENT || DEBUG
+ uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
+#else
+ uint64_t default_timeout_ns = NSEC_PER_SEC>>1;
+#endif
+ uint32_t slto;
+ uint32_t prt;
+
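+ /* The "slto_us" boot-arg overrides the default spinlock timeout, in microseconds. */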
+ if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
+ default_timeout_ns = slto * NSEC_PER_USEC;
+
+ /* LockTimeOut is in absolute time, LockTimeOutTSC is in TSC ticks */
+ nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
+ LockTimeOut = (uint32_t) abstime;
+ LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);
+
+ /*
+  * TLBTimeOut dictates the TLB flush timeout period. It defaults to
+  * LockTimeOut but can be overridden separately. In particular, a
+  * zero value inhibits the timeout-panic and cuts a trace event instead
+  * - see pmap_flush_tlbs().
+  */
+ if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) {
+ default_timeout_ns = slto * NSEC_PER_USEC;
+ nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
+ TLBTimeOut = (uint32_t) abstime;
+ } else {
+ TLBTimeOut = LockTimeOut;
+ }
+
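+ /*
+  * MutexSpin bounds how long a held mutex is spun on before blocking.
+  * The "mtxspin" boot-arg is in microseconds, clamped to USEC_PER_SEC>>4;
+  * the default is 10us.
+  */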
+ if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
+ if (mtxspin > USEC_PER_SEC>>4)
+ mtxspin = USEC_PER_SEC>>4;
+ nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
+ } else {
+ nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
+ }
+ MutexSpin = (unsigned int)abstime;
+
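+ /*
+  * Default the debugger re-entry allowance to 4 seconds and convert the
+  * optional "panic_restart_timeout" boot-arg (seconds) to absolute time.
+  */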
+ nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
+ if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt)))
+ nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
+ virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
+ interrupt_latency_tracker_setup();
+ simple_lock_init(&ml_timer_evaluation_slock, 0);
+}
+
+/*
+ * Threshold above which we should attempt to block
+ * instead of spinning for clock_delay_until().
+ */
+
+void
+ml_init_delay_spin_threshold(int threshold_us)
+{
+ nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
+}
+
+boolean_t
+ml_delay_should_spin(uint64_t interval)
+{
+ return (interval < delay_spin_threshold) ? TRUE : FALSE;
+}
+
+/*
+ * This is called from the machine-independent layer
+ * to perform machine-dependent info updates. Defer to cpu_thread_init().
+ */
+void
+ml_cpu_up(void)
+{
+ return;
+}
+
+/*
+ * This is called from the machine-independent layer
+ * to perform machine-dependent info updates.
+ */
+void
+ml_cpu_down(void)
+{
+ i386_deactivate_cpu();
+
+ return;
+}
+
+/*
+ * The following are required for parts of the kernel
+ * that cannot resolve these functions as inlines:
+ */
+extern thread_t current_act(void);
+thread_t
+current_act(void)
+{
+ return(current_thread_fast());
+}
+
+#undef current_thread
+extern thread_t current_thread(void);
+thread_t
+current_thread(void)
+{
+ return(current_thread_fast());
+}
+
+
+boolean_t
+ml_is64bit(void)
+{
+ return (cpu_mode_is64bit());
+}
+
+boolean_t
+ml_thread_is64bit(thread_t thread)
+{
+ return (thread_is_64bit(thread));
+}
+
+boolean_t
+ml_state_is64bit(void *saved_state)
+{
+ return is_saved_state64(saved_state);
+}
+
+void
+ml_cpu_set_ldt(int selector)
+{
+ /*
+ * Avoid loading the LDT
+ * if we're setting the KERNEL LDT and it's already set.
+ */
+ if (selector == KERNEL_LDT &&
+ current_cpu_datap()->cpu_ldt == KERNEL_LDT)
+ return;
+
+ lldt(selector);
+ current_cpu_datap()->cpu_ldt = selector;
+}
+
+void
+ml_fp_setvalid(boolean_t value)