+void
+mp_rendezvous_break_lock(void)
+{
+ simple_lock_init(&mp_rv_lock, 0);
+}
+
+static void
+setup_disable_intrs(__unused void * param_not_used)
+{
+ /* disable interrupts before the first barrier */
+ boolean_t intr = ml_set_interrupts_enabled(FALSE);
+
+ current_cpu_datap()->cpu_iflag = intr;
+ DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
+}
+
+static void
+teardown_restore_intrs(__unused void * param_not_used)
+{
+ /* restore interrupt flag following MTRR changes */
+ ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
+ DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
+}
+
+/*
+ * A wrapper around mp_rendezvous() that calls action_func() with
+ * interrupts disabled. This is exported for use by kexts.
+ */
+void
+mp_rendezvous_no_intrs(
+ void (*action_func)(void *),
+ void *arg)
+{
+ mp_rendezvous(setup_disable_intrs,
+ action_func,
+ teardown_restore_intrs,
+ arg);
+}
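+
+/*
+ * Illustrative sketch (hypothetical, not part of this change): a kext
+ * that must program an MSR identically on every cpu could use the
+ * exported wrapper as follows. EXAMPLE_MSR and the example_* names are
+ * invented for illustration.
+ */
+#if 0 /* example only, not compiled */
+static void
+example_msr_action(void *arg)
+{
+ /* runs on every cpu with interrupts disabled */
+ wrmsr64(EXAMPLE_MSR, *(uint64_t *) arg);
+}
+
+static void
+example_program_msr_all_cpus(uint64_t value)
+{
+ mp_rendezvous_no_intrs(example_msr_action, &value);
+}
+#endif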
+
+void
+handle_pending_TLB_flushes(void)
+{
+ volatile int *my_word = &current_cpu_datap()->cpu_signals;
+
+ if (i_bit(MP_TLB_FLUSH, my_word)) {
+ DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
+ i_bit_clear(MP_TLB_FLUSH, my_word);
+ pmap_update_interrupt();
+ }
+}
+
+/*
+ * This is called from cpu_signal_handler() to process an MP_CALL signal.
+ */
+static void
+mp_cpus_call_action(void)
+{
+ if (mp_rv_action_func != NULL)
+ mp_rv_action_func(mp_rv_func_arg);
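+ /* signal completion so the initiating cpu's wait loop can advance */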
+ atomic_incl(&mp_rv_complete, 1);
+}
+
+/*
+ * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
+ * If the mode is SYNC, the function is called serially on the target cpus
+ * in logical cpu order. If the mode is ASYNC, the function is called in
+ * parallel over the specified cpus.
+ * The action function may be NULL.
+ * The cpu mask may include the local cpu. Offline cpus are ignored.
+ * Return does not occur until the function has completed on all cpus.
+ * The return value is the number of cpus on which the function was called.
+ */
+cpu_t
+mp_cpus_call(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *),
+ void *arg)
+{
+ cpu_t cpu;
+ boolean_t intrs_enabled = ml_get_interrupts_enabled();
+ boolean_t call_self = FALSE;
+
+ if (!smp_initialized) {
+ if ((cpus & CPUMASK_SELF) == 0)
+ return 0;
+ if (action_func != NULL) {
+ (void) ml_set_interrupts_enabled(FALSE);
+ action_func(arg);
+ ml_set_interrupts_enabled(intrs_enabled);
+ }
+ return 1;
+ }
+
+ /* obtain rendezvous lock */
+ simple_lock(&mp_rv_lock);
+
+ /* Use the rendezvous data structures for this call */
+ mp_rv_action_func = action_func;
+ mp_rv_func_arg = arg;
+ mp_rv_ncpus = 0;
+ mp_rv_complete = 0;
+
+ simple_lock(&x86_topo_lock);
+ for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
+ if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
+ !cpu_datap(cpu)->cpu_running)
+ continue;
+ if (cpu == (cpu_t) cpu_number()) {
+ /*
+ * We don't IPI ourself; if calling asynchronously,
+ * we defer our call until we have signalled all others.
+ */
+ call_self = TRUE;
+ if (mode == SYNC && action_func != NULL) {
+ (void) ml_set_interrupts_enabled(FALSE);
+ action_func(arg);
+ ml_set_interrupts_enabled(intrs_enabled);
+ }
+ } else {
+ /*
+ * Bump count of other cpus called and signal this cpu.
+ * Note: we signal asynchronously regardless of mode
+ * because we wait on mp_rv_complete either here
+ * (if mode == SYNC) or later (if mode == ASYNC).
+ * While spinning, poll for TLB flushes if interrupts
+ * are disabled.
+ */
+ mp_rv_ncpus++;
+ i386_signal_cpu(cpu, MP_CALL, ASYNC);
+ if (mode == SYNC) {
+ simple_unlock(&x86_topo_lock);
+ while (mp_rv_complete < mp_rv_ncpus) {
+ if (!intrs_enabled)
+ handle_pending_TLB_flushes();
+ cpu_pause();
+ }
+ simple_lock(&x86_topo_lock);
+ }
+ }
+ }
+ simple_unlock(&x86_topo_lock);
+
+ /*
+ * If calls are being made asynchronously,
+ * make the local call now if needed, and then
+ * wait for all other cpus to finish their calls.
+ */
+ if (mode == ASYNC) {
+ if (call_self && action_func != NULL) {
+ (void) ml_set_interrupts_enabled(FALSE);
+ action_func(arg);
+ ml_set_interrupts_enabled(intrs_enabled);
+ }
+ while (mp_rv_complete < mp_rv_ncpus) {
+ if (!intrs_enabled)
+ handle_pending_TLB_flushes();
+ cpu_pause();
+ }
+ }
+
+ /* Determine the number of cpus called */
+ cpu = mp_rv_ncpus + (call_self ? 1 : 0);
+
+ simple_unlock(&mp_rv_lock);
+
+ return cpu;
+}
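+
+/*
+ * Illustrative sketch (hypothetical, not part of this change): calling
+ * a function synchronously on all running cpus and reporting how many
+ * were reached. CPUMASK_ALL is assumed to name the all-cpus mask, as
+ * CPUMASK_SELF above names the local cpu; the example_* names are
+ * invented.
+ */
+#if 0 /* example only, not compiled */
+static long example_stat[MAX_CPUS]; /* invented per-cpu counters */
+
+static void
+example_clear_stat(__unused void *arg)
+{
+ example_stat[cpu_number()] = 0;
+}
+
+void
+example_reset_stats(void)
+{
+ cpu_t n;
+
+ n = mp_cpus_call(CPUMASK_ALL, SYNC, example_clear_stat, NULL);
+ kprintf("example: cleared stats on %d cpus\n", n);
+}
+#endif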
+
+static void
+mp_broadcast_action(void)
+{
+ /* call action function */
+ if (mp_bc_action_func != NULL)
+ mp_bc_action_func(mp_bc_func_arg);
+
+ /* if we're the last one through, wake up the instigator */
+ if (atomic_decl_and_test((volatile long *)&mp_bc_count, 1))
+ thread_wakeup(((event_t)(unsigned int *) &mp_bc_count));
+}
+
+/*
+ * mp_broadcast() runs a given function on all active cpus.
+ * The caller blocks until the function has run on all cpus.
+ * The caller will also block if there is another pending broadcast.
+ */
+void
+mp_broadcast(
+ void (*action_func)(void *),
+ void *arg)
+{
+ if (!smp_initialized) {
+ if (action_func != NULL)
+ action_func(arg);
+ return;
+ }
+
+ /* obtain broadcast lock */
+ mutex_lock(&mp_bc_lock);
+
+ /* set static function pointers */
+ mp_bc_action_func = action_func;
+ mp_bc_func_arg = arg;
+
+ assert_wait((event_t) &mp_bc_count, THREAD_UNINT);
+
+ /*
+ * signal other processors, which will call mp_broadcast_action()
+ */
+ simple_lock(&x86_topo_lock);
+ mp_bc_ncpus = i386_active_cpus(); /* total including this cpu */
+ mp_bc_count = mp_bc_ncpus;
+ i386_signal_cpus(MP_BROADCAST, ASYNC);
+
+ /* call executor function on this cpu */
+ mp_broadcast_action();
+ simple_unlock(&x86_topo_lock);
+
+ /* block for all cpus to have run action_func */
+ if (mp_bc_ncpus > 1)
+ thread_block(THREAD_CONTINUE_NULL);
+ else
+ clear_wait(current_thread(), THREAD_AWAKENED);
+
+ /* release lock */
+ mutex_unlock(&mp_bc_lock);
+}
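+
+/*
+ * Illustrative sketch (hypothetical, not part of this change): using
+ * mp_broadcast() to run a function once on every active cpu, with the
+ * caller blocked until all have finished. example_counter and the
+ * example_* functions are invented names.
+ */
+#if 0 /* example only, not compiled */
+static volatile long example_counter;
+
+static void
+example_count_action(__unused void *arg)
+{
+ atomic_incl(&example_counter, 1); /* one increment per active cpu */
+}
+
+void
+example_count_active_cpus(void)
+{
+ example_counter = 0;
+ mp_broadcast(example_count_action, NULL);
+ kprintf("example: %ld active cpus ran the action\n", example_counter);
+}
+#endif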
+
+void
+i386_activate_cpu(void)
+{
+ cpu_data_t *cdp = current_cpu_datap();
+
+ assert(!ml_get_interrupts_enabled());
+
+ if (!smp_initialized) {
+ cdp->cpu_running = TRUE;
+ return;
+ }
+
+ simple_lock(&x86_topo_lock);
+ cdp->cpu_running = TRUE;
+ simple_unlock(&x86_topo_lock);
+}
+
+void
+i386_deactivate_cpu(void)
+{
+ cpu_data_t *cdp = current_cpu_datap();
+
+ assert(!ml_get_interrupts_enabled());
+
+ simple_lock(&x86_topo_lock);
+ cdp->cpu_running = FALSE;
+ simple_unlock(&x86_topo_lock);
+
+ /*
+ * In case a rendezvous/broadcast/call was initiated to this cpu
+ * before we cleared cpu_running, we must perform any actions due.
+ */
+ if (i_bit(MP_RENDEZVOUS, &cdp->cpu_signals))
+ mp_rendezvous_action();
+ if (i_bit(MP_BROADCAST, &cdp->cpu_signals))
+ mp_broadcast_action();
+ if (i_bit(MP_CALL, &cdp->cpu_signals))
+ mp_cpus_call_action();
+ cdp->cpu_signals = 0; /* all clear */
+}
+
+int pmsafe_debug = 1;
+