+mp_rendezvous_action(void)
+{
+ boolean_t intrs_enabled;
+
+ /* setup function */
+ if (mp_rv_setup_func != NULL)
+ mp_rv_setup_func(mp_rv_func_arg);
+
+ intrs_enabled = ml_get_interrupts_enabled();
+
+ /* spin on entry rendezvous */
+ atomic_incl(&mp_rv_entry, 1);
+ while (mp_rv_entry < mp_rv_ncpus) {
+ /* poll for pesky tlb flushes if interrupts disabled */
+ if (!intrs_enabled)
+ handle_pending_TLB_flushes();
+ cpu_pause();
+ }
+
+ /* action function */
+ if (mp_rv_action_func != NULL)
+ mp_rv_action_func(mp_rv_func_arg);
+
+ /* spin on exit rendezvous */
+ atomic_incl(&mp_rv_exit, 1);
+ while (mp_rv_exit < mp_rv_ncpus) {
+ if (!intrs_enabled)
+ handle_pending_TLB_flushes();
+ cpu_pause();
+ }
+
+ /* teardown function */
+ if (mp_rv_teardown_func != NULL)
+ mp_rv_teardown_func(mp_rv_func_arg);
+
+ /* Bump completion count */
+ atomic_incl(&mp_rv_complete, 1);
+}
+
+void
+mp_rendezvous(void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
+ void *arg)
+{
+
+ if (!smp_initialized) {
+ if (setup_func != NULL)
+ setup_func(arg);
+ if (action_func != NULL)
+ action_func(arg);
+ if (teardown_func != NULL)
+ teardown_func(arg);
+ return;
+ }
+
+ /* obtain rendezvous lock */
+ simple_lock(&mp_rv_lock);
+
+ /* set static function pointers */
+ mp_rv_setup_func = setup_func;
+ mp_rv_action_func = action_func;
+ mp_rv_teardown_func = teardown_func;
+ mp_rv_func_arg = arg;
+
+ mp_rv_entry = 0;
+ mp_rv_exit = 0;
+ mp_rv_complete = 0;
+
+ /*
+ * signal other processors, which will call mp_rendezvous_action()
+ * with interrupts disabled
+ */
+ simple_lock(&x86_topo_lock);
+ mp_rv_ncpus = i386_active_cpus();
+ i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
+ simple_unlock(&x86_topo_lock);
+
+ /* call executor function on this cpu */
+ mp_rendezvous_action();
+
+ /*
+ * Spin for everyone to complete.
+ * This is necessary to ensure that all processors have proceeded
+ * from the exit barrier before we release the rendezvous structure.
+ */
+ while (mp_rv_complete < mp_rv_ncpus) {
+ cpu_pause();
+ }
+
+ /* Tidy up */
+ mp_rv_setup_func = NULL;
+ mp_rv_action_func = NULL;
+ mp_rv_teardown_func = NULL;
+ mp_rv_func_arg = NULL;
+
+ /* release lock */
+ simple_unlock(&mp_rv_lock);
+}
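+
+/*
+ * Illustrative sketch (editorial example, guarded out of the build and not
+ * part of this change): the setup/action/teardown split lets a caller run
+ * code on every active cpu between the entry and exit barriers.  The
+ * example_* names below are hypothetical.
+ */
+#if 0 /* example only */
+static long example_counter;
+
+static void
+example_count_cpu(void *arg)
+{
+ /* action: runs on every active cpu between the two barriers */
+ atomic_incl((volatile long *) arg, 1);
+}
+
+static void
+example_count_active_cpus(void)
+{
+ example_counter = 0;
+ mp_rendezvous(NULL, /* no setup needed */
+ example_count_cpu, /* action on each cpu */
+ NULL, /* no teardown needed */
+ (void *) &example_counter);
+}
+#endif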
+
+void
+mp_rendezvous_break_lock(void)
+{
+ simple_lock_init(&mp_rv_lock, 0);
+}
+
+static void
+setup_disable_intrs(__unused void * param_not_used)
+{
+ /* disable interrupts before the first barrier */
+ boolean_t intr = ml_set_interrupts_enabled(FALSE);
+
+ current_cpu_datap()->cpu_iflag = intr;
+ DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
+}
+
+static void
+teardown_restore_intrs(__unused void * param_not_used)
+{
+ /* restore interrupt flag following MTRR changes */
+ ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
+ DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
+}
+
+/*
+ * A wrapper around mp_rendezvous() that calls action_func() with
+ * interrupts disabled.
+ * This is exported for use by kexts.
+ */
+void
+mp_rendezvous_no_intrs(
+ void (*action_func)(void *),
+ void *arg)
+{
+ mp_rendezvous(setup_disable_intrs,
+ action_func,
+ teardown_restore_intrs,
+ arg);
+}
+
+
+typedef struct {
+ queue_chain_t link; /* queue linkage */
+ void (*func)(void *,void *); /* routine to call */
+ void *arg0; /* routine's 1st arg */
+ void *arg1; /* routine's 2nd arg */
+ volatile long *countp; /* completion counter */
+} mp_call_t;
+
+#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS
+static queue_head_t mp_cpus_call_freelist;
+static queue_head_t mp_cpus_call_queue[MAX_CPUS];
+/*
+ * The free list and the per-cpu call queues are protected by the following
+ * lock which is taken with interrupts disabled.
+ */
+decl_simple_lock_data(,mp_cpus_call_lock);
+
+static inline boolean_t
+mp_call_lock(void)
+{
+ boolean_t intrs_enabled;
+
+ intrs_enabled = ml_set_interrupts_enabled(FALSE);
+ simple_lock(&mp_cpus_call_lock);
+
+ return intrs_enabled;
+}
+
+static inline boolean_t
+mp_call_is_locked(void)
+{
+ return !ml_get_interrupts_enabled() &&
+ hw_lock_held((hw_lock_t)&mp_cpus_call_lock);
+}
+
+static inline void
+mp_call_unlock(boolean_t intrs_enabled)
+{
+ simple_unlock(&mp_cpus_call_lock);
+ ml_set_interrupts_enabled(intrs_enabled);
+}
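+
+/*
+ * Illustrative sketch (editorial example, guarded out of the build and not
+ * part of this change): every manipulation of the free list or of a per-cpu
+ * call queue is bracketed by this pair, with the caller's interrupt state
+ * saved and restored.  example_touch_call_queues is an assumed name.
+ */
+#if 0 /* example only */
+static void
+example_touch_call_queues(void)
+{
+ boolean_t intrs_enabled;
+
+ intrs_enabled = mp_call_lock();
+ /* ... allocate, free, queue or dequeue an mp_call_t here ... */
+ mp_call_unlock(intrs_enabled);
+}
+#endif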
+
+static inline mp_call_t *
+mp_call_alloc(void)
+{
+ mp_call_t *callp;
+
+ assert(mp_call_is_locked());
+ if (queue_empty(&mp_cpus_call_freelist))
+ return NULL;
+ queue_remove_first(&mp_cpus_call_freelist, callp, typeof(callp), link);
+ return callp;
+}
+
+static inline void
+mp_call_free(mp_call_t *callp)
+{
+ assert(mp_call_is_locked());
+ queue_enter_first(&mp_cpus_call_freelist, callp, typeof(callp), link);
+}
+
+static inline mp_call_t *
+mp_call_dequeue(queue_t call_queue)
+{
+ mp_call_t *callp;
+
+ assert(mp_call_is_locked());
+ if (queue_empty(call_queue))
+ return NULL;
+ queue_remove_first(call_queue, callp, typeof(callp), link);
+ return callp;
+}
+
+/* Called on the boot processor to initialize global structures */
+static void
+mp_cpus_call_init(void)
+{
+ DBG("mp_cpus_call_init()\n");
+ simple_lock_init(&mp_cpus_call_lock, 0);
+ queue_init(&mp_cpus_call_freelist);
+}
+
+/*
+ * Called by each processor to add call buffers to the free list
+ * and to initialize the per-cpu call queue.
+ * Also called, but effectively ignored, on slave processors at re-start
+ * or wake (the per-cpu queue is already initialized in that case).
+ */
+static void
+mp_cpus_call_cpu_init(void)
+{
+ boolean_t intrs_enabled;
+ int i;
+ mp_call_t *callp;
+
+ if (mp_cpus_call_queue[cpu_number()].next != NULL)
+ return; /* restart/wake case: called already */
+
+ queue_init(&mp_cpus_call_queue[cpu_number()]);
+ for (i = 0; i < MP_CPUS_CALL_BUFS_PER_CPU; i++) {
+ callp = (mp_call_t *) kalloc(sizeof(mp_call_t));
+ intrs_enabled = mp_call_lock();
+ mp_call_free(callp);
+ mp_call_unlock(intrs_enabled);
+ }
+
+ DBG("mp_cpus_call_init() done on cpu %d\n", cpu_number());
+}
+
+/*
+ * This is called from cpu_signal_handler() to process an MP_CALL signal,
+ * and also from i386_deactivate_cpu() when a cpu is being taken offline.
+ */
+static void
+mp_cpus_call_action(void)
+{
+ queue_t cpu_head;
+ boolean_t intrs_enabled;
+ mp_call_t *callp;
+ mp_call_t call;
+
+ assert(!ml_get_interrupts_enabled());
+ cpu_head = &mp_cpus_call_queue[cpu_number()];
+ intrs_enabled = mp_call_lock();
+ while ((callp = mp_call_dequeue(cpu_head)) != NULL) {
+ /* Copy the call request to the stack so its buffer can be freed */
+ call = *callp;
+ mp_call_free(callp);
+ if (call.func != NULL) {
+ mp_call_unlock(intrs_enabled);
+ KERNEL_DEBUG_CONSTANT(
+ TRACE_MP_CPUS_CALL_ACTION,
+ call.func, call.arg0, call.arg1, call.countp, 0);
+ call.func(call.arg0, call.arg1);
+ (void) mp_call_lock();
+ }
+ if (call.countp != NULL)
+ atomic_incl(call.countp, 1);
+ }
+ mp_call_unlock(intrs_enabled);
+}
+
+static boolean_t
+mp_call_queue(
+ int cpu,
+ void (*action_func)(void *, void *),
+ void *arg0,
+ void *arg1,
+ volatile long *countp)
+{
+ queue_t cpu_head = &mp_cpus_call_queue[cpu];
+ mp_call_t *callp;
+
+ assert(mp_call_is_locked());
+ callp = mp_call_alloc();
+ if (callp == NULL)
+ return FALSE;
+
+ callp->func = action_func;
+ callp->arg0 = arg0;
+ callp->arg1 = arg1;
+ callp->countp = countp;
+
+ queue_enter(cpu_head, callp, typeof(callp), link);
+
+ return TRUE;
+}
+
+/*
+ * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
+ * Possible modes are:
+ * SYNC: function is called serially on target cpus in logical cpu order
+ * waiting for each call to be acknowledged before proceeding
+ * ASYNC: function calls are queued to the specified cpus and run in
+ * parallel; the caller waits for all calls to complete before returning
+ * NOSYNC: function calls are queued
+ * but we return before confirmation of calls completing.
+ * The action function may be NULL.
+ * The cpu mask may include the local cpu. Offline cpus are ignored.
+ * The return value is the number of cpus on which the call was made or queued.
+ */
+cpu_t
+mp_cpus_call(
+ cpumask_t cpus,
+ mp_sync_t mode,
+ void (*action_func)(void *),
+ void *arg)
+{
+ return mp_cpus_call1(
+ cpus,
+ mode,
+ (void (*)(void *,void *))action_func,
+ arg,
+ NULL,
+ NULL,
+ NULL);
+}
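+
+/*
+ * Illustrative sketch (editorial example, guarded out of the build and not
+ * part of this change): a hypothetical caller asking every other active cpu
+ * to run a routine, and waiting for completion, could use ASYNC mode.
+ * example_flush and example_flush_other_cpus are assumed names;
+ * CPUMASK_OTHERS is the "all cpus but self" mask from mp.h.
+ */
+#if 0 /* example only */
+static void
+example_flush(void *arg)
+{
+ /* runs once on each target cpu */
+ (void) arg;
+}
+
+static void
+example_flush_other_cpus(void)
+{
+ cpu_t ncpus;
+
+ ncpus = mp_cpus_call(CPUMASK_OTHERS, ASYNC, example_flush, NULL);
+ DBG("example_flush ran on %d cpus\n", ncpus);
+}
+#endif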
+
+static void
+mp_cpus_call_wait(boolean_t intrs_enabled,
+ long mp_cpus_signals,
+ volatile long *mp_cpus_calls)