+/*
+ * Routine: cpu_exit_wait
+ * Function: Spin until the given (non-master) cpu advertises
+ * SleepState in its per_proc cpu_flags. Returns immediately
+ * for the master cpu.
+ */
+void
+cpu_exit_wait(
+ int cpu)
+{
+ struct per_proc_info *pproc;
+
+ if (cpu == master_cpu)
+ return; /* Nothing to wait for on the master */
+
+ pproc = PerProcTable[cpu].ppe_vaddr;
+ for (;;) { /* Volatile read so the poll is re-fetched each pass */
+ if ((*(volatile short *)&pproc->cpu_flags) & SleepState)
+ break;
+ }
+}
+
+
+/*
+ * Routine: cpu_doshutdown
+ * Function: Take the calling processor offline: re-enable preemption
+ * (caller presumably holds it disabled — TODO confirm against
+ * callers) and run the generic processor_offline() path for
+ * the current processor.
+ */
+void
+cpu_doshutdown(
+ void)
+{
+ enable_preemption();
+ processor_offline(current_processor());
+}
+
+
+/*
+ * Routine: cpu_sleep
+ * Function: Prepare the calling processor for sleep: mark it not
+ * running, save any live FPU/vector context and the timebase,
+ * and — on the master cpu only — arm the reset vector and wait
+ * for every other cpu to report SleepState before quiescing
+ * through the platform expert.
+ */
+void
+cpu_sleep(
+ void)
+{
+ struct per_proc_info *proc_info;
+ unsigned int i;
+ unsigned int wait_ncpus_sleep, ncpus_sleep;
+ facility_context *fowner;
+
+ proc_info = getPerProc();
+
+ proc_info->running = FALSE; /* cpu_signal() refuses targets with !running */
+
+ fowner = proc_info->FPU_owner; /* Cache this */
+ if(fowner) /* If anyone owns FPU, save it */
+ fpu_save(fowner);
+ proc_info->FPU_owner = NULL; /* Set no fpu owner now */
+
+ fowner = proc_info->VMX_owner; /* Cache this */
+ if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
+ proc_info->VMX_owner = NULL; /* Set no vector owner now */
+
+ if (proc_info->cpu_number == master_cpu) {
+ proc_info->cpu_flags &= BootDone; /* Clear every flag except BootDone */
+ proc_info->interrupts_enabled = 0;
+ proc_info->pending_ast = AST_NONE;
+
+ /*
+ * If our start address is the reset vector, patch the physical
+ * reset handler so that coming out of reset re-enters _start_cpu
+ * with the master's per_proc block.
+ */
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+ ml_phys_write((vm_offset_t)&ResetHandler + 0,
+ RESET_HANDLER_START);
+ ml_phys_write((vm_offset_t)&ResetHandler + 4,
+ (vm_offset_t)_start_cpu);
+ ml_phys_write((vm_offset_t)&ResetHandler + 8,
+ (vm_offset_t)&PerProcTable[master_cpu]);
+
+ __asm__ volatile("sync"); /* Ensure the handler words are globally visible */
+ __asm__ volatile("isync"); /* before the master proceeds */
+ }
+
+ /*
+ * Busy-wait until all of the other cpus have posted SleepState.
+ * The flag is read through a volatile access so each pass
+ * re-fetches from memory.
+ */
+ wait_ncpus_sleep = real_ncpus-1;
+ ncpus_sleep = 0;
+ while (wait_ncpus_sleep != ncpus_sleep) {
+ ncpus_sleep = 0;
+ for(i=1; i < real_ncpus ; i++) {
+ if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
+ ncpus_sleep++;
+ }
+ }
+
+ }
+
+ /*
+ * Save the TBR before stopping. Re-reading the upper half after
+ * the lower guards against a carry between the two reads: retry
+ * until the upper word is stable.
+ */
+ do {
+ proc_info->save_tbu = mftbu();
+ proc_info->save_tbl = mftb();
+ } while (mftbu() != proc_info->save_tbu);
+
+ PE_cpu_machine_quiesce(proc_info->cpu_id); /* Hand off to the platform expert */
+}
+
+
+/*
+ * Routine: cpu_signal
+ * Function:
+ * Here is where we send a message to another processor. So far we only have two:
+ * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
+ * currently disabled). SIGPdebug is used to enter the debugger.
+ *
+ * We set up the SIGP function to indicate that this is a simple message and set the
+ * order code (MPsigpParm0) to SIGPast or SIGPdebug). After finding the per_processor
+ * block for the target, we lock the message block. Then we set the parameter(s).
+ * Next we change the lock (also called "busy") to "passing" and finally signal
+ * the other processor. Note that we only wait about 1ms to get the message lock.
+ * If we time out, we return failure to our caller. It is their responsibility to
+ * recover.
+ */
+kern_return_t
+cpu_signal(
+ int target,
+ int signal,
+ unsigned int p1,
+ unsigned int p2)
+{
+
+ unsigned int holdStat;
+ struct per_proc_info *tpproc, *mpproc;
+ int busybitset=0; /* Set if we reclaimed the busy bit from a pending wake */
+
+#if DEBUG
+ if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target); /* Range-check the target on debug kernels */
+#endif
+
+ mpproc = getPerProc(); /* Point to our block */
+ tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
+ if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
+
+ if(!tpproc->running) return KERN_FAILURE; /* Target is offline/asleep (see cpu_sleep) */
+
+ if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE; /* Target not yet accepting signals */
+
+ if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
+
+ if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
+ mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
+ return KERN_SUCCESS;
+ }
+
+ if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
+ mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
+ return KERN_SUCCESS; /* Don't bother to send this one... */
+ }
+
+ if (tpproc->MPsigpParm0 == SIGPwake) { /* Pending message is only a wake... */
+ if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
+ (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) { /* Try to take back the lock (pass -> busy), no wait */
+ busybitset = 1; /* We now hold the message lock; overwrite the wake below */
+ mpproc->hwCtr.numSIGPmwake++; /* The wake is merged into our stronger signal */
+ }
+ }
+ }
+
+ if((busybitset == 0) &&
+ (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
+ (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
+ mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
+ return KERN_FAILURE; /* Timed out, take your ball and go home... */
+ }
+
+ holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
+ tpproc->MPsigpParm0 = signal; /* Set message order */
+ tpproc->MPsigpParm1 = p1; /* Set additional parm */
+ tpproc->MPsigpParm2 = p2; /* Set additional parm */
+
+ __asm__ volatile("sync"); /* Make sure it's all there */
+
+ tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
+ __asm__ volatile("eieio"); /* I'm a paraniod freak */
+
+ if (busybitset == 0) /* If we merged into a pending wake, the target is already being kicked */
+ PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
+
+ return KERN_SUCCESS; /* All is goodness and rainbows... */
+}
+