+ proc_info->pms.pmsStamp = 0; /* Dummy transition time */
+ proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
+ proc_info->pms.pmsState = pmsParked; /* Park the stepper */
+ proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
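+	/* Set up the per-processor user memory window (UMW) linkage mapping */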
+ mp = (mapping_t *)(&proc_info->ppUMWmp);
+ mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
+ mp->mpSpace = invalSpace;
+
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+
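+		/*
+		 *	Serialize use of the reset vector: sleep until no one else
+		 *	is rewriting it, then claim it.
+		 */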
+ simple_lock(&rht_lock);
+ while (rht_state & RHT_BUSY) {
+ rht_state |= RHT_WAIT;
+ thread_sleep_usimple_lock((event_t)&rht_state,
+ &rht_lock, THREAD_UNINT);
+ }
+ rht_state |= RHT_BUSY;
+ simple_unlock(&rht_lock);
+
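+		/*
+		 *	The reset vector is three words: a start token, the entry
+		 *	point, and the per_proc pointer for the cpu being started.
+		 */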
+ ml_phys_write((vm_offset_t)&ResetHandler + 0,
+ RESET_HANDLER_START);
+ ml_phys_write((vm_offset_t)&ResetHandler + 4,
+ (vm_offset_t)_start_cpu);
+ ml_phys_write((vm_offset_t)&ResetHandler + 8,
+ (vm_offset_t)&PerProcTable[cpu]);
+ }
+/*
+ *	Note: we pass the current time to the other processor here. The target
+ *	loads it as early as possible so that its clock starts out reasonably
+ *	close to accurate. Once the machine has been up a while, we officially
+ *	resync the clocks so that all processors agree; this is just to get close.
+ */
+
+ ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
+
+ __asm__ volatile("sync"); /* Commit to storage */
+	__asm__ volatile("isync");				/* Context-synchronize before starting the cpu */
+ ret = PE_cpu_start(proc_info->cpu_id,
+ proc_info->start_paddr, (vm_offset_t)proc_info);
+
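+	/*
+	 *	If the start failed and we had claimed the reset vector, release it
+	 *	and wake any waiters. Otherwise, wait until the new cpu raises
+	 *	SignalReady to show that it is up.
+	 */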
+ if (ret != KERN_SUCCESS) {
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+ simple_lock(&rht_lock);
+ if (rht_state & RHT_WAIT)
+ thread_wakeup(&rht_state);
+ rht_state &= ~(RHT_BUSY|RHT_WAIT);
+ simple_unlock(&rht_lock);
+		}
+ } else {
+ simple_lock(&SignalReadyLock);
+ if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
+ hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
+ thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
+ &SignalReadyLock, THREAD_UNINT);
+ }
+		simple_unlock(&SignalReadyLock);
+	}
+ return(ret);
+ }
+}
+
+/*
+ *	Routine:	cpu_exit_wait
+ *	Function:	Wait for the specified cpu to set SleepState, indicating
+ *			that it has finished shutting down.
+ */
+void
+cpu_exit_wait(
+ int cpu)
+{
+ struct per_proc_info *tpproc;
+
+	if (cpu != master_cpu) {
+ tpproc = PerProcTable[cpu].ppe_vaddr;
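+		/* Spin until the target cpu marks itself asleep */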
+ while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
+ }
+}
+
+
+/*
+ *	Routine:	cpu_doshutdown
+ *	Function:	Shut down the current processor: re-enable preemption and
+ *			run the processor offline sequence.
+ */
+void
+cpu_doshutdown(
+ void)
+{
+ enable_preemption();
+ processor_offline(current_processor());
+}
+
+
+/*
+ *	Routine:	cpu_sleep
+ *	Function:	Put the current processor to sleep. The master cpu also
+ *			re-arms the reset vector and waits for all other cpus to
+ *			reach SleepState before quiescing.
+ */
+void
+cpu_sleep(
+ void)
+{
+ struct per_proc_info *proc_info;
+ unsigned int i;
+ unsigned int wait_ncpus_sleep, ncpus_sleep;
+ facility_context *fowner;
+
+ proc_info = getPerProc();
+
+ proc_info->running = FALSE;
+
+ fowner = proc_info->FPU_owner; /* Cache this */
+ if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
+ proc_info->FPU_owner = 0; /* Set no fpu owner now */
+
+ fowner = proc_info->VMX_owner; /* Cache this */
+	if(fowner) vec_save(fowner);			/* If anyone owns the vector unit, save its context */
+ proc_info->VMX_owner = 0; /* Set no vector owner now */
+
+ if (proc_info->cpu_number == master_cpu) {
+		proc_info->cpu_flags &= BootDone;	/* Clear all flags except BootDone */
+ proc_info->interrupts_enabled = 0;
+ proc_info->pending_ast = AST_NONE;
+
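+		/*
+		 *	Point the reset vector at ourselves so that the master cpu
+		 *	comes back through _start_cpu when the machine wakes.
+		 */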
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+ ml_phys_write((vm_offset_t)&ResetHandler + 0,
+ RESET_HANDLER_START);
+ ml_phys_write((vm_offset_t)&ResetHandler + 4,
+ (vm_offset_t)_start_cpu);
+ ml_phys_write((vm_offset_t)&ResetHandler + 8,
+ (vm_offset_t)&PerProcTable[master_cpu]);
+
+ __asm__ volatile("sync");
+ __asm__ volatile("isync");
+ }
+
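+		/* Wait for all other cpus to reach SleepState before we quiesce */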
+ wait_ncpus_sleep = real_ncpus-1;
+ ncpus_sleep = 0;
+ while (wait_ncpus_sleep != ncpus_sleep) {
+ ncpus_sleep = 0;
+			for (i = 1; i < real_ncpus; i++) {
+ if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
+ ncpus_sleep++;
+ }
+ }
+
+ }
+
+	/*
+	 *	Save the TBR before stopping. Loop until the upper word reads the
+	 *	same twice, so the value is not torn by a carry out of the low word.
+	 */
+ do {
+ proc_info->save_tbu = mftbu();
+ proc_info->save_tbl = mftb();
+ } while (mftbu() != proc_info->save_tbu);
+
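+	/* Hand off to the platform expert to actually quiesce this cpu */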
+ PE_cpu_machine_quiesce(proc_info->cpu_id);
+}
+
+
+/*
+ * Routine: cpu_signal
+ * Function:
+ * Here is where we send a message to another processor. So far we only have two:
+ * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
+ * currently disabled). SIGPdebug is used to enter the debugger.
+ *
+ * We set up the SIGP function to indicate that this is a simple message and set the
+ *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
+ * block for the target, we lock the message block. Then we set the parameter(s).
+ * Next we change the lock (also called "busy") to "passing" and finally signal
+ * the other processor. Note that we only wait about 1ms to get the message lock.
+ * If we time out, we return failure to our caller. It is their responsibility to
+ * recover.
+ */
+kern_return_t
+cpu_signal(
+ int target,
+ int signal,
+ unsigned int p1,
+ unsigned int p2)
+{
+
+ unsigned int holdStat;
+ struct per_proc_info *tpproc, *mpproc;
+ int busybitset=0;
+
+#if DEBUG
+ if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
+#endif