diff --git a/osfmk/ppc/cpu.c b/osfmk/ppc/cpu.c
index 360c29c6e..05b49814b 100644
--- a/osfmk/ppc/cpu.c
+++ b/osfmk/ppc/cpu.c
@@ -3,19 +3,22 @@
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -37,8 +40,13 @@
 #include <ppc/machine_routines.h>
 #include <ppc/machine_cpu.h>
 #include <ppc/exception.h>
+#include <ppc/asm.h>
+#include <ppc/hw_perfmon.h>
 #include <pexpert/pexpert.h>
-//#include <pexpert/ppc/powermac.h>
+#include <kern/cpu_data.h>
+#include <ppc/mappings.h>
+#include <ppc/Diagnostics.h>
+#include <ppc/trap.h>
 
 /* TODO: BOGUS TO BE REMOVED */
 int real_ncpus = 1;
@@ -59,9 +67,11 @@ struct SIGtimebase {
 	boolean_t	avail;
 	boolean_t	ready;
 	boolean_t	done;
-	AbsoluteTime	abstime;
+	uint64_t	abstime;
 };
 
+struct per_proc_info	*pper_proc_info = per_proc_info; 
+ 
 extern struct SIGtimebase syncClkSpot;
 
 void cpu_sync_timebase(void);
@@ -89,20 +99,15 @@ cpu_control(
 	     cpu_subtype != cmd->cmd_cpu_subtype)
 	  return(KERN_FAILURE);
 
+	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
+		return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
+	}
+
 	switch (cmd->cmd_op)
 	  {
 	  case PROCESSOR_PM_CLR_PMC:       /* Clear Performance Monitor Counters */
 	    switch (cpu_subtype)
 	      {
-	      case CPU_SUBTYPE_POWERPC_604:
-		{
-		  oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
-		  mtpmc1(0x0);
-		  mtpmc2(0x0);
-		  ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
-		  return(KERN_SUCCESS);
-		}
-	      case CPU_SUBTYPE_POWERPC_604e:
 	      case CPU_SUBTYPE_POWERPC_750:
 	      case CPU_SUBTYPE_POWERPC_7400:
 	      case CPU_SUBTYPE_POWERPC_7450:
@@ -121,21 +126,6 @@ cpu_control(
 	  case PROCESSOR_PM_SET_REGS:      /* Set Performance Monitor Registers */
 	    switch (cpu_subtype)
 	      {
-	      case CPU_SUBTYPE_POWERPC_604:
-		if (count <  (PROCESSOR_CONTROL_CMD_COUNT 
-			       + PROCESSOR_PM_REGS_COUNT_POWERPC_604))
-		  return(KERN_FAILURE);
-		else
-		  {
-		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
-		    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
-		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
-		    mtpmc1(PERFMON_PMC1(perf_regs));
-		    mtpmc2(PERFMON_PMC2(perf_regs));
-		    ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
-		    return(KERN_SUCCESS);
-		  }
-	      case CPU_SUBTYPE_POWERPC_604e:
 	      case CPU_SUBTYPE_POWERPC_750:
 		if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
 		       PROCESSOR_PM_REGS_COUNT_POWERPC_750))
@@ -178,17 +168,6 @@ cpu_control(
 	  case PROCESSOR_PM_SET_MMCR:
 	    switch (cpu_subtype)
 	      {
-	      case CPU_SUBTYPE_POWERPC_604:
-		if (count < (PROCESSOR_CONTROL_CMD_COUNT +
-		       PROCESSOR_PM_REGS_COUNT_POWERPC_604))
-		  return(KERN_FAILURE);
-		else
-		  {
-		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
-		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
-		    return(KERN_SUCCESS);
-		  }
-	      case CPU_SUBTYPE_POWERPC_604e:
 	      case CPU_SUBTYPE_POWERPC_750:
 		if (count < (PROCESSOR_CONTROL_CMD_COUNT +
 		      PROCESSOR_PM_REGS_COUNT_POWERPC_750))
@@ -239,11 +218,6 @@ cpu_info_count(
 	switch (flavor) {
 		case PROCESSOR_PM_REGS_INFO:
 			switch (cpu_subtype) {
-				case CPU_SUBTYPE_POWERPC_604:
-					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
-					return(KERN_SUCCESS);
-
-				case CPU_SUBTYPE_POWERPC_604e:
 				case CPU_SUBTYPE_POWERPC_750:
 		
 					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
@@ -291,21 +265,6 @@ cpu_info(
 			perf_regs = (processor_pm_regs_t) info;
 
 			switch (cpu_subtype) {
-				case CPU_SUBTYPE_POWERPC_604:
-
-					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
-					  return(KERN_FAILURE);
-				  
-					oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
-					PERFMON_MMCR0(perf_regs) = mfmmcr0();
-					PERFMON_PMC1(perf_regs)  = mfpmc1();
-					PERFMON_PMC2(perf_regs)  = mfpmc2();
-					ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
-		
-					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
-					return(KERN_SUCCESS);
-
-				case CPU_SUBTYPE_POWERPC_604e:
 				case CPU_SUBTYPE_POWERPC_750:
 
 					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
@@ -390,18 +349,23 @@ void
 cpu_machine_init(
 	void)
 {
-	struct per_proc_info	*proc_info;
+	struct per_proc_info	*tproc_info;
+	volatile struct per_proc_info	*mproc_info;
 	int cpu;
 
 	/* TODO: release mutex lock reset_handler_lock */
 
 	cpu = cpu_number();
-	proc_info = &per_proc_info[cpu];
-	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
-	if (cpu != master_cpu)
+	tproc_info = &per_proc_info[cpu];
+	mproc_info = &per_proc_info[master_cpu];
+	PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
+	if (cpu != master_cpu) {
+		while (!((mproc_info->cpu_flags) & SignalReady))
+			continue;
 		cpu_sync_timebase();
+	}
 	ml_init_interrupt();
-	proc_info->cpu_flags |= BootDone;
+	tproc_info->cpu_flags |= BootDone|SignalReady;
 }
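
The rendezvous above is worth spelling out (an inference from this diff, not text in the source): a secondary cpu may not call cpu_sync_timebase() until the master advertises SignalReady, because the timebase handshake rides on a SIGPcpureq signal and, later in this diff, cpu_signal() refuses targets whose SignalReady flag is clear. A minimal sketch of the ordering, using only names that appear in the diff:

	/* secondary cpu: wait until the master can take signals */
	while (!(per_proc_info[master_cpu].cpu_flags & SignalReady))
		continue;
	cpu_sync_timebase();		/* safe: master will answer CPRQtimebase */

	/* every cpu, once initialized: advertise readiness */
	per_proc_info[cpu_number()].cpu_flags |= BootDone | SignalReady;
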
 
 kern_return_t
@@ -437,8 +401,8 @@ cpu_start(
 {
 	struct per_proc_info	*proc_info;
 	kern_return_t		ret;
+	mapping *mp;
 
-	extern void (*exception_handlers[])(void);
 	extern vm_offset_t	intstack;
 	extern vm_offset_t	debstack;
 
@@ -447,7 +411,7 @@ cpu_start(
 	if (cpu == cpu_number()) {
  	  PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
 	  ml_init_interrupt();
-	  proc_info->cpu_flags |= BootDone;
+	  proc_info->cpu_flags |= BootDone|SignalReady;
 
 	  return KERN_SUCCESS;
 	} else {
@@ -455,30 +419,27 @@ cpu_start(
 
 		proc_info->cpu_number = cpu;
 		proc_info->cpu_flags &= BootDone;
-		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
 		proc_info->intstack_top_ss = proc_info->istackptr;
 #if     MACH_KDP || MACH_KDB
-		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
 		proc_info->debstack_top_ss = proc_info->debstackptr;
 #endif  /* MACH_KDP || MACH_KDB */
-		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
-		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
-		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
-		proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
-		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
+		proc_info->interrupts_enabled = 0;
 		proc_info->need_ast = (unsigned int)&need_ast[cpu];
-		proc_info->FPU_thread = 0;
-		proc_info->FPU_vmmCtx = 0;
-		proc_info->VMX_thread = 0;
-		proc_info->VMX_vmmCtx = 0;
+		proc_info->FPU_owner = 0;
+		proc_info->VMX_owner = 0;
+		mp = (mapping *)(&proc_info->ppCIOmp);
+		mp->mpFlags = 0x01000000 | mpSpecial | 1;
+		mp->mpSpace = invalSpace;
 
 		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
 
 			/* TODO: get mutex lock reset_handler_lock */
 
 			resethandler_target.type = RESET_HANDLER_START;
-			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); 
-			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
+			resethandler_target.call_paddr = (vm_offset_t)_start_cpu; 	/* Note: these routines are always V=R */
+			resethandler_target.arg__paddr = (vm_offset_t)proc_info; 	/* Note: these routines are always V=R */
 			
 			ml_phys_write((vm_offset_t)&ResetHandler + 0,
 				      resethandler_target.type);
@@ -495,7 +456,7 @@ cpu_start(
  *		that all processors are the same.  This is just to get close.
  */
 
-		ml_get_timebase(&proc_info->ruptStamp);	/* Pass our current time to the other guy */
+		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);	/* Pass our current time to the other guy */
 		
 		__asm__ volatile("sync");				/* Commit to storage */
 		__asm__ volatile("isync");				/* Wait a second */
@@ -511,6 +472,8 @@ cpu_start(
 	}
 }
 
+perfTrap perfCpuSigHook = 0;            /* Pointer to CHUD cpu signal hook routine */
+
 /*
  *	Here is where we implement the receiver of the signaling protocol.
  *	We wait for the signal status area to be passed to us. Then we snarf
@@ -534,10 +497,10 @@ cpu_signal_handler(
 	pproc = &per_proc_info[cpu];					/* Point to our block */
 
 /*
- *	Since we've been signaled, wait just under 1ms for the signal lock to pass
+ *	Since we've been signaled, wait about 31 ms for the signal lock to pass
  */
-	if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
-	  (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
+	if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
+	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
 		panic("cpu_signal_handler: Lock pass timed out\n");
 	}
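
The timeout above is expressed in timebase ticks, which makes the shift frequency-independent; a worked conversion (a sketch, assuming timebase_frequency_hz is ticks per second):

	/* ticks = Hz >> 5 = Hz/32, and seconds = ticks/Hz, so the bound is
	   1/32 s = 31.25 ms whatever the clock rate.  The old expression,
	   bus_clock_rate_hz >> 7, named the same interval on parts whose
	   timebase advances once every four bus clocks (bus/128 ticks at a
	   bus/4 tick rate is still 1/32 s); only its "1ms" comment was off. */
	uint32_t timeout_ticks = gPEClockFrequencyInfo.timebase_frequency_hz >> 5;
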
 	
@@ -548,7 +511,7 @@ cpu_signal_handler(
 	
 	__asm__ volatile("isync");						/* Make sure we don't unlock until memory is in */
 
-	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc);	/* Release lock */
+	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */
 
 	switch ((holdStat & MPsigpFunc) >> 8) {			/* Decode function code */
 
@@ -560,18 +523,16 @@ cpu_signal_handler(
 			switch (holdParm0) {					/* Decode SIGP message order */
 
 				case SIGPast:						/* Should we do an AST? */
-					pproc->numSIGPast++;			/* Count this one */
+					pproc->hwCtr.numSIGPast++;		/* Count this one */
 #if 0
 					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
 #endif
-					ast_check();					/* Yes, do it */
-					/* XXX: Should check if AST_URGENT is needed */
-					ast_on(AST_URGENT);
+					ast_check(cpu_to_processor(cpu));
 					return;							/* All done... */
 					
 				case SIGPcpureq:					/* CPU specific function? */
 				
-					pproc->numSIGPcpureq++;			/* Count this one */
+					pproc->hwCtr.numSIGPcpureq++;	/* Count this one */
 					switch (holdParm1) {			/* Select specific function */
 					
 						case CPRQtemp:				/* Get the temperature */
@@ -589,7 +550,7 @@ cpu_signal_handler(
 							if(pproc->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
 								pproc->time_base_enable(pproc->cpu_id, FALSE);
 
-							timebaseAddr->abstime.hi = 0;	/* Touch to force into cache */
+							timebaseAddr->abstime = 0;	/* Touch to force into cache */
 							sync();
 							
 							do {
@@ -598,8 +559,7 @@ cpu_signal_handler(
 								asm volatile("	mftbu %0" : "=r" (tbu2));
 							} while (tbu != tbu2);
 							
-							timebaseAddr->abstime.lo = tbl;	/* Set low order */
-							timebaseAddr->abstime.hi = tbu;	/* Set high order */
+							timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
 							sync();					/* Force order */
 						
 							timebaseAddr->avail = TRUE;
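
The mftbu/mftb/mftbu triple above is the standard way to read the 64-bit timebase from 32-bit PowerPC: re-reading the upper half detects a carry out of TBL between the two moves. As a self-contained sketch (read_timebase is a hypothetical helper name, not part of this file):

	static uint64_t read_timebase(void)
	{
		uint32_t tbu, tbl, tbu2;

		do {
			__asm__ volatile("mftbu %0" : "=r" (tbu));	/* upper 32 bits */
			__asm__ volatile("mftb  %0" : "=r" (tbl));	/* lower 32 bits */
			__asm__ volatile("mftbu %0" : "=r" (tbu2));	/* upper again */
		} while (tbu != tbu2);		/* retry if TBL carried into TBU */

		return ((uint64_t)tbu << 32) | tbl;
	}
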
@@ -613,6 +573,25 @@ cpu_signal_handler(
 
 							return;
 
+						case CPRQsegload:
+							return;
+						
+ 						case CPRQchud:
+ 							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
+ 							if(perfCpuSigHook) {
+ 								struct savearea *ssp = current_act()->mact.pcb;
+ 								if(ssp) {
+ 									(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
+ 								}
+   							}
+ 							parmAddr[1] = 0;
+ 							parmAddr[0] = 0;		/* Show we're done */
+  							return;
+						
+						case CPRQscom:
+							fwSCOM((scomcomm *)holdParm2);	/* Do the function */
+							return;
+
 						default:
 							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
 							return;
@@ -621,14 +600,14 @@ cpu_signal_handler(
 	
 				case SIGPdebug:						/* Enter the debugger? */		
 
-					pproc->numSIGPdebug++;			/* Count this one */
+					pproc->hwCtr.numSIGPdebug++;	/* Count this one */
 					debugger_is_slave[cpu]++;		/* Bump up the count to show we're here */
 					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
 					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
 					return;							/* All done now... */
 					
 				case SIGPwake:						/* Wake up CPU */
-					pproc->numSIGPwake++;			/* Count this one */
+					pproc->hwCtr.numSIGPwake++;		/* Count this one */
 					return;							/* No need to do anything, the interrupt does it all... */
 					
 				default:
@@ -670,6 +649,7 @@ cpu_signal(
 	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
 	struct per_proc_info *tpproc, *mpproc;			/* Area for per_proc addresses */
 	int cpu;
+	int busybitset = 0;
 
 #if DEBUG
 	if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
@@ -681,9 +661,34 @@ cpu_signal(
 
 	mpproc = &per_proc_info[cpu];					/* Point to our block */
 	tpproc = &per_proc_info[target];				/* Point to the target's block */
+
+	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
+		
+	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */
+
+		if(signal == SIGPwake) {					/* SIGPwake can merge into all others... */
+			mpproc->hwCtr.numSIGPmwake++;			/* Account for merged wakes */
+			return KERN_SUCCESS;
+		}
+
+		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
+			mpproc->hwCtr.numSIGPmast++;			/* Account for merged ASTs */
+			return KERN_SUCCESS;					/* Don't bother to send this one... */
+		}
+
+		if (tpproc->MPsigpParm0 == SIGPwake) {
+			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), 
+			                  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
+				busybitset = 1;
+				mpproc->hwCtr.numSIGPmwake++;	
+			}
+		}
+	}	
 	
-	if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy, 
-	  (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {	/* Try to lock the message block */
+	if((busybitset == 0) && 
+	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy, 
+	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
+		mpproc->hwCtr.numSIGPtimo++;				/* Account for timeouts */
 		return KERN_FAILURE;						/* Timed out, take your ball and go home... */
 	}
 
@@ -697,7 +702,8 @@ cpu_signal(
 	tpproc->MPsigpStat = holdStat;					/* Set status and pass the lock */
 	__asm__ volatile("eieio");						/* I'm a paraniod freak */
 	
-	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */
+	if (busybitset == 0)
+		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */
 
 	return KERN_SUCCESS;							/* All is goodness and rainbows... */
 }
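
Two things above merit a note. The merge logic: if the target still holds an unreceived message, a SIGPwake folds into it for free, a SIGPast folds into a pending SIGPast, and a pending SIGPwake can be reclaimed in place (busybitset) so the new message reuses the interrupt already on its way, which is why PE_cpu_signal() is skipped in that case. And the timeout arithmetic, as a worked line (a sketch, same tick units as on the receiver side):

	/* Hz >> 11 = Hz/2048 ticks, i.e. 1/2048 s, about 0.488 ms: the
	   ".5ms timeout" the comment above refers to. */
	uint32_t send_timeout = gPEClockFrequencyInfo.timebase_frequency_hz >> 11;
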
@@ -706,7 +712,8 @@ void
 cpu_doshutdown(
 	void)
 {
-	processor_doshutdown(current_processor());
+	enable_preemption();
+	processor_offline(current_processor());
 }
 
 void
@@ -714,48 +721,62 @@ cpu_sleep(
 	void)
 {
 	struct per_proc_info	*proc_info;
-	unsigned int	cpu;
-	extern void (*exception_handlers[])(void);
+	unsigned int	cpu, i;
+	unsigned int	wait_ncpus_sleep, ncpus_sleep;
+	facility_context *fowner;
 	extern vm_offset_t	intstack;
 	extern vm_offset_t	debstack;
 	extern void _restart_cpu(void);
 
 	cpu = cpu_number();
-#if 0
-	kprintf("******* About to sleep cpu %d\n", cpu);
-#endif
 
 	proc_info = &per_proc_info[cpu];
 
+	fowner = proc_info->FPU_owner;					/* Cache this */
+	if(fowner) fpu_save(fowner);					/* If anyone owns FPU, save it */
+	proc_info->FPU_owner = 0;						/* Set no fpu owner now */
+
+	fowner = proc_info->VMX_owner;					/* Cache this */
+	if(fowner) vec_save(fowner);					/* If anyone owns vectors, save it */
+	proc_info->VMX_owner = 0;						/* Set no vector owner now */
+
 	if (proc_info->cpu_number == 0)  {
 		proc_info->cpu_flags &= BootDone;
-		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
 		proc_info->intstack_top_ss = proc_info->istackptr;
 #if     MACH_KDP || MACH_KDB
-		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
 		proc_info->debstack_top_ss = proc_info->debstackptr;
 #endif  /* MACH_KDP || MACH_KDB */
-		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
-		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
-		proc_info->FPU_thread = 0;
+		proc_info->interrupts_enabled = 0;
 
-	    	if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
 			extern void _start_cpu(void);
-
+	
 			resethandler_target.type = RESET_HANDLER_START;
-			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); 
-			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
-
+			resethandler_target.call_paddr = (vm_offset_t)_start_cpu; 	/* Note: these routines are always V=R */
+			resethandler_target.arg__paddr = (vm_offset_t)proc_info; 	/* Note: these routines are always V=R */
+	
 			ml_phys_write((vm_offset_t)&ResetHandler + 0,
-				      resethandler_target.type);
+					  resethandler_target.type);
 			ml_phys_write((vm_offset_t)&ResetHandler + 4,
-				      resethandler_target.call_paddr);
+					  resethandler_target.call_paddr);
 			ml_phys_write((vm_offset_t)&ResetHandler + 8,
-				      resethandler_target.arg__paddr);
+					  resethandler_target.arg__paddr);
 					  
 			__asm__ volatile("sync");
 			__asm__ volatile("isync");
+		}
+
+		wait_ncpus_sleep = real_ncpus-1; 
+		ncpus_sleep = 0;
+		while (wait_ncpus_sleep != ncpus_sleep) {
+			ncpus_sleep = 0;
+			for(i=1; i < real_ncpus ; i++) {
+				if ((*(volatile short *)&per_proc_info[i].cpu_flags) & SleepState)
+					ncpus_sleep++;
 			}
+		}
 	}
 
 	PE_cpu_machine_quiesce(proc_info->cpu_id);
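
The loop added above is the master's sleep rendezvous: it spins until every secondary has posted SleepState before quiescing itself. The cast through a volatile pointer is what forces cpu_flags to be re-read from memory on each pass; a slightly flattened sketch of the same loop:

	unsigned int asleep;

	do {
		asleep = 0;
		for (i = 1; i < real_ncpus; i++)	/* i, real_ncpus as above */
			if (*(volatile short *)&per_proc_info[i].cpu_flags & SleepState)
				asleep++;			/* posted by each secondary as it sleeps */
	} while (asleep != real_ncpus - 1);		/* all secondaries are down */
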
@@ -775,19 +796,21 @@ cpu_sync_timebase(
 	syncClkSpot.ready = FALSE;
 	syncClkSpot.done = FALSE;
 
-	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot) 
-	       != KERN_SUCCESS);
+	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
+							(unsigned int)&syncClkSpot) != KERN_SUCCESS)
+		continue;
 
+	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
+		continue;
 
-	while (*(volatile int *)&(syncClkSpot.avail) == FALSE);
 	isync();
 
 	/*
 	 * We do the following to keep the compiler from generating extra code
 	 * in the timebase-set sequence.
 	 */
-	tbu = syncClkSpot.abstime.hi;
-	tbl = syncClkSpot.abstime.lo;
+	tbu = syncClkSpot.abstime >> 32;
+	tbl = (uint32_t)syncClkSpot.abstime;
 
 	mttb(0);
 	mttbu(tbu);
@@ -795,7 +818,8 @@ cpu_sync_timebase(
 
 	syncClkSpot.ready = TRUE;
 
-	while (*(volatile int *)&(syncClkSpot.done) == FALSE);
+	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
+		continue;
 
 	(void)ml_set_interrupts_enabled(intr);
 }
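
Finally, the handshake that cpu_sync_timebase() and the CPRQtimebase handler implement between them, summarized as a sketch. The slave writes mttb(0) first so the low word cannot carry into the freshly written high word; the master's tail end (wait for ready, re-enable its timebase, set done) falls outside the hunks shown here and is inferred:

	/*
	 *   slave (cpu_sync_timebase)        master (CPRQtimebase handler)
	 *   -------------------------        -----------------------------
	 *   avail = ready = done = FALSE
	 *   signal CPRQtimebase  ------->    time_base_enable(cpu, FALSE)
	 *   spin until avail                 abstime = 64-bit timebase read
	 *                        <-------    avail = TRUE
	 *   mttb(0); mttbu(hi); mttb(lo)
	 *   ready = TRUE         ------->    spin until ready        (inferred)
	 *   spin until done                  time_base_enable(TRUE)  (inferred)
	 *                        <-------    done = TRUE             (inferred)
	 */
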