*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
+#include <ppc/asm.h>
#include <pexpert/pexpert.h>
-//#include <pexpert/ppc/powermac.h>
+#include <kern/cpu_data.h>
/* TODO: BOGUS TO BE REMOVED */
int real_ncpus = 1;
boolean_t avail;
boolean_t ready;
boolean_t done;
- AbsoluteTime abstime;
+ uint64_t abstime;
};
+struct per_proc_info *pper_proc_info = per_proc_info;
+
extern struct SIGtimebase syncClkSpot;
void cpu_sync_timebase(void);
cpu_machine_init(
void)
{
- struct per_proc_info *proc_info;
+ struct per_proc_info *tproc_info;
+ volatile struct per_proc_info *mproc_info;
int cpu;
	/* TODO: release mutex lock reset_handler_lock */
cpu = cpu_number();
- proc_info = &per_proc_info[cpu];
- PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
- if (cpu != master_cpu)
+ tproc_info = &per_proc_info[cpu];
+ mproc_info = &per_proc_info[master_cpu];
+ PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
+ if (cpu != master_cpu) {
+ while (!((mproc_info->cpu_flags) & SignalReady))
+ continue;
cpu_sync_timebase();
+ }
ml_init_interrupt();
- proc_info->cpu_flags |= BootDone;
+ tproc_info->cpu_flags |= BootDone|SignalReady;
}
kern_return_t
if (cpu == cpu_number()) {
PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
ml_init_interrupt();
- proc_info->cpu_flags |= BootDone;
+ proc_info->cpu_flags |= BootDone|SignalReady;
return KERN_SUCCESS;
} else {
proc_info->cpu_number = cpu;
proc_info->cpu_flags &= BootDone;
- proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
- proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
- proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
- proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
+ proc_info->interrupts_enabled = 0;
proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
- proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
proc_info->need_ast = (unsigned int)&need_ast[cpu];
- proc_info->FPU_thread = 0;
- proc_info->FPU_vmmCtx = 0;
- proc_info->VMX_thread = 0;
- proc_info->VMX_vmmCtx = 0;
+ proc_info->FPU_owner = 0;
+ proc_info->VMX_owner = 0;
+
if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
* that all processors are the same. This is just to get close.
*/
- ml_get_timebase(&proc_info->ruptStamp); /* Pass our current time to the other guy */
+ ml_get_timebase((unsigned long long *)&proc_info->ruptStamp); /* Pass our current time to the other guy */
__asm__ volatile("sync"); /* Commit to storage */
__asm__ volatile("isync"); /* Wait a second */
pproc = &per_proc_info[cpu]; /* Point to our block */
/*
- * Since we've been signaled, wait just under 1ms for the signal lock to pass
+ * Since we've been signaled, wait about 31 ms for the signal lock to pass
*/
- if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
- (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
+ if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
+ (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
panic("cpu_signal_handler: Lock pass timed out\n");
}
__asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
- pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc); /* Release lock */
+ pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
#if 0
kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
- ast_check(); /* Yes, do it */
- /* XXX: Should check if AST_URGENT is needed */
- ast_on(AST_URGENT);
+ ast_check(cpu_to_processor(cpu));
return; /* All done... */
case SIGPcpureq: /* CPU specific function? */
if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
pproc->time_base_enable(pproc->cpu_id, FALSE);
- timebaseAddr->abstime.hi = 0; /* Touch to force into cache */
+ timebaseAddr->abstime = 0; /* Touch to force into cache */
sync();
do {
asm volatile(" mftbu %0" : "=r" (tbu2));
} while (tbu != tbu2);
- timebaseAddr->abstime.lo = tbl; /* Set low order */
- timebaseAddr->abstime.hi = tbu; /* Set high order */
+ timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
sync(); /* Force order */
timebaseAddr->avail = TRUE;
unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
struct per_proc_info *tpproc, *mpproc; /* Area for per_proc addresses */
int cpu;
+ int busybitset =0;
#if DEBUG
if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
mpproc = &per_proc_info[cpu]; /* Point to our block */
tpproc = &per_proc_info[target]; /* Point to the target's block */
+
+ if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
+
+ if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
+
+ if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
+ mpproc->numSIGPmwake++; /* Account for merged wakes */
+ return KERN_SUCCESS;
+ }
+
+ if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
+ mpproc->numSIGPmast++; /* Account for merged ASTs */
+ return KERN_SUCCESS; /* Don't bother to send this one... */
+ }
+
+ if (tpproc->MPsigpParm0 == SIGPwake) {
+ if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
+ (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
+ busybitset = 1;
+ mpproc->numSIGPmwake++;
+ }
+ }
+ }
- if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
- (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) { /* Try to lock the message block */
+ if((busybitset == 0) &&
+ (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
+ (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
+ mpproc->numSIGPtimo++; /* Account for timeouts */
return KERN_FAILURE; /* Timed out, take your ball and go home... */
}
tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
	__asm__ volatile("eieio");				/* I'm a paranoid freak */
- PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
+ if (busybitset == 0)
+ PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
return KERN_SUCCESS; /* All is goodness and rainbows... */
}
{
struct per_proc_info *proc_info;
unsigned int cpu;
+ facility_context *fowner;
extern void (*exception_handlers[])(void);
extern vm_offset_t intstack;
extern vm_offset_t debstack;
proc_info = &per_proc_info[cpu];
+ fowner = proc_info->FPU_owner; /* Cache this */
+ if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
+ proc_info->FPU_owner = 0; /* Set no fpu owner now */
+
+ fowner = proc_info->VMX_owner; /* Cache this */
+ if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
+ proc_info->VMX_owner = 0; /* Set no vector owner now */
+
if (proc_info->cpu_number == 0) {
proc_info->cpu_flags &= BootDone;
- proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
- proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
- proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
- proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
- proc_info->FPU_thread = 0;
+ proc_info->interrupts_enabled = 0;
- if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
extern void _start_cpu(void);
-
+
resethandler_target.type = RESET_HANDLER_START;
resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
-
+
ml_phys_write((vm_offset_t)&ResetHandler + 0,
- resethandler_target.type);
+ resethandler_target.type);
ml_phys_write((vm_offset_t)&ResetHandler + 4,
- resethandler_target.call_paddr);
+ resethandler_target.call_paddr);
ml_phys_write((vm_offset_t)&ResetHandler + 8,
- resethandler_target.arg__paddr);
+ resethandler_target.arg__paddr);
__asm__ volatile("sync");
__asm__ volatile("isync");
- }
+ }
}
PE_cpu_machine_quiesce(proc_info->cpu_id);
syncClkSpot.ready = FALSE;
syncClkSpot.done = FALSE;
- while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot)
- != KERN_SUCCESS);
+ while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
+ (unsigned int)&syncClkSpot) != KERN_SUCCESS)
+ continue;
+ while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
+ continue;
- while (*(volatile int *)&(syncClkSpot.avail) == FALSE);
isync();
/*
* We do the following to keep the compiler from generating extra stuff
* in tb set part
*/
- tbu = syncClkSpot.abstime.hi;
- tbl = syncClkSpot.abstime.lo;
+ tbu = syncClkSpot.abstime >> 32;
+ tbl = (uint32_t)syncClkSpot.abstime;
mttb(0);
mttbu(tbu);
syncClkSpot.ready = TRUE;
- while (*(volatile int *)&(syncClkSpot.done) == FALSE);
+ while (*(volatile int *)&(syncClkSpot.done) == FALSE)
+ continue;
(void)ml_set_interrupts_enabled(intr);
}