*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
+#include <ppc/asm.h>
+#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
-//#include <pexpert/ppc/powermac.h>
+#include <kern/cpu_data.h>
+#include <ppc/mappings.h>
+#include <ppc/Diagnostics.h>
/* TODO: BOGUS TO BE REMOVED */
int real_ncpus = 1;
uint64_t abstime;
};
+struct per_proc_info *pper_proc_info = per_proc_info;
+
extern struct SIGtimebase syncClkSpot;
void cpu_sync_timebase(void);
cpu_subtype != cmd->cmd_cpu_subtype)
return(KERN_FAILURE);
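+ /*
+  * Presumably perfmon_acquire_facility() reserves the performance monitor
+  * hardware for the calling task; if another task already holds it, the
+  * request is refused below instead of silently sharing the counters.
+  */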
+ if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
+ return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
+ }
+
switch (cmd->cmd_op)
{
case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
switch (cpu_subtype)
{
- case CPU_SUBTYPE_POWERPC_604:
- {
- oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
- mtpmc1(0x0);
- mtpmc2(0x0);
- ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
- return(KERN_SUCCESS);
- }
- case CPU_SUBTYPE_POWERPC_604e:
case CPU_SUBTYPE_POWERPC_750:
case CPU_SUBTYPE_POWERPC_7400:
case CPU_SUBTYPE_POWERPC_7450:
case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
switch (cpu_subtype)
{
- case CPU_SUBTYPE_POWERPC_604:
- if (count < (PROCESSOR_CONTROL_CMD_COUNT
- + PROCESSOR_PM_REGS_COUNT_POWERPC_604))
- return(KERN_FAILURE);
- else
- {
- perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
- oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
- mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
- mtpmc1(PERFMON_PMC1(perf_regs));
- mtpmc2(PERFMON_PMC2(perf_regs));
- ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
- return(KERN_SUCCESS);
- }
- case CPU_SUBTYPE_POWERPC_604e:
case CPU_SUBTYPE_POWERPC_750:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
case PROCESSOR_PM_SET_MMCR:
switch (cpu_subtype)
{
- case CPU_SUBTYPE_POWERPC_604:
- if (count < (PROCESSOR_CONTROL_CMD_COUNT +
- PROCESSOR_PM_REGS_COUNT_POWERPC_604))
- return(KERN_FAILURE);
- else
- {
- perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
- mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
- return(KERN_SUCCESS);
- }
- case CPU_SUBTYPE_POWERPC_604e:
case CPU_SUBTYPE_POWERPC_750:
if (count < (PROCESSOR_CONTROL_CMD_COUNT +
PROCESSOR_PM_REGS_COUNT_POWERPC_750))
switch (flavor) {
case PROCESSOR_PM_REGS_INFO:
switch (cpu_subtype) {
- case CPU_SUBTYPE_POWERPC_604:
- *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
- return(KERN_SUCCESS);
-
- case CPU_SUBTYPE_POWERPC_604e:
case CPU_SUBTYPE_POWERPC_750:
*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
perf_regs = (processor_pm_regs_t) info;
switch (cpu_subtype) {
- case CPU_SUBTYPE_POWERPC_604:
-
- if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
- return(KERN_FAILURE);
-
- oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
- PERFMON_MMCR0(perf_regs) = mfmmcr0();
- PERFMON_PMC1(perf_regs) = mfpmc1();
- PERFMON_PMC2(perf_regs) = mfpmc2();
- ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
-
- *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
- return(KERN_SUCCESS);
-
- case CPU_SUBTYPE_POWERPC_604e:
case CPU_SUBTYPE_POWERPC_750:
if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
{
struct per_proc_info *proc_info;
kern_return_t ret;
+ mapping *mp;
- extern void (*exception_handlers[])(void);
extern vm_offset_t intstack;
extern vm_offset_t debstack;
proc_info->cpu_number = cpu;
proc_info->cpu_flags &= BootDone;
- proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
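+ /* Note: only a stack frame (FM_SIZE) is now reserved at the stack top
+    above, rather than a full ppc_saved_state; presumably the saved state
+    is kept in saveareas instead of on the stack. */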
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
- proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
proc_info->interrupts_enabled = 0;
proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
- proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
proc_info->need_ast = (unsigned int)&need_ast[cpu];
- proc_info->FPU_thread = 0;
- proc_info->FPU_vmmCtx = 0;
- proc_info->VMX_thread = 0;
- proc_info->VMX_vmmCtx = 0;
+ proc_info->FPU_owner = 0;
+ proc_info->VMX_owner = 0;
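+ /*
+  * Set up the per-processor copy-in/out mapping block.  The flag word and
+  * invalSpace value are taken to mark it as a special mapping with no valid
+  * address space until it is actually used (assumption; see ppc/mappings.h
+  * for the field definitions).
+  */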
+ mp = (mapping *)(&proc_info->ppCIOmp);
+ mp->mpFlags = 0x01000000 | mpSpecial | 1;
+ mp->mpSpace = invalSpace;
if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
/* TODO: get mutex lock reset_handler_lock */
resethandler_target.type = RESET_HANDLER_START;
- resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
- resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
+ resethandler_target.call_paddr = (vm_offset_t)_start_cpu; /* Note: these routines are always V=R */
+ resethandler_target.arg__paddr = (vm_offset_t)proc_info; /* Note: these routines are always V=R */
ml_phys_write((vm_offset_t)&ResetHandler + 0,
resethandler_target.type);
* that all processors are the same. This is just to get close.
*/
- ml_get_timebase(&proc_info->ruptStamp); /* Pass our current time to the other guy */
+ ml_get_timebase((unsigned long long *)&proc_info->ruptStamp); /* Pass our current time to the other guy */
__asm__ volatile("sync"); /* Commit to storage */
__asm__ volatile("isync"); /* Wait a second */
pproc = &per_proc_info[cpu]; /* Point to our block */
/*
- * Since we've been signaled, wait just under 1ms for the signal lock to pass
+ * Since we've been signaled, wait about 31 ms for the signal lock to pass
*/
- if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
- (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
+ if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
+ (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
panic("cpu_signal_handler: Lock pass timed out\n");
}
__asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
- pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc); /* Release lock */
+ pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
switch (holdParm0) { /* Decode SIGP message order */
case SIGPast: /* Should we do an AST? */
- pproc->numSIGPast++; /* Count this one */
+ pproc->hwCtr.numSIGPast++; /* Count this one */
#if 0
kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
- ast_check(); /* Yes, do it */
- /* XXX: Should check if AST_URGENT is needed */
- ast_on(AST_URGENT);
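+ /* ast_check() now takes the target processor explicitly and presumably
+    raises any needed AST itself, so the separate ast_on(AST_URGENT) call
+    is no longer required here. */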
+ ast_check(cpu_to_processor(cpu));
return; /* All done... */
case SIGPcpureq: /* CPU specific function? */
- pproc->numSIGPcpureq++; /* Count this one */
+ pproc->hwCtr.numSIGPcpureq++; /* Count this one */
switch (holdParm1) { /* Select specific function */
case CPRQtemp: /* Get the temperature */
timebaseAddr->done = TRUE;
return;
+
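+ /* New request type: perform a SCOM register access on this CPU on behalf
+    of the requester; holdParm2 is taken to point at a scomcomm block
+    describing the operation (assumption based on the fwSCOM() name). */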
+ case CPRQscom:
+ fwSCOM((scomcomm *)holdParm2); /* Do the function */
+ return;
default:
panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
case SIGPdebug: /* Enter the debugger? */
- pproc->numSIGPdebug++; /* Count this one */
+ pproc->hwCtr.numSIGPdebug++; /* Count this one */
debugger_is_slave[cpu]++; /* Bump up the count to show we're here */
hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
__asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
return; /* All done now... */
case SIGPwake: /* Wake up CPU */
- pproc->numSIGPwake++; /* Count this one */
+ pproc->hwCtr.numSIGPwake++; /* Count this one */
return; /* No need to do anything, the interrupt does it all... */
default:
unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
struct per_proc_info *tpproc, *mpproc; /* Area for per_proc addresses */
int cpu;
+ int busybitset = 0;
#if DEBUG
if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
tpproc = &per_proc_info[target]; /* Point to the target's block */
if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
-
+
if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
- if(signal == SIGPwake) return KERN_SUCCESS; /* SIGPwake can merge into all others... */
+ if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
+ mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
+ return KERN_SUCCESS;
+ }
if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
+ mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
return KERN_SUCCESS; /* Don't bother to send this one... */
}
+
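+ /*
+  * If the pending message is only a wake, apparently we may reclaim the busy
+  * slot ourselves (dropping the pass bit) and reuse it for this signal; the
+  * target was already interrupted for the wake, so no second PE_cpu_signal()
+  * is needed (see the busybitset test below).
+  */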
+ if (tpproc->MPsigpParm0 == SIGPwake) {
+ if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
+ (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
+ busybitset = 1;
+ mpproc->hwCtr.numSIGPmwake++;
+ }
+ }
}
- if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
- (gPEClockFrequencyInfo.bus_clock_rate_hz >> 13))) { /* Try to lock the message block with a .5ms timeout */
+ if((busybitset == 0) &&
+ (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
+ (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
+ mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
return KERN_FAILURE; /* Timed out, take your ball and go home... */
}
tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
__asm__ volatile("eieio"); /* I'm a paraniod freak */
- PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
+ if (busybitset == 0)
+ PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
return KERN_SUCCESS; /* All is goodness and rainbows... */
}
cpu_doshutdown(
void)
{
+ enable_preemption();
processor_doshutdown(current_processor());
}
{
struct per_proc_info *proc_info;
unsigned int cpu;
- extern void (*exception_handlers[])(void);
+ facility_context *fowner;
extern vm_offset_t intstack;
extern vm_offset_t debstack;
extern void _restart_cpu(void);
cpu = cpu_number();
-#if 0
- kprintf("******* About to sleep cpu %d\n", cpu);
-#endif
proc_info = &per_proc_info[cpu];
- if(proc_info->FPU_thread) fpu_save(proc_info->FPU_thread); /* If anyone owns FPU, save it */
- proc_info->FPU_thread = 0; /* Set no fpu owner now */
+ fowner = proc_info->FPU_owner; /* Cache this */
+ if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
+ proc_info->FPU_owner = 0; /* Set no fpu owner now */
- if(proc_info->VMX_thread) vec_save(proc_info->VMX_thread); /* If anyone owns vectors, save it */
- proc_info->VMX_thread = 0; /* Set no vector owner now */
+ fowner = proc_info->VMX_owner; /* Cache this */
+ if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
+ proc_info->VMX_owner = 0; /* Set no vector owner now */
if (proc_info->cpu_number == 0) {
proc_info->cpu_flags &= BootDone;
- proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
- proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
+ proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
proc_info->interrupts_enabled = 0;
extern void _start_cpu(void);
resethandler_target.type = RESET_HANDLER_START;
- resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
- resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
+ resethandler_target.call_paddr = (vm_offset_t)_start_cpu; /* Note: these routines are always V=R */
+ resethandler_target.arg__paddr = (vm_offset_t)proc_info; /* Note: these routines are always V=R */
ml_phys_write((vm_offset_t)&ResetHandler + 0,
resethandler_target.type);