/*
- * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+
#include <mach/mach_types.h>
#include <mach/mach_host.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
-#include <i386/perfmon.h>
+#include <i386/lapic.h>
#include <i386/mp.h>
+#include <i386/trap.h>
+#include <mach/i386/syscall_sw.h>
#include <chud/chud_xnu.h>
+#if 0
#pragma mark **** cpu enable/disable ****
+#endif
extern kern_return_t processor_start(processor_t processor); // osfmk/kern/processor.c
extern kern_return_t processor_exit(processor_t processor); // osfmk/kern/processor.c
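
+// Brings a CPU online or takes it offline on behalf of CHUD. The calling
+// thread is unbound first, presumably so it is not left bound to a CPU that
+// is about to be shut down; the boot processor is never started or stopped here.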
__private_extern__
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
- chudxnu_unbind_thread(current_thread());
+ chudxnu_unbind_thread(current_thread(), 0);
if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
return KERN_FAILURE;

	if((cpu_data_ptr[cpu] != NULL) && cpu != master_cpu) {
		processor_t processor = cpu_to_processor(cpu);

if(enable) {
- // make sure it isn't already running
- if(processor->state == PROCESSOR_OFF_LINE ||
- processor->state == PROCESSOR_SHUTDOWN) {
- return processor_start(processor);
- }
- return KERN_SUCCESS; // it's already running
+ return processor_start(processor);
} else {
- // make sure it hasn't already exited
- if(processor->state != PROCESSOR_OFF_LINE &&
- processor->state != PROCESSOR_SHUTDOWN) {
- return processor_exit(processor);
- }
- return KERN_SUCCESS;
+ return processor_exit(processor);
}
}
return KERN_FAILURE;
}
-#pragma mark **** cache flush ****
-
-__private_extern__
-void
-chudxnu_flush_caches(void)
-{
-/* XXX */
-}
-
-__private_extern__
-void
-chudxnu_enable_caches(boolean_t enable)
-{
-#pragma unused (enable)
-/* XXX */
-}
-
+#if 0
#pragma mark **** perfmon facility ****
+#endif
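+// The old i386 perfmon driver (pmc_acquire/pmc_release) is gone; these entry
+// points survive as always-succeeding stubs, presumably so CHUD clients that
+// still negotiate the facility keep working.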
__private_extern__ kern_return_t
-chudxnu_perfmon_acquire_facility(task_t task)
+chudxnu_perfmon_acquire_facility(task_t task __unused)
{
- return pmc_acquire(task);
+ return KERN_SUCCESS;
}
__private_extern__ kern_return_t
-chudxnu_perfmon_release_facility(task_t task)
+chudxnu_perfmon_release_facility(task_t task __unused)
{
- return pmc_release(task);
+ return KERN_SUCCESS;
}
-#pragma mark **** rupt counters ****
+#if 0
+#pragma mark **** interrupt counters ****
+#endif
__private_extern__ kern_return_t
-chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts)
+chudxnu_get_cpu_interrupt_counters(int cpu, interrupt_counters_t *rupts)
{
if(cpu < 0 || (unsigned int)cpu >= real_ncpus) { // sanity check
		return KERN_FAILURE;
	}

	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
cpu_data_t *per_proc;
per_proc = cpu_data_ptr[cpu];
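+	// cpu_hwIntCnt[] holds one count per IDT vector; the PowerPC-flavored
+	// fields of interrupt_counters_t are mapped onto the closest x86
+	// equivalents below and left at zero where no sensible mapping exists.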
- rupts->hwResets = 0;
- rupts->hwMachineChecks = 0;
+ // For now, we'll call an NMI a 'reset' interrupt
+ rupts->hwResets = per_proc->cpu_hwIntCnt[T_NMI];
+ rupts->hwMachineChecks = per_proc->cpu_hwIntCnt[T_MACHINE_CHECK];
rupts->hwDSIs = 0;
rupts->hwISIs = 0;
+ // we could accumulate 0x20-0x7f, but that'd likely overflow...
rupts->hwExternals = 0;
- rupts->hwAlignments = 0;
+ // This appears to be wrong.
+ rupts->hwAlignments = 0; //per_proc->cpu_hwIntCnt[0x11];
rupts->hwPrograms = 0;
- rupts->hwFloatPointUnavailable = 0;
- rupts->hwDecrementers = 0;
- rupts->hwIOErrors = 0;
- rupts->hwSystemCalls = 0;
- rupts->hwTraces = 0;
+ rupts->hwFloatPointUnavailable = per_proc->cpu_hwIntCnt[T_NO_FPU];
+ // osfmk/i386/mp.h
+ rupts->hwDecrementers = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(TIMER)];
+ // LAPIC_ERROR == IO ERROR??
+ rupts->hwIOErrors = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(ERROR)];
+
+ // accumulate all system call types
+ // osfmk/mach/i386/syscall_sw.h
+ rupts->hwSystemCalls = per_proc->cpu_hwIntCnt[UNIX_INT] +
+ per_proc->cpu_hwIntCnt[MACH_INT] +
+ per_proc->cpu_hwIntCnt[MACHDEP_INT] +
+ per_proc->cpu_hwIntCnt[DIAG_INT];
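+	// Note: only system calls entering through these software-interrupt gates
+	// are counted; calls made via the sysenter/syscall fast paths presumably
+	// never bump cpu_hwIntCnt, so this is at best a partial count.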
+
+ rupts->hwTraces = per_proc->cpu_hwIntCnt[T_DEBUG]; // single steps == traces??
rupts->hwFloatingPointAssists = 0;
- rupts->hwPerformanceMonitors = 0;
+ // osfmk/i386/mp.h
+ rupts->hwPerformanceMonitors =
+ per_proc->cpu_hwIntCnt[LAPIC_VECTOR(PERFCNT)];
rupts->hwAltivecs = 0;
- rupts->hwInstBreakpoints = 0;
+ rupts->hwInstBreakpoints = per_proc->cpu_hwIntCnt[T_INT3];
rupts->hwSystemManagements = 0;
rupts->hwAltivecAssists = 0;
- rupts->hwThermal = 0;
+ rupts->hwThermal = per_proc->cpu_hwIntCnt[LAPIC_VECTOR(THERMAL)];
rupts->hwSoftPatches = 0;
rupts->hwMaintenances = 0;
- rupts->hwInstrumentations = 0;
+ // Watchpoint == instrumentation
+ rupts->hwInstrumentations = per_proc->cpu_hwIntCnt[T_WATCHPOINT];
ml_set_interrupts_enabled(oldlevel);
return KERN_SUCCESS;
}
__private_extern__ kern_return_t
-chudxnu_clear_cpu_rupt_counters(int cpu)
+chudxnu_clear_cpu_interrupt_counters(int cpu)
{
if(cpu < 0 || (unsigned int)cpu >= real_ncpus) { // sanity check
return KERN_FAILURE;
}
+ cpu_data_t *per_proc;
+
+ per_proc = cpu_data_ptr[cpu];
+
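+	// cpu_hwIntCnt is assumed to be 256 32-bit per-vector counters (one per
+	// IDT vector), matching the sizeof(uint32_t)*256 below; clear them all.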
+ bzero((char *)per_proc->cpu_hwIntCnt, sizeof(uint32_t)*256);
-/*
- * XXX
- * bzero((char *)&(cpu_data_ptr[cpu]->hwCtrs), sizeof(struct hwCtrs));
- */
return KERN_SUCCESS;
}
+