/*
 * Copyright (c) 2005-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 * @OSF_FREE_COPYRIGHT@
 * @APPLE_FREE_COPYRIGHT@
 *
 * Author: Bill Angell, Apple
 * Random diagnostics, augmented Derek Kumar 2011
 */
45 #include <kern/machine.h>
46 #include <kern/processor.h>
47 #include <mach/machine.h>
48 #include <mach/processor_info.h>
49 #include <mach/mach_types.h>
50 #include <mach/boolean.h>
51 #include <kern/thread.h>
52 #include <kern/task.h>
53 #include <kern/ipc_kobject.h>
54 #include <mach/vm_param.h>
56 #include <ipc/ipc_entry.h>
57 #include <ipc/ipc_space.h>
58 #include <ipc/ipc_object.h>
59 #include <ipc/ipc_port.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_map.h>
62 #include <vm/vm_page.h>
64 #include <pexpert/pexpert.h>
65 #include <console/video_console.h>
66 #include <i386/cpu_data.h>
67 #include <i386/Diagnostics.h>
69 #include <i386/pmCPU.h>
71 #include <mach/i386/syscall_sw.h>
72 #include <kern/kalloc.h>
73 #include <sys/kdebug.h>
75 #include <i386/machine_cpu.h>
76 #include <i386/misc_protos.h>
77 #include <i386/cpuid.h>
#define PERMIT_PERMCHECK (0)	/* Set non-zero to enable the pmap_permissions_verify diagnostic below */

/* Timestamp (mach_absolute_time units) of the last interrupt-statistics clear;
 * written by the dgRuptStat selector in diagCall64() and used to report the
 * interval covered by the copied-out interrupt counts. */
uint64_t lastRuptClear = 0ULL;

void cpu_powerstats(void *);	/* per-CPU statistics snapshot, dispatched via mp_cpus_call */
/*
 * NOTE(review): the opening of the structure these first five members belong
 * to (core_energy_stat_t — per-CPU energy/residency statistics copied out by
 * diagCall64) is not visible in this chunk; the extraction dropped lines
 * here, including that struct's closing and the opening of the package-level
 * structure that follows.  Reconcile against the original source.
 */
uint64_t crtimes[CPU_RTIME_BINS];	/* per-bin run-time values — copied from cpu_rtimes in diagCall64 */
uint64_t citimes[CPU_ITIME_BINS];	/* per-bin idle-time values — copied from cpu_itimes in diagCall64 */
uint64_t crtime_total;			/* cumulative run time (cpu_rtime_total) */
uint64_t citime_total;			/* cumulative idle time (cpu_itime_total) */
uint64_t cpu_idle_exits;		/* count of idle exits for this CPU */

/* Package-scope statistics (pkg_energy_statistics_t members). */
uint64_t pkg_cres[2][7];		/* package C-state residencies (C2/C3/C6/C7 and, on supported
					 * models, C8/C9/C10) — filled from PKG_*_RESIDENCY MSRs */
uint64_t pkg_power_unit;		/* raw MSR_IA32_PKG_POWER_SKU_UNIT value */
uint64_t llc_flushed_cycles;		/* ~0ULL sentinel if the MSR read fails */
uint64_t ring_ratio_instantaneous;
uint64_t IA_frequency_clipping_cause;
uint64_t GT_frequency_clipping_cause;
uint64_t pkg_idle_exits;		/* package idle-exit count (package_idle_exits) */
uint64_t pkg_rtimes[CPU_RTIME_BINS];
uint64_t pkg_itimes[CPU_ITIME_BINS];
uint64_t mbus_delay_time;
uint64_t mint_delay_time;
/* Flexible array member: one core_energy_stat_t per CPU follows the
 * package block in the copied-out buffer (see diagCall64's copyout loop). */
core_energy_stat_t cest[];
} pkg_energy_statistics_t;
/*
 * diagCall64 -- kernel diagnostics "syscall" dispatcher (x86_64).
 *
 * Dispatches on the selector passed in %rdi to one of a set of diagnostic
 * routines (interrupt statistics, power/energy statistics, PMC enable,
 * pmap permission verification) and returns a status in rval.
 *
 * NOTE(review): this chunk was recovered from a mangled extraction.  The
 * function's return type, its opening brace, some local declarations
 * (notably "rval"), several case labels (e.g. the power-statistics and
 * PMC-enable selectors), "break" statements and a number of closing
 * braces are NOT visible here.  The code below reproduces only the tokens
 * that survived; inline NOTE(review) comments mark the visible gaps.
 * Reconcile against the original source before building.
 */
diagCall64(x86_saved_state_t * state)
	uint64_t curpos, i, j;
	uint64_t selector, data;
	uint64_t currNap, durNap;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));	/* diag calls arrive as 64-bit saved state */
	regs = saved_state64(state);

	/* Diagnostic syscalls are honored only when the enaDiagSCs flag is set */
	diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0);
	selector = regs->rdi;			/* Caller's selector arrives in %rdi */

	switch (selector) {	/* Select the routine */
	case dgRuptStat:	/* Suck Interruption statistics */
		(void) ml_set_interrupts_enabled(TRUE);
		data = regs->rsi;	/* Get the number of processors */

		if (data == 0) {	/* If no location is specified for data, clear all counts */
			for (i = 0; i < real_ncpus; i++) {	/* Cycle through all CPUs */
				for (j = 0; j < 256; j++)
					cpu_data_ptr[i]->cpu_hwIntCnt[j] = 0;

			/* NOTE(review): the closing braces for the loops/if above and
			 * the "break" that presumably followed are not visible here. */
			lastRuptClear = mach_absolute_time();	/* Get the time of clear */
			rval = 1;	/* Normal return */

		(void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus));	/* Copy out number of processors */

		currNap = mach_absolute_time();		/* Get the time now */
		durNap = currNap - lastRuptClear;	/* Get the last interval */
		/* NOTE(review): the guard before this assignment (presumably
		 * "if (durNap == 0)") is not visible in this chunk. */
			durNap = 1;	/* This is a very short time, make it non-zero */

		curpos = data + sizeof(real_ncpus);	/* Point to the next available spot */

		for (i = 0; i < real_ncpus; i++) {	/* Move 'em all out */
			(void) copyout((char *) &durNap, curpos, 8);	/* Copy out the time
									 * since last clear */
			(void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t));	/* Copy out interrupt counts */
			curpos = curpos + (256 * sizeof(uint32_t) + 8);	/* Point to next output area */

		/* NOTE(review): end of dgRuptStat (loop close, "rval = 1", "break")
		 * and the case label for the power-statistics selector are not
		 * visible in this chunk; the declarations below belong to that case. */
		uint32_t c2l = 0, c2h = 0, c3l = 0, c3h = 0, c6l = 0, c6h = 0, c7l = 0, c7h = 0;
		uint32_t pkg_unit_l = 0, pkg_unit_h = 0, pkg_ecl = 0, pkg_ech = 0;

		pkg_energy_statistics_t pkes;
		core_energy_stat_t cest;

		bzero(&pkes, sizeof(pkes));
		bzero(&cest, sizeof(cest));

		/* Package C-state residencies: 64-bit MSRs read as two 32-bit halves */
		rdmsr_carefully(MSR_IA32_PKG_C2_RESIDENCY, &c2l, &c2h);
		rdmsr_carefully(MSR_IA32_PKG_C3_RESIDENCY, &c3l, &c3h);
		rdmsr_carefully(MSR_IA32_PKG_C6_RESIDENCY, &c6l, &c6h);
		rdmsr_carefully(MSR_IA32_PKG_C7_RESIDENCY, &c7l, &c7h);

		pkes.pkg_cres[0][0] = ((uint64_t)c2h << 32) | c2l;
		pkes.pkg_cres[0][1] = ((uint64_t)c3h << 32) | c3l;
		pkes.pkg_cres[0][2] = ((uint64_t)c6h << 32) | c6l;
		pkes.pkg_cres[0][3] = ((uint64_t)c7h << 32) | c7l;

		uint32_t cpumodel = cpuid_info()->cpuid_model;
		/* NOTE(review): a switch/if on cpumodel evidently opened here; only
		 * the Haswell-ULT case label survived in this chunk. */
		case CPUID_MODEL_HASWELL_ULT:
			/* Pre-set to ~0 so a failed/unsupported MSR read leaves a
			 * recognizable sentinel rather than stale data */
			uint64_t c8r = ~0ULL, c9r = ~0ULL, c10r = ~0ULL;

			rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r);
			rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r);
			rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r);

			pkes.pkg_cres[0][4] = c8r;
			pkes.pkg_cres[0][5] = c9r;
			pkes.pkg_cres[0][6] = c10r;

		/* Each of the following defaults to the ~0 sentinel before the read */
		pkes.ddr_energy = ~0ULL;
		rdmsr64_carefully(MSR_IA32_DDR_ENERGY_STATUS, &pkes.ddr_energy);
		pkes.llc_flushed_cycles = ~0ULL;
		rdmsr64_carefully(MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER, &pkes.llc_flushed_cycles);
		pkes.ring_ratio_instantaneous = ~0ULL;
		rdmsr64_carefully(MSR_IA32_RING_PERF_STATUS, &pkes.ring_ratio_instantaneous);
		pkes.IA_frequency_clipping_cause = ~0ULL;
		rdmsr64_carefully(MSR_IA32_IA_PERF_LIMIT_REASONS, &pkes.IA_frequency_clipping_cause);
		pkes.GT_frequency_clipping_cause = ~0ULL;
		rdmsr64_carefully(MSR_IA32_GT_PERF_LIMIT_REASONS, &pkes.GT_frequency_clipping_cause);

		/* Energy-status readings (package, PP0, PP1) — low/high halves combined */
		rdmsr_carefully(MSR_IA32_PKG_POWER_SKU_UNIT, &pkg_unit_l, &pkg_unit_h);
		rdmsr_carefully(MSR_IA32_PKG_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pkg_power_unit = ((uint64_t)pkg_unit_h << 32) | pkg_unit_l;
		pkes.pkg_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP0_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp0_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP1_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp1_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		pkes.pkg_idle_exits = current_cpu_datap()->lcpu.package->package_idle_exits;
		pkes.ncpus = real_ncpus;

		(void) ml_set_interrupts_enabled(TRUE);

		/* Package-level block first, then one core_energy_stat_t per CPU */
		copyout(&pkes, regs->rsi, sizeof(pkes));
		curpos = regs->rsi + sizeof(pkes);

		/* Have every CPU refresh its own counters before we copy them out */
		mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL);

		for (i = 0; i < real_ncpus; i++) {
			/* Interrupts off while sampling another CPU's live counters */
			(void) ml_set_interrupts_enabled(FALSE);

			cest.caperf = cpu_data_ptr[i]->cpu_aperf;
			cest.cmperf = cpu_data_ptr[i]->cpu_mperf;
			cest.ccres[0] = cpu_data_ptr[i]->cpu_c3res;
			cest.ccres[1] = cpu_data_ptr[i]->cpu_c6res;
			cest.ccres[2] = cpu_data_ptr[i]->cpu_c7res;

			bcopy(&cpu_data_ptr[i]->cpu_rtimes[0], &cest.crtimes[0], sizeof(cest.crtimes));
			bcopy(&cpu_data_ptr[i]->cpu_itimes[0], &cest.citimes[0], sizeof(cest.citimes));

			cest.citime_total = cpu_data_ptr[i]->cpu_itime_total;
			cest.crtime_total = cpu_data_ptr[i]->cpu_rtime_total;
			cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits;
			cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns;
			cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc;
			cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc;
			(void) ml_set_interrupts_enabled(TRUE);

			copyout(&cest, curpos, sizeof(cest));
			curpos += sizeof(cest);

		/* NOTE(review): loop close, "rval = 1", "break" and the next case
		 * label (PMC enable) are not visible in this chunk. */
		boolean_t enable = TRUE;
		mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable);

		/* NOTE(review): the remaining case bodies survive only partially;
		 * their labels and closing statements are not visible. */
		(void) ml_set_interrupts_enabled(TRUE);

		unsigned *ptr = (unsigned *)kalloc(1024);
		/* NOTE(review): the use and free of "ptr" are not visible in this
		 * chunk — as shown this allocation would leak; confirm against the
		 * original source. */

		(void) ml_set_interrupts_enabled(TRUE);

		/* Verify kernel pmap permissions over the whole address range;
		 * guarded by PERMIT_PERMCHECK (the matching #if is not visible here) */
		rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL);
#endif /* PERMIT_PERMCHECK */

	default:	/* Handle invalid ones */
		rval = 0;	/* Return an exception */

	/* NOTE(review): the switch's closing brace is not visible in this chunk. */
	return rval;	/* Normal non-ast check return */
326 void cpu_powerstats(__unused
void *arg
) {
327 cpu_data_t
*cdp
= current_cpu_datap();
328 __unused
int cnum
= cdp
->cpu_number
;
329 uint32_t cl
= 0, ch
= 0, mpl
= 0, mph
= 0, apl
= 0, aph
= 0;
331 rdmsr_carefully(MSR_IA32_MPERF
, &mpl
, &mph
);
332 rdmsr_carefully(MSR_IA32_APERF
, &apl
, &aph
);
334 cdp
->cpu_mperf
= ((uint64_t)mph
<< 32) | mpl
;
335 cdp
->cpu_aperf
= ((uint64_t)aph
<< 32) | apl
;
337 uint64_t ctime
= mach_absolute_time();
338 cdp
->cpu_rtime_total
+= ctime
- cdp
->cpu_ixtime
;
339 cdp
->cpu_ixtime
= ctime
;
341 rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY
, &cl
, &ch
);
342 cdp
->cpu_c3res
= ((uint64_t)ch
<< 32) | cl
;
344 rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY
, &cl
, &ch
);
345 cdp
->cpu_c6res
= ((uint64_t)ch
<< 32) | cl
;
347 rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY
, &cl
, &ch
);
348 cdp
->cpu_c7res
= ((uint64_t)ch
<< 32) | cl
;
350 uint64_t insns
= read_pmc(FIXED_PMC0
);
351 uint64_t ucc
= read_pmc(FIXED_PMC1
);
352 uint64_t urc
= read_pmc(FIXED_PMC2
);
353 cdp
->cpu_cur_insns
= insns
;
354 cdp
->cpu_cur_ucc
= ucc
;
355 cdp
->cpu_cur_urc
= urc
;
/*
 * cpu_pmc_control -- enable/disable fixed performance counters on the
 * calling CPU (dispatched to every CPU via mp_cpus_call; *enablep is the
 * shared boolean_t flag).
 *
 * NOTE(review): as visible in this chunk the enable and disable paths run
 * unconditionally back to back, and the function's closing brace is beyond
 * the end of the extraction; an "if (enable) { ... } else { ... }"
 * structure appears to have been lost — reconcile against the original
 * source before building.
 */
void cpu_pmc_control(void *enablep) {
	boolean_t enable = *(boolean_t *)enablep;
	cpu_data_t *cdp = current_cpu_datap();
	/* 0x38F is IA32_PERF_GLOBAL_CTRL: bits 32-34 enable the three fixed
	 * counters, low bits the programmable ones — confirm mask per SDM */
	wrmsr64(0x38F, 0x70000000FULL);
	/* 0x38D is IA32_FIXED_CTR_CTRL: 0x333 enables FIXED_CTR0-2 in all rings */
	wrmsr64(0x38D, 0x333);
	set_cr4(get_cr4() | CR4_PCE);	/* permit user-mode RDPMC */
	set_cr4((get_cr4() & ~CR4_PCE));	/* disable path: forbid user-mode RDPMC */
	cdp->cpu_fixed_pmcs_enabled = enable;