/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <i386/lapic.h>
#include <sys/errno.h>
#include <kperf/buffer.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

/* Fixed counter control -- three counters, each enabled for OS and USER rings */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)

#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Use the non-serialising RDPMC instruction, rather than RDMSR, to read counters */
#define USE_RDPMC

#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL<<30)

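/*
 * Layout notes: IA32_FIXED_CTR_CTRL gives each fixed counter a 4-bit field
 * (bit 0 counts ring 0, bit 1 counts rings > 0, bit 3 raises a PMI on
 * overflow), so 0x333 enables fixed counters 0-2 in all rings and 0x888
 * sets the PMI bit on all three.  For RDPMC, bit 30 of the counter selector
 * switches to the fixed-counter bank, so fixed counter i is read with
 * ECX = RDPMC_FIXED_COUNTER_SELECTOR | i.
 */
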
/* track the last config we enabled */
static uint32_t kpc_running = 0;

/* PMC / MSR accesses */

static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}

static uint64_t
IA32_FIXED_CTRx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
#endif /* !USE_RDPMC */
}

#ifdef FIXED_COUNTER_SHADOW
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif

static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}

static void
wrIA32_PMCx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
}

static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}

static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}

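#if 0
/*
 * Illustrative sketch only, not compiled: the sort of wrapper that
 * rdpmc64() in <i386/proc_reg.h> is assumed to boil down to.  RDPMC takes
 * the counter selector in ECX and returns the 64-bit count in EDX:EAX.
 */
static inline uint64_t
rdpmc64_sketch(uint32_t pmc)
{
	uint32_t lo, hi;
	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (pmc));
	return ((uint64_t)hi << 32) | lo;
}
#endif
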
/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(void)
{
	return (kpc_running & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK;
}

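/*
 * Counter counts and widths come from the architectural performance
 * monitoring leaf (CPUID leaf 0xA), cached by cpuid_info() in
 * cpuid_arch_perf_leaf: EAX describes the number and bit width of the
 * programmable counters, EDX those of the fixed-function counters.
 */
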
uint32_t
kpc_fixed_count(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_number;
}

uint32_t
kpc_configurable_count(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.number;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}

uint32_t
kpc_configurable_config_count(void)
{
	return kpc_configurable_count();
}

static uint8_t
kpc_fixed_width(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_width;
}

static uint8_t
kpc_configurable_width(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.width;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << kpc_fixed_width()) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << kpc_configurable_width()) - 1;
}
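
/*
 * Worked example: with the 48-bit programmable counters typical of recent
 * Intel cores, kpc_configurable_max() is 2^48 - 1 = 0xffffffffffff.  The
 * period-based sampling below arms a counter by preloading max - period,
 * so it overflows, and raises a PMI, after `period` further events.
 */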

#ifdef FIXED_COUNTER_SHADOW
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif

static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}

void kpc_pmi_handler(x86_saved_state_t *state);

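/*
 * In IA32_PERF_GLOBAL_CTRL, bits 0..N-1 gate the N programmable counters
 * and bits 32..32+M-1 gate the M fixed-function counters.  A counter only
 * counts when both its global-control bit and its local enable (the fixed
 * control field or PERFEVTSEL.EN) are set.
 */
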
static void
set_running_fixed(boolean_t on)
{
	uint64_t global = 0, mask = 0, fixed_ctrl = 0;
	int i;
	boolean_t enabled;

	if( on )
		/* these are per-thread in SMT */
		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
	else
		/* don't allow disabling fixed counters */
		return;

	wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for( i = 0; i < (int) kpc_fixed_count(); i++ )
		mask |= (1ULL<<(32+i));

	if( on )
		global |= mask;
	else
		global &= ~mask;

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

static void
set_running_configurable(boolean_t on)
{
	uint64_t global = 0, mask = 0;
	uint64_t cfg, save;
	int i;
	boolean_t enabled;
	int ncnt = (int) kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for( i = 0; i < ncnt; i++ ) {
		mask |= (1ULL<<i);

		/* need to save and restore counter since it resets when reconfigured */
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

	if( on )
		global |= mask;
	else
		global &= ~mask;

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_running_mp_call( void *vstate )
{
	uint32_t new_state = *(uint32_t*)vstate;

	set_running_fixed((new_state & KPC_CLASS_FIXED_MASK) != 0);
	set_running_configurable((new_state & KPC_CLASS_CONFIGURABLE_MASK) != 0);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();

	return 0;
}

static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}
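
/*
 * The saturating hardware counters are virtualised into free-running
 * 64-bit values: SHADOW accumulates everything counted as of the last PMI,
 * RELOAD is the value the hardware counter was preloaded with, so a
 * current reading is SHADOW + (hardware - RELOAD), plus one full wrap,
 * (max - RELOAD) + hardware, if the counter overflowed since reload.
 */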

int
kpc_get_fixed_counters(uint64_t *counterv)
{
	int i, n = kpc_fixed_count();

#ifdef FIXED_COUNTER_SHADOW
	uint64_t status;

	/* snap the counters */
	for( i = 0; i < n; i++ ) {
		counterv[i] = FIXED_SHADOW(i) +
			(IA32_FIXED_CTRx(i) - FIXED_RELOAD(i));
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed. */
	for( i = 0; i < n; i++ ) {
		if ((1ull << (i + 32)) & status)
			counterv[i] = FIXED_SHADOW(i) +
				(kpc_fixed_max() - FIXED_RELOAD(i)) + IA32_FIXED_CTRx(i);
	}
#else
	for( i = 0; i < n; i++ )
		counterv[i] = IA32_FIXED_CTRx(i);
#endif

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);

	for( i = 0; i < n; i++ )
		configv[i] = IA32_PERFEVTSELx(i);

	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t save;

	for( i = 0; i < n; i++ ) {
		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, configv[i]);
		wrIA32_PMCx(i, save);
	}

	return 0;
}
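
/*
 * A kpc_config_t here is a raw IA32_PERFEVTSELx value.  Illustrative
 * encoding only: unhalted core cycles (event 0x3C, umask 0x00), counted
 * in all rings with a PMI on overflow, would be
 *
 *	0x3C | (0x00 << 8)			event select, umask
 *	    | (1ULL << 16) | (1ULL << 17)	USR and OS rings
 *	    | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN
 */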

int
kpc_get_configurable_counters(uint64_t *counterv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t status;

	/* snap the counters */
	for( i = 0; i < n; i++ ) {
		counterv[i] = CONFIGURABLE_SHADOW(i) +
			(IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed. */
	for( i = 0; i < n; i++ ) {
		if ((1ull << i) & status) {
			counterv[i] = CONFIGURABLE_SHADOW(i) +
				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}

static void
kpc_set_config_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = mp_config->classes;
	kpc_config_t *new_config = mp_config->configv;
	int count = 0;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		kpc_set_configurable_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_reload_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint64_t max = kpc_configurable_max();
	uint32_t i, count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *new_period;
	uint32_t classes;
	boolean_t enabled;

	classes = mp_config->classes;
	new_period = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		enabled = ml_set_interrupts_enabled(FALSE);

		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0));

		for (i = 0; i < count; i++) {
			if (new_period[i] == 0)
				new_period[i] = max;

			CONFIGURABLE_RELOAD(i) = max - new_period[i];

			kpc_reload_configurable(i);

			/* clear overflow bit just in case */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);
		}

		ml_set_interrupts_enabled(enabled);
	}
}

int
kpc_set_period_arch( struct kpc_config_remote *mp_config )
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config );

	return 0;
}


/* interface functions */

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

int
kpc_set_running(uint32_t new_state)
{
	lapic_set_pmi_func((i386_intr_func_t)kpc_pmi_handler);

	/* dispatch to all CPUs */
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, &new_state );

	kpc_running = new_state;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config );

	return 0;
}
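
#if 0
/*
 * Illustrative call sequence only, not compiled: how the machine-
 * independent kpc layer is assumed to drive this file.  `remote_config`
 * and `remote_periods` are hypothetical, pre-filled kpc_config_remote
 * structures.
 */
kpc_set_config_arch(&remote_config);	/* program the event selects */
kpc_set_period_arch(&remote_periods);	/* arm the sampling periods */
kpc_set_running(KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK);
#endif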

/* PMI stuff */
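/*
 * Overflow interrupt handler, installed via lapic_set_pmi_func().  For
 * each counter whose bit is set in IA32_PERF_GLOBAL_STATUS: fold the
 * elapsed count into the 64-bit shadow, re-arm the counter with its
 * reload value, ack the overflow bit, and run the kperf action associated
 * with the counter, if any.
 */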
void
kpc_pmi_handler(__unused x86_saved_state_t *state)
{
	uint64_t status, extra;
	uint32_t ctr;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

			FIXED_SHADOW(ctr)
				+= kpc_fixed_max() - FIXED_RELOAD(ctr) + extra;

			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));

			if (FIXED_ACTIONID(ctr))
				kpc_sample_kperf(FIXED_ACTIONID(ctr));
		}
	}
#endif

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr)
				+= kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;

			/* kperf can grab the PMCs when it samples, so we need to make sure the overflow
			 * bits are in the correct state before the call to kperf_sample */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));

			if (CONFIGURABLE_ACTIONID(ctr))
				kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
		}
	}

	ml_set_interrupts_enabled(enabled);
}