/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <i386/lapic.h>
#include <sys/errno.h>
#include <kperf/buffer.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

/* Fixed counter mask -- three counters, each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)
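/*
 * The two masks above rely on IA32_FIXED_CTR_CTRL allocating a 4-bit field
 * per fixed-function counter: bits 0-1 of each field select the rings to
 * count in (0x3 = all rings) and bit 3 enables a PMI on overflow. With
 * three fixed counters that yields 0x333 (count everywhere) and 0x888
 * (PMI on overflow) respectively.
 */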

#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Read counters with the non-serialising RDPMC instruction */
#define USE_RDPMC

#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL<<30)
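/*
 * With RDPMC the counter index goes in ECX; setting bit 30 of the index
 * selects the fixed-function counters rather than the programmable PMCs,
 * which is what RDPMC_FIXED_COUNTER_SELECTOR provides for IA32_FIXED_CTRx().
 */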

/* track the last config we enabled */
static uint32_t kpc_running = 0;

/* PMC / MSR accesses */

static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}

static uint64_t
IA32_FIXED_CTRx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
#endif /* !USE_RDPMC */
}

#ifdef FIXED_COUNTER_SHADOW
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif

static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}

static void
wrIA32_PMCx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
}

static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}

static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}


/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(void)
{
	return (kpc_running & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK;
}

uint32_t
kpc_fixed_count(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_number;
}

uint32_t
kpc_configurable_count(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.number;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}

uint32_t
kpc_configurable_config_count(void)
{
	return kpc_configurable_count();
}

uint32_t
kpc_rawpmu_config_count(void)
{
	// RAW PMU access not implemented.
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

static uint8_t
kpc_fixed_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_width;
}

static uint8_t
kpc_configurable_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.width;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << kpc_fixed_width()) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << kpc_configurable_width()) - 1;
}
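
/*
 * Note: the counter widths above come from CPUID leaf 0xA, the architectural
 * performance monitoring leaf. On recent Intel parts both the fixed and the
 * programmable counters are typically 48 bits wide, so these maxima usually
 * evaluate to 0xFFFFFFFFFFFF.
 */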

#ifdef FIXED_COUNTER_SHADOW
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif

static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}
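
/*
 * Reload/shadow accounting (see kpc_set_reload_mp_call below): a counter's
 * RELOAD value is the hardware starting point after each overflow, and its
 * SHADOW accumulates the events observed across reloads. Reading a counter
 * therefore yields SHADOW + (current hardware value - RELOAD), with an
 * additional wrap term folded in once the overflow bit has been observed.
 */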

void kpc_pmi_handler(x86_saved_state_t *state);

static void
set_running_fixed(boolean_t on)
{
	uint64_t global = 0, mask = 0, fixed_ctrl = 0;
	int i;
	boolean_t enabled;

	if( on )
		/* these are per-thread in SMT */
		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
	else
		/* don't allow disabling fixed counters */
		return;

	wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for( i = 0; i < (int) kpc_fixed_count(); i++ )
		mask |= (1ULL<<(32+i));

	if( on )
		global |= mask;
	else
		global &= ~mask;

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}
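
/*
 * Both set_running_fixed() above and set_running_configurable() below gate
 * counters through IA32_PERF_GLOBAL_CTRL: bit i enables programmable counter
 * PMCi, while bit (32 + i) enables fixed counter i, hence the
 * (1ULL << (32 + i)) mask used for the fixed class.
 */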

static void
set_running_configurable(boolean_t on)
{
	uint64_t global = 0, mask = 0;
	uint64_t cfg, save;
	int i;
	boolean_t enabled;
	int ncnt = (int) kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for( i = 0; i < ncnt; i++ ) {
		mask |= (1ULL<<i);

		/* need to save and restore counter since it resets when reconfigured */
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

	if( on )
		global |= mask;
	else
		global &= ~mask;

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_running_mp_call( void *vstate )
{
	uint32_t new_state = *(uint32_t*)vstate;

	set_running_fixed((new_state & KPC_CLASS_FIXED_MASK) != 0);
	set_running_configurable((new_state & KPC_CLASS_CONFIGURABLE_MASK) != 0);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();

	return 0;
}

static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
	int i, n = kpc_fixed_count();

#ifdef FIXED_COUNTER_SHADOW
	uint64_t status;

	/* snap the counters */
	for( i = 0; i < n; i++ ) {
		counterv[i] = FIXED_SHADOW(i) +
			(IA32_FIXED_CTRx(i) - FIXED_RELOAD(i));
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed. */
	for( i = 0; i < n; i++ ) {
		if ((1ull << (i + 32)) & status)
			counterv[i] = FIXED_SHADOW(i) +
				(kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + IA32_FIXED_CTRx(i);
	}
#else
	for( i = 0; i < n; i++ )
		counterv[i] = IA32_FIXED_CTRx(i);
#endif

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);

	for( i = 0; i < n; i++ )
		configv[i] = IA32_PERFEVTSELx(i);

	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t save;

	for( i = 0; i < n; i++ ) {
		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);
		/*
		 * Some bits are not safe to set from user space.
		 * Allow these bits to be set:
		 *
		 *   0-7    Event select
		 *   8-15   UMASK
		 *   16     USR
		 *   17     OS
		 *   18     E
		 *   22     EN
		 *   23     INV
		 *   24-31  CMASK
		 *
		 * Excluding:
		 *
		 *   19     PC
		 *   20     INT
		 *   21     AnyThread
		 *   32     IN_TX
		 *   33     IN_TXCP
		 *   34-63  Reserved
		 */
		wrIA32_PERFEVTSELx(i, configv[i] & 0xffc7ffffull);
		wrIA32_PMCx(i, save);
	}

	return 0;
}
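
/*
 * Illustrative example (hypothetical values, not taken from this file):
 * selecting the architectural "UnHalted Core Cycles" event (event 0x3C,
 * umask 0x00), counted in both user and kernel mode, would pass
 *
 *	configv[i] = 0x3C | (0x00 << 8) | (1 << 16) | (1 << 17) | (1 << 22);
 *
 * i.e. 0x43003C, which survives the 0xffc7ffff mask unchanged. Bit 20 (INT)
 * is deliberately not taken from user space; set_running_configurable()
 * ORs in IA32_PERFEVTSEL_PMI itself.
 */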

int
kpc_get_configurable_counters(uint64_t *counterv)
{
	int i, n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t status;

	/* snap the counters */
	for( i = 0; i < n; i++ ) {
		counterv[i] = CONFIGURABLE_SHADOW(i) +
			(IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed. */
	for( i = 0; i < n; i++ ) {
		if ((1ull << i) & status) {
			counterv[i] = CONFIGURABLE_SHADOW(i) +
				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}

static void
kpc_set_config_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = mp_config->classes;
	kpc_config_t *new_config = mp_config->configv;
	int count = 0;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		kpc_set_configurable_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_reload_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint64_t max = kpc_configurable_max();
	uint32_t i, count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *new_period;
	uint64_t classes;
	int enabled;

	classes = mp_config->classes;
	new_period = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		enabled = ml_set_interrupts_enabled(FALSE);

		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0));

		for (i = 0; i < count; i++) {
			if (new_period[i] == 0)
				new_period[i] = kpc_configurable_max();

			CONFIGURABLE_RELOAD(i) = max - new_period[i];

			kpc_reload_configurable(i);

			/* clear overflow bit just in case */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);
		}

		ml_set_interrupts_enabled(enabled);
	}
}
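
/*
 * Worked example (illustrative numbers): with 48-bit programmable counters,
 * kpc_configurable_max() is 0xFFFFFFFFFFFF. Requesting a period of 1,000,000
 * events sets CONFIGURABLE_RELOAD(i) = 0xFFFFFFFFFFFF - 1000000, so the
 * counter overflows and raises a PMI after roughly one million increments,
 * at which point kpc_pmi_handler() reloads it and folds the elapsed count
 * into CONFIGURABLE_SHADOW(i).
 */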

int
kpc_set_period_arch( struct kpc_config_remote *mp_config )
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config );

	return 0;
}


/* interface functions */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

int
kpc_set_running(uint32_t new_state)
{
	lapic_set_pmi_func((i386_intr_func_t)kpc_pmi_handler);

	/* dispatch to all CPUs */
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, &new_state );

	kpc_running = new_state;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config );

	return 0;
}

/* PMI handling */
void
kpc_pmi_handler(__unused x86_saved_state_t *state)
{
	uint64_t status, extra;
	uint32_t ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

			FIXED_SHADOW(ctr)
				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));

			if (FIXED_ACTIONID(ctr))
				kpc_sample_kperf(FIXED_ACTIONID(ctr));
		}
	}
#endif

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr)
				+= kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;

			/* kperf can grab the PMCs when it samples so we need to make sure the overflow
			 * bits are in the correct state before the call to kperf_sample */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));

			if (CONFIGURABLE_ACTIONID(ctr))
				kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
		}
	}

	ml_set_interrupts_enabled(enabled);
}

int
kpc_force_all_ctrs_arch( task_t task __unused, int val __unused )
{
	/* TODO: reclaim counter ownership from XCPM */
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}