/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/kpc.h>

#ifdef ARMA7
/* PMU v2 based implementation for A7 */
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];

static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;

/* Private */

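/*
 * Note on the enable bookkeeping below (ARMv7 PMU bit layout): in
 * PMCNTENSET/PMCNTENCLR, PMINTENSET/PMINTENCLR and PMOVSR, bit 31 is the
 * cycle counter (kpc counter 0) and bits [N-1:0] are the event counters
 * (kpc counters 1..N).  enable_counter()/disable_counter() also track
 * kpc_enabled_counters so that the global enable bit, PMCR.E, is only set
 * while at least one counter is in use.
 */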
static boolean_t
enable_counter(uint32_t counter)
{
    boolean_t enabled;
    uint32_t PMCNTENSET;
    /* Cycle counter is MSB; configurable counters reside in LSBs */
    uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

    /* Enabled? */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

    enabled = (PMCNTENSET & mask);
    if (!enabled) {
        /* Counter interrupt enable (PMINTENSET) */
        __asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

        /* Individual counter enable set (PMCNTENSET) */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

        kpc_enabled_counters++;

        /* 1st enabled counter? Set the master enable bit in PMCR */
        if (kpc_enabled_counters == 1) {
            uint32_t PMCR = 1;
            __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
        }
    }

    return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
    boolean_t enabled;
    uint32_t PMCNTENCLR;
    /* Cycle counter is MSB; configurable counters reside in LSBs */
    uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

    /* Enabled? */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

    enabled = (PMCNTENCLR & mask);
    if (enabled) {
        /* Individual counter enable clear (PMCNTENCLR) */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

        /* Counter interrupt disable (PMINTENCLR) */
        __asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

        kpc_enabled_counters--;

        /* Last enabled counter? Clear the master enable bit in PMCR */
        if (kpc_enabled_counters == 0) {
            uint32_t PMCR = 0;
            __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
        }
    }

    return enabled;
}

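/*
 * The event counters sit behind a selector: write the counter index to
 * PMSELR.SEL, then access PMXEVCNTR (count value) or PMXEVTYPER (event
 * selection) for that counter.  kpc counter 0 maps to the dedicated cycle
 * counter, PMCCNTR, which has its own encoding.
 */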
static uint64_t
read_counter(uint32_t counter)
{
    uint32_t low = 0;

    switch (counter) {
    case 0:
        /* Fixed counter */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Configurable. Set PMSELR... */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* ...then read PMXEVCNTR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low));
        break;
    default:
        /* Unknown counter index; nothing to read */
        break;
    }

    return (uint64_t)low;
}

static void
write_counter(uint32_t counter, uint64_t value)
{
    uint32_t low = value & 0xFFFFFFFF;

    switch (counter) {
    case 0:
        /* Fixed counter */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Configurable. Set PMSELR... */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* ...then write PMXEVCNTR */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low));
        break;
    default:
        /* Unknown counter index; nothing to write */
        break;
    }
}

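/*
 * Read a counter's current value and re-arm it with its reload value.
 * The reload value is set up in kpc_set_reload_xcall() so that the counter
 * overflows once the requested sampling period has elapsed.
 */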
static uint64_t
kpc_reload_counter(int ctr)
{
    uint64_t old = read_counter(ctr);
    write_counter(ctr, FIXED_RELOAD(ctr));
    return old;
}

static void
set_running_fixed(boolean_t on)
{
    int i;
    boolean_t enabled;
    int n = KPC_ARM_FIXED_COUNT;

    enabled = ml_set_interrupts_enabled(FALSE);

    for (i = 0; i < n; i++) {
        if (on) {
            enable_counter(i);
        } else {
            disable_counter(i);
        }
    }

    ml_set_interrupts_enabled(enabled);
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & target_mask) == 0) {
            continue;
        }
        assert(kpc_controls_counter(offset + i));

        if ((1ULL << i) & state_mask) {
            enable_counter(offset + i);
        } else {
            disable_counter(offset + i);
        }
    }

    ml_set_interrupts_enabled(enabled);
}

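/*
 * PMI (counter overflow interrupt) handler.  For every counter whose
 * overflow flag is set in PMOVSR, the counter is re-armed and its 64-bit
 * shadow value is advanced by the events observed since the last reload:
 *
 *     shadow += (max - reload + 1)    events up to and including the wrap
 *             + extra                 events counted after the wrap
 *
 * where "extra" is the hardware value returned by kpc_reload_counter().
 * If an action id is attached to the counter, kperf is asked to take a
 * sample as well.
 */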
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
{
    uint64_t extra;
    int ctr;
    int enabled;

    enabled = ml_set_interrupts_enabled(FALSE);

    /* The pmi must be delivered to the CPU that generated it */
    if (source != getCpuDatap()->interrupt_nub) {
        panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
    }

    for (ctr = 0;
        ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
        ctr++) {
        uint32_t PMOVSR;
        uint32_t mask;

        /* check the counter for overflow */
        if (ctr == 0) {
            mask = 1 << 31;
        } else {
            mask = 1 << (ctr - 1);
        }

        /* read PMOVSR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

        if (PMOVSR & mask) {
            extra = kpc_reload_counter(ctr);

            FIXED_SHADOW(ctr)
                += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

            if (FIXED_ACTIONID(ctr)) {
                kpc_sample_kperf(FIXED_ACTIONID(ctr));
            }

            /* clear PMOVSR bit */
            __asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
        }
    }

    ml_set_interrupts_enabled(enabled);
}

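/*
 * Cross-call target run on every CPU by kpc_set_running_arch().  Each CPU
 * applies the new enable state locally, then decrements the shared sync
 * counter; the last CPU to finish wakes the initiating thread waiting on
 * the sync event (in cpu_broadcast_xcall()).  The config, reload and read
 * cross-calls below follow the same pattern.
 */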
static void
kpc_set_running_xcall( void *vstate )
{
    struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
    assert(mp_config);

    if (kpc_controls_fixed_counters()) {
        set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
    }

    set_running_configurable(mp_config->cfg_target_mask,
        mp_config->cfg_state_mask);

    if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) {
        thread_wakeup((event_t) &kpc_xcall_sync);
    }
}

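/*
 * Event configuration uses the same PMSELR indirection: select the counter
 * (SEL = 31 for the cycle counter, 0..3 for the event counters), then read
 * or write PMXEVTYPER for that counter.
 */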
static uint64_t
get_counter_config(uint32_t counter)
{
    uint32_t config = 0;

    switch (counter) {
    case 0:
        /* Fixed counter accessed via top bit... */
        counter = 31;
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
        /* Read PMXEVTYPER */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Offset */
        counter -= 1;
        /* Write PMSELR.SEL to select the configurable counter */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter));
        /* Read PMXEVTYPER to get the config */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config));
        break;
    default:
        break;
    }

    return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
    switch (counter) {
    case 0:
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
        break;
    case 1:
    case 2:
    case 3:
    case 4:
        /* Write PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF));
        break;
    default:
        break;
    }
}

/* Common */

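/*
 * PMCR.N (bits [15:11]) reports how many event counters the PMU
 * implements; check that the hardware provides at least as many as kpc
 * expects to expose as configurable counters.
 */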
void
kpc_arch_init(void)
{
    uint32_t PMCR;
    uint32_t event_counters;

    /* read PMCR and determine the number of event counters */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR));
    event_counters = (PMCR >> 11) & 0x1F;

    assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT);
}

uint32_t
kpc_get_classes(void)
{
    return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

uint32_t
kpc_fixed_count(void)
{
    return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
    return KPC_ARM_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
    return KPC_ARM_FIXED_COUNT;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
    configv[0] = get_counter_config(0);
    return 0;
}

uint64_t
kpc_fixed_max(void)
{
    return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
    return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1;
}

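/*
 * Reading a counter combines the 32-bit hardware value with its 64-bit
 * shadow.  If the counter's overflow flag is still pending in PMOVSR (the
 * PMI has not run yet), the wrapped span is added explicitly, mirroring
 * the arithmetic in kpc_pmi_handler(); otherwise only the delta since the
 * last reload is added to the shadow.
 */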
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

    assert(counterv);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        uint32_t PMOVSR;
        uint32_t mask;
        uint64_t ctr;

        if (((1ULL << i) & pmc_mask) == 0) {
            continue;
        }
        ctr = read_counter(i + offset);

        /* check the counter for overflow */
        mask = 1 << i;

        /* read PMOVSR */
        __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

        if (PMOVSR & mask) {
            ctr = CONFIGURABLE_SHADOW(i) +
                (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
                ctr;
        } else {
            ctr = CONFIGURABLE_SHADOW(i) +
                (ctr - CONFIGURABLE_RELOAD(i));
        }

        *counterv++ = ctr;
    }

    return 0;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
    uint32_t PMOVSR;
    uint32_t mask;
    uint64_t ctr;

    /* check the counter for overflow */
    mask = 1 << 31;

    /* read PMOVSR */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

    ctr = read_counter(0);

    if (PMOVSR & mask) {
        ctr = FIXED_SHADOW(0) +
            (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) +
            (ctr & 0xFFFFFFFF);
    } else {
        ctr = FIXED_SHADOW(0) +
            (ctr - FIXED_RELOAD(0));
    }

    counterv[0] = ctr;

    return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
    return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
        ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
    unsigned int cpu;

    assert(mp_config);

    if (first_time) {
        kprintf( "kpc: setting PMI handler\n" );
        PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler);
        for (cpu = 0; cpu < real_ncpus; cpu++) {
            PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id,
                TRUE);
        }
        first_time = 0;
    }

    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall,
        mp_config);

    kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
    kpc_running_classes = mp_config->classes;
    kpc_configured = 1;

    return 0;
}

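/*
 * kpc_idle() / kpc_idle_exit() bracket idle with save_regs() /
 * restore_regs(), presumably because PMU state may not survive the idle
 * power state.  The enable set, overflow flags, event selections and
 * counter values are stashed per CPU and replayed on exit; any overflow
 * that was pending when the PMU was stopped is folded into the shadow
 * counters during restore, since no PMI will be delivered for it.
 */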
static void
save_regs(void)
{
    int i;
    int cpuid = current_processor()->cpu_id;
    uint32_t PMCR = 0;

    __asm__ volatile ("dmb ish");

    /* Clear master enable */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

    /* Save individual enable state */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

    /* Save PMOVSR */
    __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

    /* Select fixed counter with PMSELR.SEL */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
    /* Read PMXEVTYPER */
    __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

    /* Save configurable event selections */
    for (i = 0; i < 4; i++) {
        /* Select counter with PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
        /* Read PMXEVTYPER */
        __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
    }

    /* Finally, save count for each counter */
    for (i = 0; i < 5; i++) {
        saved_counter[cpuid][i] = read_counter(i);
    }
}

static void
restore_regs(void)
{
    int i;
    int cpuid = current_processor()->cpu_id;
    uint64_t extra;
    uint32_t PMCR = 1;

    /* Restore counter values */
    for (i = 0; i < 5; i++) {
        /* did we overflow? if so handle it now since we won't get a pmi */
        uint32_t mask;

        /* check the counter for overflow */
        if (i == 0) {
            mask = 1 << 31;
        } else {
            mask = 1 << (i - 1);
        }

        if (saved_PMOVSR[cpuid] & mask) {
            extra = kpc_reload_counter(i);

            /*
             * CONFIGURABLE_* directly follows FIXED, so we can simply
             * increment the index here. Although it's ugly.
             */
            FIXED_SHADOW(i)
                += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra;

            if (FIXED_ACTIONID(i)) {
                kpc_sample_kperf(FIXED_ACTIONID(i));
            }
        } else {
            write_counter(i, saved_counter[cpuid][i]);
        }
    }

    /* Restore configuration - first, the fixed... */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
    /* Write PMXEVTYPER */
    __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0]));

    /* ...then the configurable */
    for (i = 0; i < 4; i++) {
        /* Select counter with PMSELR.SEL */
        __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
        /* Write PMXEVTYPER */
        __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1]));
    }

    /* Restore enable state */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid]));

    /* Counter master re-enable */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
}

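/*
 * Cross-call target for kpc_set_period_arch().  A requested period P is
 * programmed as a reload value of (max - P), so the counter overflows and
 * raises a PMI after P further events.  A period of 0 is treated as
 * "count the full width of the counter".
 */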
static void
kpc_set_reload_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
    uint64_t *new_period = NULL, max = kpc_configurable_max();
    boolean_t enabled;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_period = mp_config->configv;

    enabled = ml_set_interrupts_enabled(FALSE);

    if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
        /* update shadow counters */
        kpc_get_fixed_counters(&FIXED_SHADOW(0));

        /* set the new period */
        count = kpc_fixed_count();
        for (uint32_t i = 0; i < count; ++i) {
            if (*new_period == 0) {
                *new_period = kpc_fixed_max();
            }
            FIXED_RELOAD(i) = max - *new_period;
            /* reload the counter if possible */
            kpc_reload_counter(i);
            /* next period value */
            new_period++;
        }
    }

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        /*
         * Update _all_ shadow counters, this cannot be done for only
         * selected PMCs. Otherwise, we would corrupt the configurable
         * shadow buffer since the PMCs are muxed according to the pmc
         * mask.
         */
        uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
        kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

        /* set the new period */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            /* ignore the counter */
            if (((1ULL << i) & mp_config->pmc_mask) == 0) {
                continue;
            }
            if (*new_period == 0) {
                *new_period = kpc_configurable_max();
            }
            CONFIGURABLE_RELOAD(i) = max - *new_period;
            /* reload the counter */
            kpc_reload_counter(offset + i);
            /* next period value */
            new_period++;
        }
    }

    ml_set_interrupts_enabled(enabled);

    if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) {
        thread_wakeup((event_t) &kpc_reload_sync);
    }
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

    assert(configv);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if ((1ULL << i) & pmc_mask) {
            *configv++ = get_counter_config(i + offset);
        }
    }

    return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    assert(configv);

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & pmc_mask) == 0) {
            continue;
        }
        assert(kpc_controls_counter(i + offset));

        set_counter_config(i + offset, *configv++);
    }

    ml_set_interrupts_enabled(enabled);

    return 0;
}

static uint32_t kpc_config_sync;

static void
kpc_set_config_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    kpc_config_t *new_config = NULL;
    uint32_t classes = 0;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_config = mp_config->configv;

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        kpc_set_configurable_config(new_config, mp_config->pmc_mask);
        new_config += kpc_popcount(mp_config->pmc_mask);
    }

    if (hw_atomic_sub(&kpc_config_sync, 1) == 0) {
        thread_wakeup((event_t) &kpc_config_sync);
    }
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}

void
kpc_idle(void)
{
    if (kpc_configured) {
        save_regs();
    }
}

void
kpc_idle_exit(void)
{
    if (kpc_configured) {
        restore_regs();
    }
}

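/*
 * kpc_get_all_cpus_counters() broadcasts a cross-call; each CPU writes its
 * counter values into the shared buffer at offset
 * (cpu_number() * buf_stride) and atomically adds the number of counters
 * it contributed to nb_counters, which is returned to the caller.
 */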
static uint32_t kpc_xread_sync;

static void
kpc_get_curcpu_counters_xcall(void *args)
{
    struct kpc_get_counters_remote *handler = args;
    int offset = 0, r = 0;

    assert(handler);
    assert(handler->buf);

    offset = cpu_number() * handler->buf_stride;
    r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

    /* number of counters added by this CPU, needs to be atomic */
    hw_atomic_add(&(handler->nb_counters), r);

    if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) {
        thread_wakeup((event_t) &kpc_xread_sync);
    }
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
    int enabled = 0;

    struct kpc_get_counters_remote hdl = {
        .classes = classes, .nb_counters = 0,
        .buf_stride = kpc_get_counter_count(classes),
        .buf = buf
    };

    assert(buf);

    enabled = ml_set_interrupts_enabled(FALSE);

    if (curcpu) {
        *curcpu = current_processor()->cpu_id;
    }
    cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

    ml_set_interrupts_enabled(enabled);

    return hdl.nb_counters;
}

int
kpc_get_pmu_version(void)
{
    return KPC_PMU_ARM_V2;
}

int
kpc_set_sw_inc( uint32_t mask )
{
    /* Only works with the configurable counters set to count the increment event (0x0) */

    /* Write to PMSWINC */
    __asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

    return 0;
}

#else /* !ARMA7 */

/* no kpc */

void
kpc_arch_init(void)
{
    /* No-op */
}

uint32_t
kpc_get_classes(void)
{
    return 0;
}

uint32_t
kpc_fixed_count(void)
{
    return 0;
}

uint32_t
kpc_configurable_count(void)
{
    return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
    return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
    return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
    return 0;
}

uint64_t
kpc_fixed_max(void)
{
    return 0;
}

uint64_t
kpc_configurable_max(void)
{
    return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
    return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
    return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
    return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
    return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
    return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
    return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
    return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
    return ENOTSUP;
}

void
kpc_idle(void)
{
    // do nothing
}

void
kpc_idle_exit(void)
{
    // do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
#pragma unused(classes)
#pragma unused(curcpu)
#pragma unused(buf)

    return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
    return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
    return KPC_PMU_ERROR;
}

#endif /* ARMA7 */

/*
 * RAWPMU isn't implemented for any of the 32-bit ARMs.
 */

uint32_t
kpc_rawpmu_config_count(void)
{
    return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
    return 0;
}