/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/kpc.h>

#ifdef ARMA7
/* PMU v2 based implementation for A7 */
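/*
 * All PMU state lives in the CP15 c9 register file; the MRC/MCR
 * encodings used throughout are, per the ARMv7-A architecture:
 *
 *   c9, c12, 0  PMCR        control (E bit = master enable, N field)
 *   c9, c12, 1  PMCNTENSET  per-counter enable (set)
 *   c9, c12, 2  PMCNTENCLR  per-counter enable (clear)
 *   c9, c12, 3  PMOVSR      overflow flag status
 *   c9, c12, 4  PMSWINC     software increment
 *   c9, c12, 5  PMSELR      counter select for PMXEV* accesses
 *   c9, c13, 0  PMCCNTR     cycle counter
 *   c9, c13, 1  PMXEVTYPER  event type for the selected counter
 *   c9, c13, 2  PMXEVCNTR   count value of the selected counter
 *   c9, c14, 1  PMINTENSET  overflow interrupt enable (set)
 *   c9, c14, 2  PMINTENCLR  overflow interrupt enable (clear)
 */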
static uint32_t saved_PMXEVTYPER[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMCNTENSET[MAX_CPUS];
static uint64_t saved_counter[MAX_CPUS][KPC_ARM_TOTAL_COUNT];
static uint32_t saved_PMOVSR[MAX_CPUS];

static uint32_t kpc_configured = 0;
static uint32_t kpc_xcall_sync;
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_reload_sync;
static uint32_t kpc_enabled_counters = 0;

static int first_time = 1;

/* Private */

static boolean_t
enable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENSET;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
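	/* e.g. counter 0 -> bit 31 (PMCCNTR); counter 3 -> bit 2 (event counter 2) */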
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Enabled? */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET));

	enabled = (PMCNTENSET & mask);
	if (!enabled) {
		/* Counter interrupt enable (PMINTENSET) */
		__asm__ volatile("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask));

		/* Individual counter enable set (PMCNTENSET) */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask));

		kpc_enabled_counters++;

		/* 1st enabled counter? Set the master enable bit in PMCR */
		if (kpc_enabled_counters == 1) {
			uint32_t PMCR = 1;
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	boolean_t enabled;
	uint32_t PMCNTENCLR;
	/* Cycle counter is MSB; configurable counters reside in LSBs */
	uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1));

	/* Enabled? */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR));

	enabled = (PMCNTENCLR & mask);
	if (enabled) {
		/* Individual counter enable clear (PMCNTENCLR) */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask));

		/* Counter interrupt disable (PMINTENCLR) */
		__asm__ volatile("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask));

		kpc_enabled_counters--;

		/* Last enabled counter? Clear the master enable bit in PMCR */
		if (kpc_enabled_counters == 0) {
			uint32_t PMCR = 0;
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));
		}
	}

	return enabled;
}
118 | ||
119 | static uint64_t | |
120 | read_counter(uint32_t counter) | |
121 | { | |
122 | uint32_t low = 0; | |
123 | ||
124 | switch (counter) { | |
125 | case 0: | |
126 | /* Fixed counter */ | |
127 | __asm__ volatile("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low)); | |
128 | break; | |
129 | case 1: | |
130 | case 2: | |
131 | case 3: | |
132 | case 4: | |
133 | /* Configurable. Set PMSELR... */ | |
134 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); | |
135 | /* ...then read PMXEVCNTR */ | |
136 | __asm__ volatile("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low)); | |
137 | break; | |
138 | default: | |
139 | /* ??? */ | |
140 | break; | |
141 | } | |
142 | ||
143 | return (uint64_t)low; | |
144 | } | |
145 | ||
146 | static void | |
147 | write_counter(uint32_t counter, uint64_t value) | |
148 | { | |
149 | uint32_t low = value & 0xFFFFFFFF; | |
150 | ||
151 | switch (counter) { | |
152 | case 0: | |
153 | /* Fixed counter */ | |
154 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low)); | |
155 | break; | |
156 | case 1: | |
157 | case 2: | |
158 | case 3: | |
159 | case 4: | |
160 | /* Configurable. Set PMSELR... */ | |
161 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); | |
162 | /* ...then write PMXEVCNTR */ | |
163 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low)); | |
164 | break; | |
165 | default: | |
166 | /* ??? */ | |
167 | break; | |
168 | } | |
169 | } | |
170 | ||
static uint64_t
kpc_reload_counter(int ctr)
{
	uint64_t old = read_counter(ctr);
	write_counter(ctr, FIXED_RELOAD(ctr));
	return old;
}
178 | ||
179 | static void | |
180 | set_running_fixed(boolean_t on) | |
181 | { | |
182 | int i; | |
183 | boolean_t enabled; | |
184 | int n = KPC_ARM_FIXED_COUNT; | |
185 | ||
186 | enabled = ml_set_interrupts_enabled(FALSE); | |
187 | ||
188 | for( i = 0; i < n; i++ ) { | |
189 | if (on) { | |
190 | enable_counter(i); | |
191 | } else { | |
192 | disable_counter(i); | |
193 | } | |
194 | } | |
195 | ||
196 | ml_set_interrupts_enabled(enabled); | |
197 | } | |
198 | ||
199 | static void | |
200 | set_running_configurable(uint64_t target_mask, uint64_t state_mask) | |
201 | { | |
202 | uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count(); | |
203 | boolean_t enabled; | |
204 | ||
205 | enabled = ml_set_interrupts_enabled(FALSE); | |
206 | ||
207 | for (uint32_t i = 0; i < cfg_count; ++i) { | |
208 | if (((1ULL << i) & target_mask) == 0) | |
209 | continue; | |
210 | assert(kpc_controls_counter(offset + i)); | |
211 | ||
212 | if ((1ULL << i) & state_mask) { | |
213 | enable_counter(offset + i); | |
214 | } else { | |
215 | disable_counter(offset + i); | |
216 | } | |
217 | } | |
218 | ||
219 | ml_set_interrupts_enabled(enabled); | |
220 | } | |
221 | ||
void kpc_pmi_handler(cpu_id_t source);
void
kpc_pmi_handler(cpu_id_t source)
{
	uint64_t extra;
	int ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* The pmi must be delivered to the CPU that generated it */
	if (source != getCpuDatap()->interrupt_nub) {
		panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub);
	}

	for (ctr = 0;
	     ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT);
	     ctr++)
	{
		uint32_t PMOVSR;
		uint32_t mask;

		/* check the counter for overflow */
		if (ctr == 0) {
			mask = 1 << 31;
		} else {
			mask = 1 << (ctr - 1);
		}

		/* read PMOVSR */
		__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			extra = kpc_reload_counter(ctr);

			FIXED_SHADOW(ctr)
				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra;

			if (FIXED_ACTIONID(ctr))
				kpc_sample_kperf(FIXED_ACTIONID(ctr));

			/* clear PMOVSR bit */
			__asm__ volatile("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask));
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters())
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);

	set_running_configurable(mp_config->cfg_target_mask,
				 mp_config->cfg_state_mask);

	if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}
287 | ||
288 | static uint64_t | |
289 | get_counter_config(uint32_t counter) | |
290 | { | |
291 | uint32_t config = 0; | |
292 | ||
293 | switch (counter) { | |
294 | case 0: | |
295 | /* Fixed counter accessed via top bit... */ | |
296 | counter = 31; | |
297 | /* Write PMSELR.SEL */ | |
298 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); | |
299 | /* Read PMXEVTYPER */ | |
300 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : "=r" (config)); | |
301 | break; | |
302 | case 1: | |
303 | case 2: | |
304 | case 3: | |
305 | case 4: | |
306 | /* Offset */ | |
307 | counter -= 1; | |
308 | /* Write PMSELR.SEL to select the configurable counter */ | |
309 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); | |
310 | /* Read PMXEVTYPER to get the config */ | |
311 | __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config)); | |
312 | break; | |
313 | default: | |
314 | break; | |
315 | } | |
316 | ||
317 | return config; | |
318 | } | |
319 | ||
320 | static void | |
321 | set_counter_config(uint32_t counter, uint64_t config) | |
322 | { | |
323 | switch (counter) { | |
324 | case 0: | |
325 | /* Write PMSELR.SEL */ | |
326 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); | |
327 | /* Write PMXEVTYPER */ | |
328 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); | |
329 | break; | |
330 | case 1: | |
331 | case 2: | |
332 | case 3: | |
333 | case 4: | |
334 | /* Write PMSELR.SEL */ | |
335 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); | |
336 | /* Write PMXEVTYPER */ | |
337 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); | |
338 | break; | |
339 | default: | |
340 | break; | |
341 | } | |
342 | } | |
343 | ||
344 | /* Common */ | |
345 | ||
346 | void | |
347 | kpc_arch_init(void) | |
348 | { | |
349 | uint32_t PMCR; | |
350 | uint32_t event_counters; | |
351 | ||
352 | /* read PMOVSR and determine the number of event counters */ | |
353 | __asm__ volatile("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR)); | |
354 | event_counters = (PMCR >> 11) & 0x1F; | |
355 | ||
356 | assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT); | |
357 | } | |
358 | ||
359 | uint32_t | |
360 | kpc_get_classes(void) | |
361 | { | |
362 | return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK; | |
363 | } | |
364 | ||
365 | uint32_t | |
366 | kpc_fixed_count(void) | |
367 | { | |
368 | return KPC_ARM_FIXED_COUNT; | |
369 | } | |
370 | ||
371 | uint32_t | |
372 | kpc_configurable_count(void) | |
373 | { | |
374 | return KPC_ARM_CONFIGURABLE_COUNT; | |
375 | } | |
376 | ||
377 | uint32_t | |
378 | kpc_fixed_config_count(void) | |
379 | { | |
380 | return KPC_ARM_FIXED_COUNT; | |
381 | } | |
382 | ||
383 | uint32_t | |
384 | kpc_configurable_config_count(uint64_t pmc_mask) | |
385 | { | |
386 | assert(kpc_popcount(pmc_mask) <= kpc_configurable_count()); | |
387 | return kpc_popcount(pmc_mask); | |
388 | } | |
389 | ||
390 | int | |
391 | kpc_get_fixed_config(kpc_config_t *configv) | |
392 | { | |
393 | configv[0] = get_counter_config(0); | |
394 | return 0; | |
395 | } | |
396 | ||
397 | uint64_t | |
398 | kpc_fixed_max(void) | |
399 | { | |
400 | return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1; | |
401 | } | |
402 | ||
403 | uint64_t | |
404 | kpc_configurable_max(void) | |
405 | { | |
406 | return (1ULL << KPC_ARM_COUNTER_WIDTH) - 1; | |
407 | } | |
408 | ||
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		uint32_t PMOVSR;
		uint32_t mask;
		uint64_t ctr;

		if (((1ULL << i) & pmc_mask) == 0)
			continue;
		ctr = read_counter(i + offset);

		/* check the counter for overflow */
		mask = 1 << i;

		/* read PMOVSR */
		__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR));

		if (PMOVSR & mask) {
			ctr = CONFIGURABLE_SHADOW(i) +
				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
				ctr;
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
				(ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}
445 | ||
446 | int | |
447 | kpc_get_fixed_counters(uint64_t *counterv) | |
448 | { | |
449 | uint32_t PMOVSR; | |
450 | uint32_t mask; | |
451 | uint64_t ctr; | |
452 | ||
453 | /* check the counter for overflow */ | |
454 | mask = 1 << 31; | |
455 | ||
456 | /* read PMOVSR */ | |
457 | __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); | |
458 | ||
459 | ctr = read_counter(0); | |
460 | ||
461 | if (PMOVSR & mask) { | |
462 | ctr = FIXED_SHADOW(0) + | |
463 | (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) + | |
464 | (ctr & 0xFFFFFFFF); | |
465 | } else { | |
466 | ctr = FIXED_SHADOW(0) + | |
467 | (ctr - FIXED_RELOAD(0)); | |
468 | } | |
469 | ||
470 | counterv[0] = ctr; | |
471 | ||
472 | return 0; | |
473 | } | |
boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}
487 | ||
488 | int | |
489 | kpc_set_running_arch(struct kpc_running_remote *mp_config) | |
490 | { | |
491 | unsigned int cpu; | |
492 | ||
493 | assert(mp_config); | |
494 | ||
495 | if (first_time) { | |
496 | kprintf( "kpc: setting PMI handler\n" ); | |
497 | PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler); | |
498 | for (cpu = 0; cpu < real_ncpus; cpu++) | |
499 | PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id, | |
500 | TRUE); | |
501 | first_time = 0; | |
502 | } | |
503 | ||
504 | /* dispatch to all CPUs */ | |
505 | cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, | |
506 | mp_config); | |
507 | ||
508 | kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask; | |
509 | kpc_running_classes = mp_config->classes; | |
510 | kpc_configured = 1; | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
static void
save_regs(void)
{
	int i;
	int cpuid = current_processor()->cpu_id;
	uint32_t PMCR = 0;

	__asm__ volatile("dmb ish");

	/* Clear master enable */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR));

	/* Save individual enable state */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid]));

	/* Save PMOVSR */
	__asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid]));

	/* Select fixed counter with PMSELR.SEL */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31));
	/* Read PMXEVTYPER */
	__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0]));

	/* Save configurable event selections */
	for (i = 0; i < 4; i++) {
		/* Select counter with PMSELR.SEL */
		__asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i));
		/* Read PMXEVTYPER */
		__asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1]));
	}

	/* Finally, save count for each counter */
	for (i = 0; i < 5; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}
551 | ||
552 | static void | |
553 | restore_regs(void) | |
554 | { | |
555 | int i; | |
556 | int cpuid = current_processor()->cpu_id; | |
557 | uint64_t extra; | |
558 | uint32_t PMCR = 1; | |
559 | ||
560 | /* Restore counter values */ | |
561 | for (i = 0; i < 5; i++) { | |
562 | /* did we overflow? if so handle it now since we won't get a pmi */ | |
563 | uint32_t mask; | |
564 | ||
565 | /* check the counter for overflow */ | |
566 | if (i == 0) { | |
567 | mask = 1 << 31; | |
568 | } else { | |
569 | mask = 1 << (i - 1); | |
570 | } | |
571 | ||
572 | if (saved_PMOVSR[cpuid] & mask) { | |
573 | extra = kpc_reload_counter(i); | |
574 | ||
575 | /* | |
576 | * CONFIGURABLE_* directly follows FIXED, so we can simply | |
577 | * increment the index here. Although it's ugly. | |
578 | */ | |
579 | FIXED_SHADOW(i) | |
580 | += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra; | |
581 | ||
582 | if (FIXED_ACTIONID(i)) | |
583 | kpc_sample_kperf(FIXED_ACTIONID(i)); | |
584 | } else { | |
585 | write_counter(i, saved_counter[cpuid][i]); | |
586 | } | |
587 | } | |
588 | ||
589 | /* Restore configuration - first, the fixed... */ | |
590 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); | |
591 | /* Write PMXEVTYPER */ | |
592 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0])); | |
593 | ||
594 | /* ...then the configurable */ | |
595 | for (i = 0; i < 4; i++) { | |
596 | /* Select counter with PMSELR.SEL */ | |
597 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i)); | |
598 | /* Write PMXEVTYPER */ | |
599 | __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1])); | |
600 | } | |
601 | ||
602 | /* Restore enable state */ | |
603 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid])); | |
604 | ||
605 | /* Counter master re-enable */ | |
606 | __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); | |
607 | } | |
608 | ||
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if ((classes & KPC_CLASS_FIXED_MASK) && kpc_controls_fixed_counters()) {
		/* update shadow counters */
		kpc_get_fixed_counters(&FIXED_SHADOW(0));

		/* set the new period */
		count = kpc_fixed_count();
		for (uint32_t i = 0; i < count; ++i) {
			if (*new_period == 0)
				*new_period = kpc_fixed_max();
			FIXED_RELOAD(i) = max - *new_period;
			/* reload the counter if possible */
			kpc_reload_counter(i);
			/* next period value */
			new_period++;
		}
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0)
				continue;
			if (*new_period == 0)
				*new_period = kpc_configurable_max();
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (hw_atomic_sub(&kpc_reload_sync, 1) == 0)
		thread_wakeup((event_t) &kpc_reload_sync);
}
672 | ||
673 | ||
674 | int | |
675 | kpc_set_period_arch(struct kpc_config_remote *mp_config) | |
676 | { | |
677 | /* dispatch to all CPUs */ | |
678 | cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config); | |
679 | ||
680 | kpc_configured = 1; | |
681 | ||
682 | return 0; | |
683 | } | |
684 | ||
685 | int | |
686 | kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) | |
687 | { | |
688 | uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count(); | |
689 | ||
690 | assert(configv); | |
691 | ||
692 | for (uint32_t i = 0; i < cfg_count; ++i) | |
693 | if ((1ULL << i) & pmc_mask) | |
694 | *configv++ = get_counter_config(i + offset); | |
695 | ||
696 | return 0; | |
697 | } | |
698 | ||
699 | static int | |
700 | kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) | |
701 | { | |
702 | uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count(); | |
703 | boolean_t enabled; | |
704 | ||
705 | assert(configv); | |
706 | ||
707 | enabled = ml_set_interrupts_enabled(FALSE); | |
708 | ||
709 | for (uint32_t i = 0; i < cfg_count; ++i) { | |
710 | if (((1ULL << i) & pmc_mask) == 0) | |
711 | continue; | |
712 | assert(kpc_controls_counter(i + offset)); | |
713 | ||
714 | set_counter_config(i + offset, *configv++); | |
715 | } | |
716 | ||
717 | ml_set_interrupts_enabled(enabled); | |
718 | ||
719 | return 0; | |
720 | } | |
721 | ||
722 | static uint32_t kpc_config_sync; | |
723 | static void | |
724 | kpc_set_config_xcall(void *vmp_config) | |
725 | { | |
726 | struct kpc_config_remote *mp_config = vmp_config; | |
727 | kpc_config_t *new_config = NULL; | |
728 | uint32_t classes = 0ULL; | |
729 | ||
730 | assert(mp_config); | |
731 | assert(mp_config->configv); | |
732 | classes = mp_config->classes; | |
733 | new_config = mp_config->configv; | |
734 | ||
735 | if (classes & KPC_CLASS_CONFIGURABLE_MASK) { | |
736 | kpc_set_configurable_config(new_config, mp_config->pmc_mask); | |
737 | new_config += kpc_popcount(mp_config->pmc_mask); | |
738 | } | |
739 | ||
740 | if (hw_atomic_sub(&kpc_config_sync, 1) == 0) | |
741 | thread_wakeup((event_t) &kpc_config_sync); | |
742 | } | |
743 | ||
744 | int | |
745 | kpc_set_config_arch(struct kpc_config_remote *mp_config) | |
746 | { | |
747 | /* dispatch to all CPUs */ | |
748 | cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config); | |
749 | ||
750 | kpc_configured = 1; | |
751 | ||
752 | return 0; | |
753 | } | |
754 | ||
755 | void | |
756 | kpc_idle(void) | |
757 | { | |
758 | if (kpc_configured) | |
759 | save_regs(); | |
760 | } | |
761 | ||
762 | void | |
763 | kpc_idle_exit(void) | |
764 | { | |
765 | if (kpc_configured) | |
766 | restore_regs(); | |
767 | } | |
768 | ||
769 | static uint32_t kpc_xread_sync; | |
770 | static void | |
771 | kpc_get_curcpu_counters_xcall(void *args) | |
772 | { | |
773 | struct kpc_get_counters_remote *handler = args; | |
774 | int offset=0, r=0; | |
775 | ||
776 | assert(handler); | |
777 | assert(handler->buf); | |
778 | ||
779 | offset = cpu_number() * handler->buf_stride; | |
780 | r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]); | |
781 | ||
782 | /* number of counters added by this CPU, needs to be atomic */ | |
783 | hw_atomic_add(&(handler->nb_counters), r); | |
784 | ||
785 | if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) | |
786 | thread_wakeup((event_t) &kpc_xread_sync); | |
787 | } | |
788 | ||
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes),
		.buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu)
		*curcpu = current_processor()->cpu_id;
	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}
812 | ||
813 | int | |
814 | kpc_get_pmu_version(void) | |
815 | { | |
816 | return KPC_PMU_ARM_V2; | |
817 | } | |
818 | ||
int
kpc_set_sw_inc( uint32_t mask )
{
	/* Only works with the configurable counters set to count the increment event (0x0) */

	/* Write to PMSWINC */
	__asm__ volatile("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask));

	return 0;
}
829 | ||
830 | #else /* !ARMA7 */ | |
831 | ||
832 | /* no kpc */ | |
833 | ||
834 | void | |
835 | kpc_arch_init(void) | |
836 | { | |
837 | /* No-op */ | |
838 | } | |
839 | ||
840 | uint32_t | |
841 | kpc_get_classes(void) | |
842 | { | |
843 | return 0; | |
844 | } | |
845 | ||
846 | uint32_t | |
847 | kpc_fixed_count(void) | |
848 | { | |
849 | return 0; | |
850 | } | |
851 | ||
852 | uint32_t | |
853 | kpc_configurable_count(void) | |
854 | { | |
855 | return 0; | |
856 | } | |
857 | ||
858 | uint32_t | |
859 | kpc_fixed_config_count(void) | |
860 | { | |
861 | return 0; | |
862 | } | |
863 | ||
864 | uint32_t | |
865 | kpc_configurable_config_count(uint64_t pmc_mask __unused) | |
866 | { | |
867 | return 0; | |
868 | } | |
869 | ||
870 | int | |
871 | kpc_get_fixed_config(kpc_config_t *configv __unused) | |
872 | { | |
873 | return 0; | |
874 | } | |
875 | ||
876 | uint64_t | |
877 | kpc_fixed_max(void) | |
878 | { | |
879 | return 0; | |
880 | } | |
881 | ||
882 | uint64_t | |
883 | kpc_configurable_max(void) | |
884 | { | |
885 | return 0; | |
886 | } | |
887 | ||
888 | int | |
889 | kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused) | |
890 | { | |
891 | return ENOTSUP; | |
892 | } | |
893 | ||
894 | int | |
895 | kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused) | |
896 | { | |
897 | return ENOTSUP; | |
898 | } | |
899 | ||
900 | int | |
901 | kpc_get_fixed_counters(uint64_t *counterv __unused) | |
902 | { | |
903 | return 0; | |
904 | } | |
905 | ||
906 | boolean_t | |
907 | kpc_is_running_fixed(void) | |
908 | { | |
909 | return FALSE; | |
910 | } | |
911 | ||
912 | boolean_t | |
913 | kpc_is_running_configurable(uint64_t pmc_mask __unused) | |
914 | { | |
915 | return FALSE; | |
916 | } | |
917 | ||
918 | int | |
919 | kpc_set_running_arch(struct kpc_running_remote *mp_config __unused) | |
920 | { | |
921 | return ENOTSUP; | |
922 | } | |
923 | ||
924 | int | |
925 | kpc_set_period_arch(struct kpc_config_remote *mp_config __unused) | |
926 | { | |
927 | return ENOTSUP; | |
928 | } | |
929 | ||
930 | int | |
931 | kpc_set_config_arch(struct kpc_config_remote *mp_config __unused) | |
932 | { | |
933 | return ENOTSUP; | |
934 | } | |
935 | ||
936 | void | |
937 | kpc_idle(void) | |
938 | { | |
939 | // do nothing | |
940 | } | |
941 | ||
942 | void | |
943 | kpc_idle_exit(void) | |
944 | { | |
945 | // do nothing | |
946 | } | |
947 | ||
948 | int | |
949 | kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) | |
950 | { | |
951 | #pragma unused(classes) | |
952 | #pragma unused(curcpu) | |
953 | #pragma unused(buf) | |
954 | ||
955 | return 0; | |
956 | } | |
957 | ||
958 | int | |
959 | kpc_set_sw_inc( uint32_t mask __unused ) | |
960 | { | |
961 | return ENOTSUP; | |
962 | } | |
963 | ||
964 | int | |
965 | kpc_get_pmu_version(void) | |
966 | { | |
967 | return KPC_PMU_ERROR; | |
968 | } | |
969 | ||
970 | #endif | |
971 | ||
972 | /* | |
973 | * RAWPMU isn't implemented for any of the 32-bit ARMs. | |
974 | */ | |
975 | ||
976 | uint32_t | |
977 | kpc_rawpmu_config_count(void) | |
978 | { | |
979 | return 0; | |
980 | } | |
981 | ||
982 | int | |
983 | kpc_get_rawpmu_config(__unused kpc_config_t *configv) | |
984 | { | |
985 | return 0; | |
986 | } |