/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)

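/*
 * Sizing sketch (illustrative values, not from this file): with a
 * hypothetical KPC_MAX_COUNTERS of 32 and logical_cpu_max of 8,
 * COUNTERBUF_SIZE_PER_CPU is 32 * sizeof(uint64_t) = 256 bytes and
 * COUNTERBUF_SIZE is 8 * 256 = 2048 bytes.
 */
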
/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t        kpc_pm_has_custom_config;
static uint64_t         kpc_pm_pmc_mask;

boolean_t kpc_context_switch_active = FALSE;

void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs, mimicking the userspace API.
	 * That layout is a poor fit for the per-CPU kpc buffers, since:
	 * 1. Buffers don't need to be this large.
	 * 2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs call out into kpc when being registered, so we allocate the
	 * per-CPU memory here instead.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_reload = NULL;
	}
}

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler)
		kpc_pm_handler(new_state ? FALSE : TRUE);

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, val);

	/* update the internal state */
	force_all_ctrs = val;

	return 0;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}

uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}

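/*
 * Illustrative use (hypothetical caller, not part of this file):
 * kpc_get_running() returns a bitwise OR of class masks, so callers test
 * one class at a time.
 *
 *	if (kpc_get_running() & KPC_CLASS_FIXED_MASK) {
 *		// the fixed counters are currently counting
 *	}
 */
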
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}

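/*
 * Illustrative caller (hypothetical, not part of this file): read this
 * CPU's fixed and configurable counters using the helpers defined here.
 * kpc_counterbuf_alloc() is sized for all counters on all CPUs, so it is
 * always large enough for a single-CPU read.
 *
 *	uint64_t *buf = kpc_counterbuf_alloc();
 *	int curcpu;
 *
 *	if (buf) {
 *		int n = kpc_get_cpu_counters(FALSE,
 *		    KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK,
 *		    &curcpu, buf);
 *		// buf[0..n-1] now holds the values read on CPU curcpu
 *		kpc_counterbuf_free(buf);
 *	}
 */
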
/* read the per-CPU shadow (saved) counter values */
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		/*
		 * Clients shouldn't ask for config words that aren't
		 * available; most likely, they'd misinterpret the returned
		 * buffer if we allowed this.
		 */
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & KPC_CLASS_CONFIGURABLE_MASK) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc(COUNTERBUF_SIZE);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(current_task());

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & KPC_CLASS_CONFIGURABLE_MASK) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

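/*
 * Period/reload relationship (as used by the conversions above): the PMCs
 * count up and overflow, so a sampling period P is programmed as a reload
 * value of max - P. kpc_get_period() inverts this, e.g. for fixed counter i:
 *
 *	period = kpc_fixed_max() - FIXED_RELOAD(i);
 */
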
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/*
	 * NOTE: what happens if a PMI occurs while actionids are being set is
	 * undefined.
	 */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	/* 0x38 == 0b111000: reserve configurable PMCs 3, 4 and 5 by default */
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check the number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager state */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

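/*
 * Illustrative registration sketch (hypothetical caller, not part of this
 * file): a power manager reserving PMCs 3-5 with its own configuration.
 * Per kpc_force_all_ctrs() above, the handler is invoked with FALSE when a
 * task forces all counters away from the PM, and TRUE when they return.
 *
 *	static void my_pm_handler(boolean_t available)
 *	{
 *		// 'available' is FALSE while a task holds all counters
 *	}
 *
 *	boolean_t ok = kpc_reserve_pm_counters(0x38, my_pm_handler, TRUE);
 *	// ok == FALSE means a task currently forces all counters
 */
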
void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

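/*
 * Worked example for kpc_get_configurable_pmc_mask() below (illustrative
 * values): with 6 configurable PMCs (all_cfg_pmcs_mask == 0x3f), a power
 * manager holding kpc_pm_pmc_mask == 0x38 without a custom config, and
 * force_all_ctrs == FALSE:
 * - KPC_CLASS_CONFIGURABLE_MASK yields ~0x38 & 0x3f == 0x07 (PMCs 0-2)
 * - KPC_CLASS_POWER_MASK yields 0x38 (PMCs 3-5)
 * If a task forces all counters, the configurable class covers 0x3f and
 * the power class becomes empty.
 */
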
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class exists iff:
	 * - no task has acquired all PMCs via force_all_ctrs; and
	 * - the power manager registered and uses kpc to interact with its
	 *   PMCs (i.e. it did not supply a custom configuration).
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
	assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL);

	return cfg_mask | pwr_mask;
}