/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;

void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * It does not fit the per-CPU kpc buffers well, since:
	 * 1. Buffers don't need to be this large.
	 * 2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when they are registered, so we
	 * allocate the per-CPU memory here instead.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	/* kfree() is not NULL-safe, so only free what was actually allocated */
	if (cpu_data->cpu_kpc_buf[0])
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
	if (cpu_data->cpu_kpc_buf[1])
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
	if (cpu_data->cpu_kpc_shadow)
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
	if (cpu_data->cpu_kpc_reload)
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);

	return FALSE;
}

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler)
		kpc_pm_handler(new_state ? FALSE : TRUE);

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	/* update the internal state */
	force_all_ctrs = new_state;

	return 0;
}

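/*
 * Sketch (not in the original source) of the acquire/release protocol a
 * client of kpc_force_all_ctrs() follows; error handling elided:
 *
 *	if (kpc_force_all_ctrs(task, 1) == 0) {
 *		... the task samples with every PMC, including the ones
 *		    normally reserved by the power manager ...
 *		kpc_force_all_ctrs(task, 0);
 *	}
 */
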
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}

uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU's counters, reading counters from all
	 * CPUs is architecture dependent. This lets kpc make the most of the
	 * platform if memory-mapped registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}

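/*
 * Example (sketch, not in the original source): a typical in-kernel caller
 * allocates a buffer sized for every CPU, reads all fixed counters, then
 * frees the buffer:
 *
 *	uint64_t *buf = kpc_counterbuf_alloc();
 *	int curcpu, nread;
 *
 *	if (buf) {
 *		nread = kpc_get_cpu_counters(TRUE, KPC_CLASS_FIXED_MASK,
 *		                             &curcpu, buf);
 *		... consume nread counter values ...
 *		kpc_counterbuf_free(buf);
 *	}
 */
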
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		/*
		 * Clients shouldn't ask for config words that aren't available.
		 * Most likely, they'd misinterpret the returned buffer if we
		 * allowed this.
		 */
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

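/*
 * Example (sketch, not in the original source): reading the current config
 * words for the configurable class; the buffer is sized from what kpc
 * itself reports:
 *
 *	uint32_t n = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
 *	kpc_config_t *cfg = kalloc(n * sizeof(kpc_config_t));
 *
 *	if (cfg) {
 *		kpc_get_config(KPC_CLASS_CONFIGURABLE_MASK, cfg);
 *		... inspect cfg[0 .. n-1] ...
 *		kfree(cfg, n * sizeof(kpc_config_t));
 *	}
 */
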
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK)))
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc(COUNTERBUF_SIZE);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;
	task_t task = NULL;
	int r;

	BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);

	ctx.cur_pid = 0;
	ctx.cur_thread = current_thread();

	task = chudxnu_task_for_thread(ctx.cur_thread);
	if (task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK)))
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("kpc: setting period for classes 0x%x\n", classes);
	kpc_set_period_arch(&mp_config);

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

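/*
 * Note on the conversion above (sketch, not in the original source): for a
 * counter whose maximum value is M (kpc_fixed_max() or
 * kpc_configurable_max()), a PMI every P events is programmed by reloading
 * the counter to M - P. The stored reload value therefore converts back to
 * a period as:
 *
 *	period = M - reload;
 */
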
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a PMI occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

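/*
 * Example (sketch, not in the original source): starting the fixed and
 * configurable classes, then verifying which classes actually came up:
 *
 *	kpc_set_running(KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK);
 *
 *	uint32_t running = kpc_get_running();
 *	if (running & KPC_CLASS_FIXED_MASK) {
 *		... the fixed counters are counting ...
 *	}
 */
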
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	/* 0x38 reserves configurable PMCs 3, 4 and 5 for the power manager */
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check the number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager state */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

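/*
 * Sketch of the power manager lifecycle (not in the original source; the
 * handler name is hypothetical):
 *
 *	if (kpc_register_pm_handler(my_pm_handler)) {
 *		... the PM owns PMCs 3-5 and, since custom_config is TRUE,
 *		    programs them itself; kpc drops them from the
 *		    CONFIGURABLE class until kpc_release_pm_counters() ...
 *	}
 *
 * A FALSE return means a task had already forced all counters: the
 * reservation is recorded, but the PMCs are not currently available.
 */
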
uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes requested or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0))
	{
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class is non-empty iff:
	 * - no task has acquired all PMCs via force_all_ctrs
	 * - the PM is registered and uses kpc to interact with its PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK))
	{
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
	assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
	assert((cfg_mask & pwr_mask) == 0ULL);

	return cfg_mask | pwr_mask;
}

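/*
 * Worked example for kpc_get_configurable_pmc_mask() (not in the original
 * source; assumes 6 configurable PMCs, kpc_pm_pmc_mask == 0x38 (PMCs 3-5),
 * custom_config == FALSE and force_all_ctrs == FALSE):
 *
 *	kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK) == 0x07
 *	kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK)        == 0x38
 *
 * If a task then forces all counters, the CONFIGURABLE mask grows to 0x3f
 * and the POWER mask collapses to 0.
 */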