/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)

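/*
 * Sizing illustration (assumed values; both vary by platform and release):
 * if KPC_MAX_COUNTERS is 32 and logical_cpu_max is 8, each per-CPU buffer is
 * 32 * sizeof(uint64_t) = 256 bytes, and a full COUNTERBUF_SIZE buffer is
 * 8 * 256 = 2048 bytes.
 */
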
/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;

void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs. This mimics the userspace API.
	 * This does not fit the per-CPU kpc buffers well, since:
	 * 1. Buffers don't need to be this large.
	 * 2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs are asked to call out into kpc when being registered, so the
	 * memory is allocated here.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_reload = NULL;
	}
}


static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler( new_state ? FALSE : TRUE );
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}

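/*
 * Usage sketch (illustrative, not a call site in this file): a privileged
 * client that wants exclusive use of every PMC, as kperf does, would do
 * something like:
 *
 *	if (kpc_force_all_ctrs(current_task(), 1) == 0) {
 *		... program and read the counters ...
 *		kpc_force_all_ctrs(current_task(), 0);
 *	}
 *
 * A second task calling this while another task holds the force gets EACCES.
 */
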
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

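/*
 * Handshake sketch (an assumption about how a power-manager client behaves;
 * the callback contract is defined outside this file): kpc_force_all_ctrs()
 * invokes kpc_pm_handler(FALSE) when a task takes the counters away from
 * power management and kpc_pm_handler(TRUE) when it gives them back. A
 * handler that wants the transition applied synchronously may call
 * kpc_pm_acknowledge() with the value it was passed, from inside that
 * callback:
 *
 *	static void my_pm_handler(boolean_t available_to_pm)
 *	{
 *		... quiesce or reprogram the reserved PMCs ...
 *		kpc_pm_acknowledge(available_to_pm);
 *	}
 */
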
int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}

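/*
 * Worked example (hypothetical values): with 2 fixed counters and a power
 * manager that reserved kpc_pm_pmc_mask == 0x38 with a custom config,
 * ctr == 5 maps to configurable PMC 3; its bit (1ULL << 3) == 0x8 falls
 * inside 0x38, so kpc does not control it unless force_all_ctrs is set, in
 * which case kpc controls every counter again.
 */
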
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close together as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

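/*
 * Output layout note (follows from the code above): counter values are
 * packed into buf in class order: fixed counters first, then the
 * configurable PMCs selected by KPC_CLASS_CONFIGURABLE_MASK, then those
 * selected by KPC_CLASS_POWER_MASK. The return value is the number of
 * uint64_t entries written.
 */
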
/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU counters, reading counters from all
	 * CPUs is architecture dependent. This allows kpc to make the most of
	 * the platform if memory-mapped registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}

int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK)
	{
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if( kpc_multiple_clients() )
		{
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK)))
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

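/*
 * Caller-facing behavior sketch (derived from the checks above): a single
 * call may target either KPC_CLASS_CONFIGURABLE_MASK or KPC_CLASS_POWER_MASK
 * but not both, and KPC_CLASS_RAWPMU_MASK is rejected whenever a power
 * manager is registered. For example,
 *
 *	kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK, cfg);
 *
 * returns EPERM without touching the hardware or taking kpc_config_lock.
 */
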
/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc(COUNTERBUF_SIZE);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

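/*
 * Usage sketch (illustrative only): reading every CPU's fixed and
 * configurable counters with a buffer sized by this file's own helpers:
 *
 *	uint64_t *buf = kpc_counterbuf_alloc();
 *	if (buf != NULL) {
 *		int curcpu;
 *		int n = kpc_get_cpu_counters(TRUE,
 *		    KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK,
 *		    &curcpu, buf);
 *		... the first n entries of buf are now valid ...
 *		kpc_counterbuf_free(buf);
 *	}
 */
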
void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

	ctx.cur_pid = 0;
	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(current_task());

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}


int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no client has the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK)))
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
# else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

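/*
 * Reload/period relationship (follows from the conversion above): a counter
 * programmed to overflow every N events stores a reload value of max - N,
 * so the period reported here is max - reload == N. For example, with a
 * hypothetical kpc_configurable_max() of 2^47 - 1 and a desired period of
 * 1000 events, the reload value would be 2^47 - 1001.
 */
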
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count*sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count*sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

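/*
 * Note on the wrapper above: 0x38 == 0b111000, so kpc_register_pm_handler()
 * reserves configurable PMCs 3 through 5 for the power manager with a custom
 * configuration.
 */
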
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable class requested or no configurable counters */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0))
	{
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class exists iff:
	 *	- No task acquired all PMCs
	 *	- PM registered and uses kpc to interact with PMCs
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK))
	{
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert( ((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count() );
	assert( (cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}
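
/*
 * Worked example (hypothetical values): with 6 configurable PMCs,
 * all_cfg_pmcs_mask == 0x3F. If the power manager reserved pmc_mask == 0x38
 * without a custom config and no task forces all counters, then
 * KPC_CLASS_CONFIGURABLE_MASK selects ~0x38 & 0x3F == 0x07 and
 * KPC_CLASS_POWER_MASK selects 0x38. Once force_all_ctrs is set, the
 * configurable class grows to 0x3F and the power class becomes empty.
 */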