/* osfmk/kern/kpc_common.c (from the xnu-3248.20.55 source) */
/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;

void kpc_common_init(void);
void
kpc_common_init(void)
{
        kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
        kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
        lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
        assert(cpu_data);
        assert(cpu_data->cpu_kpc_buf[0] == NULL);
        assert(cpu_data->cpu_kpc_buf[1] == NULL);
        assert(cpu_data->cpu_kpc_shadow == NULL);
        assert(cpu_data->cpu_kpc_reload == NULL);

        /*
         * Buffers allocated through kpc_counterbuf_alloc() are large enough to
         * store the values of all PMCs from all CPUs, which mimics the
         * userspace API. That layout is a poor fit for the per-CPU kpc
         * buffers, since:
         * 1. The per-CPU buffers do not need to be that large.
         * 2. The actual number of CPUs is not known at this point.
         *
         * Instead, each CPU calls out into kpc when it is registered, and we
         * allocate its buffers here.
         */

        if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
                goto error;
        if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
                goto error;
        if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
                goto error;
        if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
                goto error;

        memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
        memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
        memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
        memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

        /* success */
        return TRUE;

error:
        kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
        kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
        kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
        kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);

        return FALSE;
}
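
/*
 * Illustrative sizing example (the numbers are assumptions, not taken from
 * this file): with KPC_MAX_COUNTERS == 32 and machine_info.logical_cpu_max
 * == 8, each per-CPU allocation above is
 *
 *     COUNTERBUF_SIZE_PER_CPU = 32 * sizeof(uint64_t) = 256 bytes,
 *
 * while a buffer from kpc_counterbuf_alloc() spans all CPUs:
 *
 *     COUNTERBUF_SIZE = 8 * 256 = 2048 bytes.
 *
 * This is why kpc_register_cpu() allocates COUNTERBUF_SIZE_PER_CPU per buffer
 * rather than reusing kpc_counterbuf_alloc().
 */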

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
        assert(task);

        task_lock(task);
        if (state)
                task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
        else
                task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
        task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
        assert(task);
        return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
        boolean_t new_state = val ? TRUE : FALSE;
        boolean_t old_state = kpc_get_force_all_ctrs();

        /*
         * Refuse to do the operation if the counters are already forced by
         * another task.
         */
        if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
                return EACCES;

        /* nothing to do if the state is not changing */
        if (old_state == new_state)
                return 0;

        /* notify the power manager */
        if (kpc_pm_handler)
                kpc_pm_handler(new_state ? FALSE : TRUE);

        /* update the task bits */
        kpc_task_set_forced_all_ctrs(task, val);

        /* update the internal state */
        force_all_ctrs = val;

        return 0;
}
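
/*
 * Illustrative sketch (a hypothetical caller, not part of this file): the
 * force-all-counters protocol from a client's point of view. A task that
 * wants exclusive access to every PMC forces the counters, does its work,
 * then releases them; while it holds them, another task's attempt fails with
 * EACCES.
 */
#if 0
static int
kpc_force_all_ctrs_example(void)
{
        task_t task = current_task();
        int err;

        /* take all PMCs away from the power manager on behalf of this task */
        err = kpc_force_all_ctrs(task, 1);
        if (err)
                return err;     /* EACCES if another task already forced them */

        assert(kpc_get_force_all_ctrs());

        /* ... configure, run, and read counters ... */

        /* hand the reserved PMCs back */
        return kpc_force_all_ctrs(task, 0);
}
#endif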

int
kpc_get_force_all_ctrs(void)
{
        return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
        return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
        return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
        uint64_t pmc_mask = 0ULL;

        assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

        if (ctr < kpc_fixed_count())
                return kpc_controls_fixed_counters();

        /*
         * By default kpc manages all PMCs, but if the Power Manager registered
         * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
         * However, kpc takes ownership back if a task acquired all PMCs via
         * force_all_ctrs.
         */
        pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
        if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
                return FALSE;

        return TRUE;
}
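
/*
 * Worked example for the ownership rule above (the counts and masks are
 * assumptions for illustration): suppose kpc_fixed_count() == 2, the power
 * manager registered with custom_config == TRUE, and kpc_pm_pmc_mask == 0x38
 * (configurable PMCs 3-5). For ctr == 6, pmc_mask == 1ULL << (6 - 2) == 0x10,
 * which overlaps kpc_pm_pmc_mask, so kpc_controls_counter(6) returns FALSE.
 * If a task has taken everything via force_all_ctrs, it returns TRUE instead.
 */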

uint32_t
kpc_get_running(void)
{
        uint64_t pmc_mask = 0;
        uint32_t cur_state = 0;

        if (kpc_is_running_fixed())
                cur_state |= KPC_CLASS_FIXED_MASK;

        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
        if (kpc_is_running_configurable(pmc_mask))
                cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
        if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
                cur_state |= KPC_CLASS_POWER_MASK;

        return cur_state;
}

/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
        int enabled = 0, offset = 0;
        uint64_t pmc_mask = 0ULL;

        assert(buf);

        enabled = ml_set_interrupts_enabled(FALSE);

        /* grab the CPU number and the counters as close together as possible */
        if (curcpu)
                *curcpu = current_processor()->cpu_id;

        if (classes & KPC_CLASS_FIXED_MASK) {
                kpc_get_fixed_counters(&buf[offset]);
                offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
                kpc_get_configurable_counters(&buf[offset], pmc_mask);
                offset += kpc_popcount(pmc_mask);
        }

        if (classes & KPC_CLASS_POWER_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
                kpc_get_configurable_counters(&buf[offset], pmc_mask);
                offset += kpc_popcount(pmc_mask);
        }

        ml_set_interrupts_enabled(enabled);

        return offset;
}

/* generic counter reading function, public API */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
        assert(buf);

        /*
         * Unlike reading the current CPU's counters, reading counters from all
         * CPUs is architecture dependent. This allows kpc to make the most of
         * the platform when memory-mapped registers are supported.
         */
        if (all_cpus)
                return kpc_get_all_cpus_counters(classes, curcpu, buf);
        else
                return kpc_get_curcpu_counters(classes, curcpu, buf);
}
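
/*
 * Illustrative sketch (a hypothetical caller, not part of this file): reading
 * a snapshot of the current CPU's fixed and configurable counters through the
 * public API above, using the oversized buffer from kpc_counterbuf_alloc().
 */
#if 0
static void
kpc_read_example(void)
{
        uint32_t classes = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
        uint64_t *buf = kpc_counterbuf_alloc();
        int cpu, nread;

        if (buf == NULL)
                return;

        /* current CPU only; pass all_cpus=TRUE to gather every CPU */
        nread = kpc_get_cpu_counters(FALSE, classes, &cpu, buf);
        assert((uint32_t)nread <= kpc_get_counter_count(classes));

        /* ... consume buf[0 .. nread-1] ... */

        kpc_counterbuf_free(buf);
}
#endif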

int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
        int curcpu_id = current_processor()->cpu_id;
        uint32_t cfg_count = kpc_configurable_count(), offset = 0;
        uint64_t pmc_mask = 0ULL;
        boolean_t enabled;

        assert(buf);

        enabled = ml_set_interrupts_enabled(FALSE);

        curcpu_id = current_processor()->cpu_id;
        if (curcpu)
                *curcpu = curcpu_id;

        for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
                /* filter if the caller did not request all cpus */
                if (!all_cpus && (cpu != curcpu_id))
                        continue;

                if (classes & KPC_CLASS_FIXED_MASK) {
                        uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
                        memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
                        offset += count;
                }

                if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

                        for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
                                if ((1ULL << cfg_ctr) & pmc_mask)
                                        buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
                }

                if (classes & KPC_CLASS_POWER_MASK) {
                        pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

                        for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
                                if ((1ULL << cfg_ctr) & pmc_mask)
                                        buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
                }
        }

        ml_set_interrupts_enabled(enabled);

        return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
        uint32_t count = 0;

        if (classes & KPC_CLASS_FIXED_MASK)
                count += kpc_fixed_count();

        if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
                uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
                uint32_t pmc_cnt = kpc_popcount(pmc_msk);
                count += pmc_cnt;
        }

        return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
        uint32_t count = 0;

        if (classes & KPC_CLASS_FIXED_MASK)
                count += kpc_fixed_config_count();

        if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
                uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
                count += kpc_configurable_config_count(pmc_mask);
        }

        if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
                count += kpc_rawpmu_config_count();

        return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
        uint32_t count = 0;

        assert(current_config);

        if (classes & KPC_CLASS_FIXED_MASK) {
                kpc_get_fixed_config(&current_config[count]);
                count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
                kpc_get_configurable_config(&current_config[count], pmc_mask);
                count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
        }

        if (classes & KPC_CLASS_POWER_MASK) {
                uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
                kpc_get_configurable_config(&current_config[count], pmc_mask);
                count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
        }

        if (classes & KPC_CLASS_RAWPMU_MASK) {
                // Clients shouldn't ask for config words that aren't available.
                // Most likely, they'd misinterpret the returned buffer if we
                // allowed this.
                if (kpc_multiple_clients()) {
                        return EPERM;
                }
                kpc_get_rawpmu_config(&current_config[count]);
                count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
        }

        return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
        int ret = 0;
        struct kpc_config_remote mp_config = {
                .classes = classes, .configv = configv,
                .pmc_mask = kpc_get_configurable_pmc_mask(classes)
        };

        assert(configv);

        /* don't allow RAWPMU configuration when sharing counters */
        if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
                return EPERM;
        }

        /* no client has the right to modify both classes */
        if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
            (classes & (KPC_CLASS_POWER_MASK))) {
                return EPERM;
        }

        lck_mtx_lock(&kpc_config_lock);

        /* translate the power class for the machine layer */
        if (classes & KPC_CLASS_POWER_MASK)
                mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

        ret = kpc_set_config_arch(&mp_config);

        lck_mtx_unlock(&kpc_config_lock);

        return ret;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
        uint64_t *buf = NULL;

        buf = kalloc(COUNTERBUF_SIZE);
        if (buf) {
                bzero(buf, COUNTERBUF_SIZE);
        }

        return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
        if (buf) {
                kfree(buf, COUNTERBUF_SIZE);
        }
}

void
kpc_sample_kperf(uint32_t actionid)
{
        struct kperf_sample sbuf;
        struct kperf_context ctx;
        task_t task = NULL;
        int r;

        BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);

        ctx.cur_pid = 0;
        ctx.cur_thread = current_thread();

        task = chudxnu_task_for_thread(ctx.cur_thread);
        if (task)
                ctx.cur_pid = chudxnu_pid_for_task(task);

        ctx.trigger_type = TRIGGER_TYPE_PMI;
        ctx.trigger_id = 0;

        r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

        BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

int
kpc_set_period(uint32_t classes, uint64_t *val)
{
        struct kpc_config_remote mp_config = {
                .classes = classes, .configv = val,
                .pmc_mask = kpc_get_configurable_pmc_mask(classes)
        };

        assert(val);

        /* no client has the right to modify both classes */
        if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
            (classes & (KPC_CLASS_POWER_MASK))) {
                return EPERM;
        }

        lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
        if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
                lck_mtx_unlock(&kpc_config_lock);
                return EPERM;
        }
#else
        if (classes & KPC_CLASS_FIXED_MASK) {
                lck_mtx_unlock(&kpc_config_lock);
                return EINVAL;
        }
#endif

        /* translate the power class for the machine layer */
        if (classes & KPC_CLASS_POWER_MASK)
                mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

        kprintf("setting period %u\n", classes);
        kpc_set_period_arch(&mp_config);

        lck_mtx_unlock(&kpc_config_lock);

        return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
        uint32_t count = 0;
        uint64_t pmc_mask = 0ULL;

        assert(val);

        lck_mtx_lock(&kpc_config_lock);

        if (classes & KPC_CLASS_FIXED_MASK) {
                /* convert reload values to periods */
                count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
                for (uint32_t i = 0; i < count; ++i)
                        *val++ = kpc_fixed_max() - FIXED_RELOAD(i);
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

                /* convert reload values to periods */
                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
        }

        if (classes & KPC_CLASS_POWER_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

                /* convert reload values to periods */
                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
        }

        lck_mtx_unlock(&kpc_config_lock);

        return 0;
}
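
/*
 * Worked example of the reload/period conversion used above and by
 * kpc_set_period() (the concrete numbers are assumptions for illustration):
 * if kpc_configurable_max() is 2^48 - 1 and a client asks for a period of
 * 100000 events, the counter's reload value is programmed to
 * (2^48 - 1) - 100000, so it reaches the maximum, and triggers a PMI, after
 * roughly 100000 increments. Reading the period back simply computes
 * kpc_configurable_max() - CONFIGURABLE_RELOAD(i).
 */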

int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
        uint32_t count = 0;
        uint64_t pmc_mask = 0ULL;

        assert(val);

        /* NOTE: what happens if a pmi occurs while actionids are being
         * set is undefined. */
        lck_mtx_lock(&kpc_config_lock);

        if (classes & KPC_CLASS_FIXED_MASK) {
                count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
                memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
                val += count;
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                CONFIGURABLE_ACTIONID(i) = *val++;
        }

        if (classes & KPC_CLASS_POWER_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                CONFIGURABLE_ACTIONID(i) = *val++;
        }

        lck_mtx_unlock(&kpc_config_lock);

        return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
        uint32_t count = 0;
        uint64_t pmc_mask = 0ULL;

        assert(val);

        lck_mtx_lock(&kpc_config_lock);

        if (classes & KPC_CLASS_FIXED_MASK) {
                count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
                memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
                val += count;
        }

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                *val++ = CONFIGURABLE_ACTIONID(i);
        }

        if (classes & KPC_CLASS_POWER_MASK) {
                pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

                count = kpc_configurable_count();
                for (uint32_t i = 0; i < count; ++i)
                        if ((1ULL << i) & pmc_mask)
                                *val++ = CONFIGURABLE_ACTIONID(i);
        }

        lck_mtx_unlock(&kpc_config_lock);

        return 0;
}

int
kpc_set_running(uint32_t classes)
{
        uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
        struct kpc_running_remote mp_config = {
                .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
        };

        /* target all available PMCs */
        mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

        /* translate the power class for the machine layer */
        if (classes & KPC_CLASS_POWER_MASK)
                mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

        /* generate the state of each configurable PMC */
        mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

        return kpc_set_running_arch(&mp_config);
}
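
/*
 * Illustrative sketch (a hypothetical client, not part of this file): the
 * usual order of operations for driving the configurable class -- program the
 * event selectors, set sampling periods, attach kperf action IDs, then start
 * the counters. The selector, period and action ID values are placeholders.
 */
#if 0
static int
kpc_start_example(void)
{
        uint32_t classes = KPC_CLASS_CONFIGURABLE_MASK;
        kpc_config_t config[KPC_MAX_COUNTERS] = { 0x1234 };     /* placeholder event selector */
        uint64_t periods[KPC_MAX_COUNTERS] = { 1000000 };       /* sample every 1M events */
        uint32_t actionids[KPC_MAX_COUNTERS] = { 1 };           /* kperf action to run on PMI */
        int ret;

        if ((ret = kpc_set_config(classes, config)) != 0)
                return ret;
        if ((ret = kpc_set_period(classes, periods)) != 0)
                return ret;
        if ((ret = kpc_set_actionid(classes, actionids)) != 0)
                return ret;

        /* enable the configurable counters; pass 0 later to stop them */
        return kpc_set_running(classes);
}
#endif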

boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
        return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
        uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
        uint64_t req_mask = 0ULL;

        /* pre-condition */
        assert(handler != NULL);
        assert(kpc_pm_handler == NULL);

        /* check number of counters requested */
        req_mask = (pmc_mask & all_mask);
        assert(kpc_popcount(req_mask) <= kpc_configurable_count());

        /* save the power manager states */
        kpc_pm_has_custom_config = custom_config;
        kpc_pm_pmc_mask = req_mask;
        kpc_pm_handler = handler;

        printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
               req_mask, custom_config);

        /* post-condition */
        {
                uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
                uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
                assert((cfg_count + pwr_count) == kpc_configurable_count());
        }

        return force_all_ctrs ? FALSE : TRUE;
}
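
/*
 * Note on the reservation above: kpc_register_pm_handler() passes pmc_mask
 * 0x38, i.e. bits 3, 4 and 5, so on a machine with at least six configurable
 * PMCs (an assumption for illustration) the power manager claims counters
 * 3-5 and kpc keeps 0-2 (mask 0x07). On smaller configurations the request
 * is clipped against all_mask in kpc_reserve_pm_counters().
 */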

void
kpc_release_pm_counters(void)
{
        /* pre-condition */
        assert(kpc_pm_handler != NULL);

        /* release the counters */
        kpc_pm_has_custom_config = FALSE;
        kpc_pm_pmc_mask = 0ULL;
        kpc_pm_handler = NULL;

        printf("kpc: pm released counters\n");

        /* post-condition */
        assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
        return __builtin_popcountll(value);
}

uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
        uint32_t configurable_count = kpc_configurable_count();
        uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

        /* no configurable classes requested, or no configurable counters */
        if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
            (configurable_count == 0)) {
                goto exit;
        }

        assert(configurable_count < 64);
        all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

        if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
                if (force_all_ctrs == TRUE)
                        cfg_mask |= all_cfg_pmcs_mask;
                else
                        cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
        }

        /*
         * The power class exists iff:
         *      - no task has acquired all PMCs via force_all_ctrs
         *      - the power manager has registered and uses kpc to interact
         *        with its PMCs
         */
        if ((force_all_ctrs == FALSE) &&
            (kpc_pm_handler != NULL) &&
            (kpc_pm_has_custom_config == FALSE) &&
            (classes & KPC_CLASS_POWER_MASK)) {
                pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
        }

exit:
        /* post-conditions */
        assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0);
        assert(kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count());
        assert((cfg_mask & pwr_mask) == 0ULL);

        return cfg_mask | pwr_mask;
}

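/*
 * Worked example for the partition computed above (counter counts and masks
 * are assumptions for illustration): with six configurable PMCs,
 * kpc_pm_pmc_mask == 0x38, kpc_pm_has_custom_config == FALSE and
 * force_all_ctrs == FALSE:
 *
 *     kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK) == 0x07
 *     kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK)        == 0x38
 *
 * If a task then takes everything via force_all_ctrs, the CONFIGURABLE class
 * covers all six PMCs (0x3f) and the POWER class becomes empty.
 */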