/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* PM handler called when forcing/releasing all counters */
static void (*pm_handler)(boolean_t) = NULL;

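/*
 * One-time setup: create the lock group and the mutex that serialize
 * kpc configuration changes (kpc_set_config/kpc_set_period/kpc_set_actionid).
 */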
void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

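/*
 * Record (or query, below) in the task's t_chud bits whether this task
 * holds the "all counters forced" reservation.
 */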
static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

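/*
 * Acquire or release all performance counters on behalf of `task'.
 * Only one task may hold the reservation at a time; other tasks get
 * EACCES.  On a state change the architecture layer is reconfigured
 * and the registered power-manager handler is called with FALSE when
 * the counters are being taken and TRUE when they are released.
 */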
int
kpc_force_all_ctrs(task_t task, int val)
{
	int ret = 0;
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* do the architecture specific work */
	if ((ret = kpc_force_all_ctrs_arch(task, val)) != 0)
		return ret;

	/* notify the power manager */
	if (pm_handler)
		pm_handler( new_state ? FALSE : TRUE );

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, val);

	/* update the internal state */
	force_all_ctrs = val;

	return 0;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

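/*
 * Register the power manager's notification handler (only the first
 * registration is kept).  Returns TRUE if the counters are currently
 * available to the caller, FALSE if kperf has forced all counters.
 */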
boolean_t
kpc_register_pm_handler(void (*handler)(boolean_t))
{
	if (!pm_handler) {
		pm_handler = handler;
	}

	/* Notify machine-dependent code. Reserved PMCs could change. */
	kpc_force_all_ctrs_arch(TASK_NULL, force_all_ctrs);

	return force_all_ctrs ? FALSE : TRUE;
}

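/*
 * TRUE once a PM handler has been registered, i.e. the counters are
 * shared with another client.
 */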
boolean_t
kpc_multiple_clients(void)
{
	return pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !pm_handler || force_all_ctrs;
}

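/* report which counter classes are currently counting, as a class mask */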
uint32_t
kpc_get_running(void)
{
	uint32_t cur_state = 0;

	if( kpc_is_running_fixed() )
		cur_state |= KPC_CLASS_FIXED_MASK;

	if( kpc_is_running_configurable() )
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	return cur_state;
}

/* generic counter reading function */
int
kpc_get_cpu_counters( boolean_t all_cpus, uint32_t classes,
                      int *curcpu, uint64_t *buf )
{
	int r, enabled, offset = 0;

	(void) all_cpus;

	/* grab counters and CPU number as close as possible */
	enabled = ml_set_interrupts_enabled(FALSE);

	/* and the CPU ID */
	if( curcpu )
		*curcpu = current_processor()->cpu_id;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_get_fixed_counters( &buf[offset] );

		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		r = kpc_get_configurable_counters( &buf[offset] );

		offset += kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

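/*
 * Copy the shadow (saved) counter values for the requested classes into
 * `buf'; fixed-counter values precede configurable ones, mirroring
 * kpc_get_cpu_counters().
 */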
int
kpc_get_shadow_counters( boolean_t all_cpus, uint32_t classes,
                         int *curcpu, uint64_t *buf )
{
	int enabled, count, offset = 0;

	(void)all_cpus;

	enabled = ml_set_interrupts_enabled(FALSE);

	if( curcpu )
		*curcpu = current_processor()->cpu_id;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy( &buf[offset], &FIXED_SHADOW(0), count*sizeof(uint64_t) );

		offset += count;
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy( &buf[offset], &CONFIGURABLE_SHADOW(0), count*sizeof(uint64_t) );

		offset += count;
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	int count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
		count += kpc_fixed_count();

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
		count += kpc_configurable_count();

	return count;
}

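/*
 * Number of configuration words needed for the requested classes.
 * RAWPMU words are only counted when kpc is the sole client.
 */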
uint32_t
kpc_get_config_count(uint32_t classes)
{
	int count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
		count += kpc_fixed_config_count();

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
		count += kpc_configurable_config_count();

	if( (classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients() )
		count += kpc_rawpmu_config_count();

	return count;
}

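/* copy the current configuration for the requested classes into current_config */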
int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	int count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		kpc_get_configurable_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if( classes & KPC_CLASS_RAWPMU_MASK )
	{
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if( kpc_multiple_clients() )
		{
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

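/*
 * Hand a new configuration for the requested classes to the
 * architecture layer, serialized by the config lock.
 */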
int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	struct kpc_config_remote mp_config;

	// Don't allow RAWPMU configuration when sharing counters.
	if( (classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() )
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	mp_config.classes = classes;
	mp_config.configv = configv;

	kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

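/*
 * Sketch of a hypothetical caller (not code from this file): the buffer
 * helpers below pair with kpc_get_cpu_counters() roughly like this:
 *
 *	uint64_t *counts = kpc_counterbuf_alloc();
 *	if (counts) {
 *		int n = kpc_get_cpu_counters(FALSE,
 *		    KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK,
 *		    NULL, counts);
 *		...consume the first n entries of counts...
 *		kpc_counterbuf_free(counts);
 *	}
 */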
/* allocate a buffer big enough for all the counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf;

	buf = kalloc(KPC_MAX_COUNTERS * sizeof(uint64_t));
	if(buf)
		bzero( buf, KPC_MAX_COUNTERS * sizeof(uint64_t) );

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if( buf )
		kfree(buf, KPC_MAX_COUNTERS * sizeof(uint64_t));
}

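/*
 * PMI callout into kperf: build a sampling context for the current
 * thread and run the action identified by `actionid'.
 */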
void kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;
	task_t task = NULL;
	int r;

	BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);

	ctx.cur_pid = 0;
	ctx.cur_thread = current_thread();

	task = chudxnu_task_for_thread(ctx.cur_thread);
	if (task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

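/*
 * Set the sampling period for each counter in the requested classes.
 * The values are handed to the architecture layer (kpc_set_period_arch);
 * kpc_get_period() below recovers them as period = max - reload.
 */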
int kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config;

	lck_mtx_lock(&kpc_config_lock);

#ifndef FIXED_COUNTER_SHADOW
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return -1;
	}
#endif

	kprintf("setting period for classes %u\n", classes);

	mp_config.classes = classes;
	mp_config.configv = val;

	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

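/* read back the current periods, derived from the counters' reload values */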
int kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t i, count, offset = 0;

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		/* convert reload values to periods */
		for (i = 0; i < count; i++)
			val[i] = kpc_fixed_max() - FIXED_RELOAD(i);

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		for (i = 0; i < count; i++)
			val[i + offset] = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

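/*
 * Associate a kperf action id with each counter in the requested
 * classes; presumably the machine-dependent PMI path uses these to pick
 * the action passed to kpc_sample_kperf().
 */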
int kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count, offset = 0;

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy(&FIXED_ACTIONID(0), val, count*sizeof(uint32_t));

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy(&CONFIGURABLE_ACTIONID(0), &val[offset], count*sizeof(uint32_t));
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count, offset = 0;

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy(val, &FIXED_ACTIONID(0), count*sizeof(uint32_t));

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy(&val[offset], &CONFIGURABLE_ACTIONID(0), count*sizeof(uint32_t));
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}