/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <kperf/buffer.h>
#include <kern/thread.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <chud/chud_xnu.h>

uint32_t kpc_actionid[KPC_MAX_COUNTERS];

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t      *kpc_config_lckgrp = NULL;
static lck_mtx_t       kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* PM handler called when forcing/releasing all counters */
static void (*pm_handler)(boolean_t) = NULL;

void kpc_common_init(void);
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_chud |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_chud &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_chud & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

int
kpc_force_all_ctrs(task_t task, int val)
{
	int ret = 0;
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* do the architecture specific work */
	if ((ret = kpc_force_all_ctrs_arch(task, val)) != 0)
		return ret;

	/* notify the power manager */
	if (pm_handler)
		pm_handler( new_state ? FALSE : TRUE );

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, val);

	/* update the internal state */
	force_all_ctrs = val;

	return 0;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_register_pm_handler(void (*handler)(boolean_t))
{
	if (!pm_handler) {
		pm_handler = handler;
	}

	/* Notify machine-dependent code. Reserved PMCs could change. */
	kpc_force_all_ctrs_arch(TASK_NULL, force_all_ctrs);

	return force_all_ctrs ? FALSE : TRUE;
}
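
/*
 * Illustrative sketch (not built with this file): how a power-management
 * client might register for counter-availability notifications. The names
 * example_pm_counters_available() and example_pm_init() are assumptions for
 * the example; only kpc_register_pm_handler() above is real. The handler
 * receives FALSE when kperf forces ownership of all counters and TRUE when
 * they are released, and the registration call returns whether the counters
 * are available to the caller right now.
 */
#if 0
static void
example_pm_counters_available(boolean_t available)
{
	/* hypothetical: suspend or resume PM's use of the shared PMCs */
}

void
example_pm_init(void)
{
	boolean_t available = kpc_register_pm_handler(example_pm_counters_available);
	if (!available) {
		/* kperf already forced all counters; wait for the callback */
	}
}
#endif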

boolean_t
kpc_multiple_clients(void)
{
	return pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !pm_handler || force_all_ctrs;
}

uint32_t
kpc_get_running(void)
{
	uint32_t cur_state = 0;

	if( kpc_is_running_fixed() )
		cur_state |= KPC_CLASS_FIXED_MASK;

	if( kpc_is_running_configurable() )
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	return cur_state;
}

/* generic counter reading function */
int
kpc_get_cpu_counters( boolean_t all_cpus, uint32_t classes,
                      int *curcpu, uint64_t *buf )
{
	int r, enabled, offset = 0;

	(void) all_cpus;

	/* grab counters and CPU number as close as possible */
	enabled = ml_set_interrupts_enabled(FALSE);

	/* and the CPU ID */
	if( curcpu )
		*curcpu = current_processor()->cpu_id;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_get_fixed_counters( &buf[offset] );

		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		r = kpc_get_configurable_counters( &buf[offset] );

		offset += kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
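
/*
 * Illustrative sketch (not built with this file): reading the current CPU's
 * counters. It uses only functions defined in this file; the function name
 * example_read_counters() is an assumption. The buffer comes from
 * kpc_counterbuf_alloc() below, which is sized for KPC_MAX_COUNTERS and so
 * is large enough for any class mix.
 */
#if 0
void
example_read_counters(void)
{
	uint32_t classes = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
	int curcpu, nctrs;
	uint64_t *buf = kpc_counterbuf_alloc();

	if (!buf)
		return;

	/* returns the number of counters written, fixed counters first */
	nctrs = kpc_get_cpu_counters(FALSE, classes, &curcpu, buf);
	(void)nctrs;

	kpc_counterbuf_free(buf);
}
#endif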

int
kpc_get_shadow_counters( boolean_t all_cpus, uint32_t classes,
                         int *curcpu, uint64_t *buf )
{
	int enabled, count, offset = 0;

	(void) all_cpus;

	enabled = ml_set_interrupts_enabled(FALSE);

	if( curcpu )
		*curcpu = current_processor()->cpu_id;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy( &buf[offset], &FIXED_SHADOW(0), count*sizeof(uint64_t) );

		offset += count;
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy( &buf[offset], &CONFIGURABLE_SHADOW(0), count*sizeof(uint64_t) );

		offset += count;
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	int count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
		count += kpc_fixed_count();

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
		count += kpc_configurable_count();

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	int count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
		count += kpc_fixed_config_count();

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
		count += kpc_configurable_config_count();

	if( (classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients() )
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	if( classes & KPC_CLASS_FIXED_MASK )
	{
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if( classes & KPC_CLASS_CONFIGURABLE_MASK )
	{
		kpc_get_configurable_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if( classes & KPC_CLASS_RAWPMU_MASK )
	{
		// Client shouldn't ask for config words that aren't available.
		// Most likely, they'd misinterpret the returned buffer if we
		// allowed this.
		if( kpc_multiple_clients() )
		{
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	struct kpc_config_remote mp_config;

	// Don't allow RAWPMU configuration when sharing counters.
	if( (classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients() )
	{
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	mp_config.classes = classes;
	mp_config.configv = configv;

	kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
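
/*
 * Illustrative sketch (not built with this file): the RAWPMU gate above in
 * action. Once a PM handler is registered, kpc_multiple_clients() is TRUE,
 * so kpc_get_config_count() reports zero RAWPMU config words and
 * kpc_set_config() refuses the class. The function name and the EPERM
 * short-circuit are assumptions for the example.
 */
#if 0
int
example_set_rawpmu(kpc_config_t *configs)
{
	if (kpc_get_config_count(KPC_CLASS_RAWPMU_MASK) == 0)
		return EPERM;	/* counters are shared; RAWPMU unavailable */

	return kpc_set_config(KPC_CLASS_RAWPMU_MASK, configs);
}
#endif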

/* allocate a buffer big enough for all the counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf;

	buf = kalloc(KPC_MAX_COUNTERS * sizeof(uint64_t));
	if( buf )
		bzero( buf, KPC_MAX_COUNTERS * sizeof(uint64_t) );

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if( buf )
		kfree(buf, KPC_MAX_COUNTERS * sizeof(uint64_t));
}

void kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;
	struct kperf_context ctx;
	task_t task = NULL;
	int r;

	BUF_DATA1(PERF_KPC_HNDLR | DBG_FUNC_START, 0);

	/* create a context, take a sample */
	ctx.cur_thread = current_thread();
	ctx.cur_pid = -1;

	task = chudxnu_task_for_thread(ctx.cur_thread);
	if (task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	ctx.trigger_type = TRIGGER_TYPE_PMI;
	ctx.trigger_id = 0;

	r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO1(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

int kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config;

	lck_mtx_lock(&kpc_config_lock);

#ifndef FIXED_COUNTER_SHADOW
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return -1;
	}
#endif

	kprintf("setting period %u\n", classes);

	mp_config.classes = classes;
	mp_config.configv = val;

	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t i, count, offset = 0;

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		/* convert reload values to periods */
		for (i = 0; i < count; i++)
			val[i] = kpc_fixed_max() - FIXED_RELOAD(i);

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		for (i = 0; i < count; i++)
			val[i + offset] = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
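
/*
 * Worked example of the reload/period conversion above, assuming a
 * hypothetical 48-bit configurable counter. A counter raises a PMI when it
 * overflows, so to fire every `period` events it is preloaded with
 * max - period; reading the period back inverts that:
 *
 *   kpc_configurable_max() = (1ULL << 48) - 1 = 0xFFFFFFFFFFFF
 *   desired period         = 1000000
 *   reload value           = 0xFFFFFFFFFFFF - 1000000 = 0xFFFFFFF0BDBF
 *   recovered period       = 0xFFFFFFFFFFFF - 0xFFFFFFF0BDBF = 1000000
 */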

int kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count, offset = 0;

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy(&FIXED_ACTIONID(0), val, count*sizeof(uint32_t));

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy(&CONFIGURABLE_ACTIONID(0), &val[offset], count*sizeof(uint32_t));
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
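
/*
 * Illustrative sketch (not built with this file): arming PMI-driven kperf
 * sampling with the pieces above. The function name and the caller-supplied
 * events/periods/actionids arrays are placeholders; a real client would also
 * start the counters via the machine-dependent layer. On overflow, the PMI
 * path looks up the actionid stored here and lands in kpc_sample_kperf().
 */
#if 0
int
example_arm_sampling(kpc_config_t *events, uint64_t *periods, uint32_t *actionids)
{
	int ret;

	if ((ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, events)) != 0)
		return ret;
	if ((ret = kpc_set_period(KPC_CLASS_CONFIGURABLE_MASK, periods)) != 0)
		return ret;
	return kpc_set_actionid(KPC_CLASS_CONFIGURABLE_MASK, actionids);
}
#endif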

int kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count, offset = 0;

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);

		memcpy(val, &FIXED_ACTIONID(0), count*sizeof(uint32_t));

		offset += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

		memcpy(&val[offset], &CONFIGURABLE_ACTIONID(0), count*sizeof(uint32_t));
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}