/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>

#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <sys/errno.h>

#include <kperf/action.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/kperf.h>
#include <kperf/sample.h>

#include <chud/chud_xnu.h>
46 uint32_t kpc_actionid
[KPC_MAX_COUNTERS
];
49 static lck_grp_attr_t
*kpc_config_lckgrp_attr
= NULL
;
50 static lck_grp_t
*kpc_config_lckgrp
= NULL
;
51 static lck_mtx_t kpc_config_lock
;
53 void kpc_arch_init(void);
57 kpc_config_lckgrp_attr
= lck_grp_attr_alloc_init();
58 kpc_config_lckgrp
= lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr
);
59 lck_mtx_init(&kpc_config_lock
, kpc_config_lckgrp
, LCK_ATTR_NULL
);
65 uint32_t cur_state
= 0;
67 if( kpc_is_running_fixed() )
68 cur_state
|= KPC_CLASS_FIXED_MASK
;
70 if( kpc_is_running_configurable() )
71 cur_state
|= KPC_CLASS_CONFIGURABLE_MASK
;
76 /* generic counter reading function */
78 kpc_get_cpu_counters( boolean_t all_cpus
, uint32_t classes
,
79 int *curcpu
, uint64_t *buf
)
81 int r
, enabled
, offset
= 0;
85 /* grab counters and CPU number as close as possible */
86 enabled
= ml_set_interrupts_enabled(FALSE
);
90 *curcpu
= current_processor()->cpu_id
;
92 if( classes
& KPC_CLASS_FIXED_MASK
)
94 kpc_get_fixed_counters( &buf
[offset
] );
96 offset
+= kpc_get_counter_count(KPC_CLASS_FIXED_MASK
);
99 if( classes
& KPC_CLASS_CONFIGURABLE_MASK
)
101 r
= kpc_get_configurable_counters( &buf
[offset
] );
103 offset
+= kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK
);
106 ml_set_interrupts_enabled(enabled
);
112 kpc_get_shadow_counters( boolean_t all_cpus
, uint32_t classes
,
113 int *curcpu
, uint64_t *buf
)
115 int enabled
, count
, offset
= 0;
119 enabled
= ml_set_interrupts_enabled(FALSE
);
122 *curcpu
= current_processor()->cpu_id
;
124 if( classes
& KPC_CLASS_FIXED_MASK
)
126 count
= kpc_get_counter_count(KPC_CLASS_FIXED_MASK
);
128 memcpy( &buf
[offset
], &FIXED_SHADOW(0), count
*sizeof(uint64_t) );
133 if( classes
& KPC_CLASS_CONFIGURABLE_MASK
)
135 count
= kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK
);
137 memcpy( &buf
[offset
], &CONFIGURABLE_SHADOW(0), count
*sizeof(uint64_t) );
142 ml_set_interrupts_enabled(enabled
);
148 kpc_get_counter_count(uint32_t classes
)
152 if( classes
& KPC_CLASS_FIXED_MASK
)
153 count
+= kpc_fixed_count();
155 if( classes
& KPC_CLASS_CONFIGURABLE_MASK
)
156 count
+= kpc_configurable_count() ;
162 kpc_get_config_count(uint32_t classes
)
166 if( classes
& KPC_CLASS_FIXED_MASK
)
167 count
+= kpc_fixed_config_count();
169 if( classes
& KPC_CLASS_CONFIGURABLE_MASK
)
170 count
+= kpc_configurable_config_count();
176 kpc_get_config(uint32_t classes
, kpc_config_t
*current_config
)
180 if( classes
& KPC_CLASS_FIXED_MASK
)
182 kpc_get_fixed_config(¤t_config
[count
]);
183 count
+= kpc_get_config_count(KPC_CLASS_FIXED_MASK
);
186 if( classes
& KPC_CLASS_CONFIGURABLE_MASK
)
188 kpc_get_configurable_config(¤t_config
[count
]);
189 count
+= kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK
);
196 kpc_set_config(uint32_t classes
, kpc_config_t
*configv
)
198 struct kpc_config_remote mp_config
;
200 lck_mtx_lock(&kpc_config_lock
);
202 mp_config
.classes
= classes
;
203 mp_config
.configv
= configv
;
205 kpc_set_config_arch( &mp_config
);
207 lck_mtx_unlock(&kpc_config_lock
);
212 /* allocate a buffer big enough for all the counters */
214 kpc_counterbuf_alloc(void)
218 buf
= kalloc(KPC_MAX_COUNTERS
* sizeof(uint64_t));
220 bzero( buf
, KPC_MAX_COUNTERS
* sizeof(uint64_t) );
226 kpc_counterbuf_free(uint64_t *buf
)
229 kfree(buf
, KPC_MAX_COUNTERS
* sizeof(uint64_t));
232 void kpc_sample_kperf(uint32_t actionid
)
234 struct kperf_sample sbuf
;
235 struct kperf_context ctx
;
239 BUF_DATA1(PERF_KPC_HNDLR
| DBG_FUNC_START
, 0);
242 ctx
.cur_thread
= current_thread();
244 task
= chudxnu_task_for_thread(ctx
.cur_thread
);
246 ctx
.cur_pid
= chudxnu_pid_for_task(task
);
248 ctx
.trigger_type
= TRIGGER_TYPE_PMI
;
251 r
= kperf_sample(&sbuf
, &ctx
, actionid
, SAMPLE_FLAG_PEND_USER
);
253 BUF_INFO1(PERF_KPC_HNDLR
| DBG_FUNC_END
, r
);
257 int kpc_set_period(uint32_t classes
, uint64_t *val
)
259 struct kpc_config_remote mp_config
;
261 lck_mtx_lock(&kpc_config_lock
);
263 #ifndef FIXED_COUNTER_SHADOW
264 if (classes
& KPC_CLASS_FIXED_MASK
) {
265 lck_mtx_unlock(&kpc_config_lock
);
270 kprintf("setting period %u\n", classes
);
272 mp_config
.classes
= classes
;
273 mp_config
.configv
= val
;
275 kpc_set_period_arch( &mp_config
);
277 lck_mtx_unlock(&kpc_config_lock
);
283 int kpc_get_period(uint32_t classes
, uint64_t *val
)
285 uint32_t i
, count
, offset
= 0;
287 lck_mtx_lock(&kpc_config_lock
);
289 if (classes
& KPC_CLASS_FIXED_MASK
) {
290 count
= kpc_get_counter_count(KPC_CLASS_FIXED_MASK
);
292 /* convert reload values to periods */
293 for (i
= 0; i
< count
; i
++)
294 val
[i
] = kpc_fixed_max() - FIXED_RELOAD(i
);
299 if (classes
& KPC_CLASS_CONFIGURABLE_MASK
) {
300 count
= kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK
);
302 /* convert reload values to periods */
303 for (i
= 0; i
< count
; i
++)
304 val
[i
+ offset
] = kpc_configurable_max() - CONFIGURABLE_RELOAD(i
);
307 lck_mtx_unlock(&kpc_config_lock
);
312 int kpc_set_actionid(uint32_t classes
, uint32_t *val
)
314 uint32_t count
, offset
= 0;
316 /* NOTE: what happens if a pmi occurs while actionids are being
317 * set is undefined. */
318 lck_mtx_lock(&kpc_config_lock
);
320 if (classes
& KPC_CLASS_FIXED_MASK
) {
321 count
= kpc_get_counter_count(KPC_CLASS_FIXED_MASK
);
323 memcpy(&FIXED_ACTIONID(0), val
, count
*sizeof(uint32_t));
328 if (classes
& KPC_CLASS_CONFIGURABLE_MASK
) {
329 count
= kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK
);
331 memcpy(&CONFIGURABLE_ACTIONID(0), &val
[offset
], count
*sizeof(uint32_t));
334 lck_mtx_unlock(&kpc_config_lock
);
339 int kpc_get_actionid(uint32_t classes
, uint32_t *val
)
341 uint32_t count
, offset
= 0;
343 lck_mtx_lock(&kpc_config_lock
);
345 if (classes
& KPC_CLASS_FIXED_MASK
) {
346 count
= kpc_get_counter_count(KPC_CLASS_FIXED_MASK
);
348 memcpy(val
, &FIXED_ACTIONID(0), count
*sizeof(uint32_t));
353 if (classes
& KPC_CLASS_CONFIGURABLE_MASK
) {
354 count
= kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK
);
356 memcpy(&val
[offset
], &CONFIGURABLE_ACTIONID(0), count
*sizeof(uint32_t));
359 lck_mtx_unlock(&kpc_config_lock
);