/* apple/xnu (xnu-6153.41.3) - osfmk/kperf/action.c */

/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger: collects the data requested by the action's
 * samplers and emits it into the trace buffer.
 */
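
/*
 * Rough flow: user space configures an array of actions (see
 * kperf_action_set_count() and the kperf_action_set_*() calls below), each
 * identified by a 1-based action ID.  A trigger (such as the kdebug trigger
 * handled by kperf_kdebug_handler()) calls kperf_sample() with an action ID;
 * the action's sampler bits select what gets sampled and logged.  Work that
 * must run in the target thread's own context (user call stacks, dispatch
 * queue info) is pended with an AST and finished in
 * kperf_thread_ast_handler().
 */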

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;           /* samplers to run (SAMPLER_* bits) */
	uint32_t ucallstack_depth; /* max user call stack frames to record */
	uint32_t kcallstack_depth; /* max kernel call stack frames to record */
	uint32_t userdata;         /* opaque tag logged with each sample */
	int pid_filter;            /* only sample this pid; -1 means any pid */
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

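/*
 * Report what an action is configured to sample: anything beyond
 * system-wide memory counters, any task-scoped samplers, or any
 * thread-scoped samplers.
 */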
bool
kperf_action_has_non_system(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
		return true;
	} else {
		return false;
	}
}

bool
kperf_action_has_task(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}

bool
kperf_action_has_thread(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}

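/*
 * Log system-wide memory counters (free, wired, external, anonymous, and
 * internal page counts, plus compressor statistics) to the trace buffer.
 */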
static void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

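/*
 * Run the samplers that need the current thread's own context (user call
 * stack, thread dispatch, and thread info), then log the results between
 * PERF_GEN_EVENT start/end events with interrupts disabled.
 */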
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_sample(&sbuf->th_dispatch, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_log(&sbuf->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}

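/*
 * Run the user-context portion of an action, applying the sample_flags
 * restrictions (empty call stack, system-only, thread-only, task-only)
 * before handing off to kperf_sample_user_internal().
 */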
void
kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
    unsigned int actionid, unsigned int sample_flags)
{
	if (actionid == 0 || actionid > actionc) {
		return;
	}

	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}
	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}
	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (sample_what == 0) {
		return;
	}

	sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?:
	    MAX_UCALLSTACK_FRAMES;

	kperf_sample_user_internal(sbuf, context, actionid, sample_what);
}

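/*
 * The core sampling routine: run each sampler selected by sample_what (as
 * restricted by sample_flags), then log the results with interrupts
 * disabled.  User-context samplers are pended to an AST when
 * SAMPLE_FLAG_PEND_USER is set, and idle threads are skipped unless
 * SAMPLE_FLAG_IDLE_THREADS is passed.
 */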
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = false;

	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		task_only = true;
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (!task_only) {
		context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	}
	bool is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	    sample_flags, actionid, ucallstack_depth);
}

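/*
 * Called for kdebug events: if the debugid matches a configured kdebug
 * trigger, sample the current thread with the kdebug trigger's action.
 */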
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
	};

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

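/*
 * Complete the work pended by kperf_ast_pend(): read the flags and action
 * ID packed into thread->kperf_ast and take the corresponding user-context
 * sample for the thread.
 */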
/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	uint32_t ast = thread->kperf_ast;

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

	struct kperf_usample sbuf = {};

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	unsigned int sample_what = 0;
	if (ast & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (ast & T_KPERF_AST_CALLSTACK) {
		/* TH_INFO for backwards compatibility */
		sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
	}

	sbuf.ucallstack.kpuc_nframes =
	    T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	kperf_sample_user_internal(&sbuf, &ctx, actionid, sample_what);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}

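/*
 * Pend a kperf AST on the current thread with the given flags and action
 * ID.  Returns 1 if the pending request was updated and the kperf AST was
 * set, or 0 if an equivalent request was already pending.
 */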
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
	if (thread != current_thread()) {
		panic("kperf: pending AST to non-current thread");
	}

	uint32_t ast = thread->kperf_ast;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	uint32_t flags = ast & T_KPERF_AST_ALL;

	if ((flags | set_flags) != flags || actionid != set_actionid) {
		ast &= ~T_KPERF_SET_ACTIONID(actionid);
		ast |= T_KPERF_SET_ACTIONID(set_actionid);
		ast |= set_flags;

		thread->kperf_ast = ast;

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

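/*
 * Grow the user call stack depth encoded in the thread's kperf AST; the
 * recorded depth is never decreased.
 */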
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast = thread->kperf_ast;
	uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
	if (existing_depth < depth) {
		ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
		ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
		thread->kperf_ast = ast;
	}
}

int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
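/*
 * Action IDs are 1-based; ID 0 is the "NULL" action, for which the getters
 * below report default values (no samplers, no userdata, a pid filter of
 * -1, and the maximum call stack depths).
 */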
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

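/* reset every action to its default configuration */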
void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
	}
}

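/*
 * Grow the action array to `count` entries (shrinking is currently not
 * supported), initializing any new actions to their defaults.  The first
 * allocation also initializes the rest of kperf via kperf_init().
 */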
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

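/*
 * Call stack depth limits for an action: user call stacks must record at
 * least 2 frames and kernel call stacks at least 1, up to
 * MAX_UCALLSTACK_FRAMES and MAX_KCALLSTACK_FRAMES respectively.
 */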
int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_UCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 2) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_KCALLSTACK_FRAMES) {
		return EINVAL;
	}
	if (depth < 1) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_UCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_KCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}