]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kperf/action.c
xnu-4570.20.62.tar.gz
[apple/xnu.git] / osfmk / kperf / action.c
CommitLineData
316670eb
A
1/*
2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * Called from a trigger. Actually takes the data from the different
31 * modules and puts them in a buffer
32 */
33
34#include <mach/mach_types.h>
35#include <machine/machine_routines.h>
316670eb
A
36#include <kern/kalloc.h>
37#include <kern/debug.h> /* panic */
38#include <kern/thread.h>
39#include <sys/errno.h>
39037602 40#include <sys/vm.h>
5ba3f43e
A
41#include <vm/vm_page.h>
42#include <vm/vm_pageout.h>
316670eb 43
39037602
A
44#include <kperf/action.h>
45#include <kperf/ast.h>
316670eb 46#include <kperf/buffer.h>
316670eb 47#include <kperf/callstack.h>
316670eb 48#include <kperf/context.h>
39037602
A
49#include <kperf/kdebug_trigger.h>
50#include <kperf/kperf.h>
3e170ce0 51#include <kperf/kperf_kpc.h>
39037602
A
52#include <kperf/kperf_timer.h>
53#include <kperf/pet.h>
54#include <kperf/sample.h>
55#include <kperf/thread_samplers.h>
316670eb 56
39037602 57#define ACTION_MAX (32)
316670eb 58
316670eb
A
/* the list of different actions to take */
struct action
{
	/* bitmask of SAMPLER_* bits selecting which samplers run for this action */
	uint32_t sample;

	/* maximum number of frames to collect for user/kernel callstacks;
	 * 0 is never stored here -- new actions default to MAX_CALLSTACK_FRAMES
	 */
	uint32_t ucallstack_depth;
	uint32_t kcallstack_depth;

	/* opaque user-supplied tag, logged with each sample's START tracepoint */
	uint32_t userdata;
	/* only sample when the context's pid matches; -1 matches any pid */
	int pid_filter;
};

/* the list of actions; grown (never shrunk) by kperf_action_set_count */
static unsigned actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;
316670eb 75
5ba3f43e
A
76bool
77kperf_sample_has_non_system(unsigned actionid)
78{
79 if (actionid > actionc) {
80 return false;
81 }
82
83 if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
84 return true;
85 } else {
86 return false;
87 }
88}
89
/*
 * Emit a single tracepoint carrying system-wide VM page counters:
 * free, wired, external, and total in-use (active + inactive + speculative).
 */
static void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	         (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	         (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	         vm_page_speculative_count));
}
98
/*
 * Core sampling routine: run the samplers selected in sample_what, then log
 * the collected data as a PERF_GEN_EVENT bracketed by START/END tracepoints
 * with interrupts disabled.
 *
 * sbuf             - caller-provided scratch buffer for all sampler output
 * context          - thread/pid/trigger info for this sample
 * sample_what      - SAMPLER_* bitmask of what to collect
 * sample_flags     - SAMPLE_FLAG_* modifiers (pend-to-AST, idle handling, ...)
 * actionid         - action that triggered this sample; 0 means no action
 *                    (used only for callstack depth and userdata lookup here)
 * ucallstack_depth - user callstack depth override; 0 falls back to
 *                    MAX_CALLSTACK_FRAMES
 *
 * Always returns SAMPLE_CONTINUE.
 */
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
                      struct kperf_context *context,
                      unsigned sample_what, unsigned sample_flags,
                      unsigned actionid, uint32_t ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	/* default tag when the actionid is out of range below */
	uint32_t userdata = actionid;

	/* not much point continuing here, but what to do ? return
	 * Shutdown? cut a tracepoint and continue?
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	/* restrict to system-wide samplers only */
	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	/* mark the thread as sampled this PET generation */
	context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	boolean_t is_kernel = (context->cur_pid == 0);

	/* pick callstack depths: per-action if valid, otherwise the maximum */
	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	if (ucallstack_depth) {
		sbuf->ucallstack.nframes = ucallstack_depth;
	} else {
		sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			/* 0x40 in the runmode marks an idle thread --
			 * skip straight to (minimal) logging
			 */
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
			/* outside of interrupt context, backtrace the current thread */
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(&(sbuf->tk_snapshot), context);
	}

	/* sensitive ones */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(&(sbuf->meminfo), context);
		}

		/* either pend user-space work to the thread's AST, or
		 * collect it inline
		 */
		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.nframes);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch = kperf_thread_dispatch_pend(context);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
			}
		}
	}

	/* PMC thread/CPU counters are mutually exclusive (enforced at
	 * configuration time by kperf_action_set_samplers)
	 */
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH)))
	{
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	         actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	/* idle threads get only the system data above and the END event */
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			/* only note that work was pended; the AST handler logs it */
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_log(&(sbuf->ucallstack));
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_log(&(sbuf->th_dispatch));
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
301
302/* Translate actionid into sample bits and take a sample */
303kern_return_t
3e170ce0
A
304kperf_sample(struct kperf_sample *sbuf,
305 struct kperf_context *context,
306 unsigned actionid, unsigned sample_flags)
316670eb 307{
316670eb 308 /* work out what to sample, if anything */
3e170ce0 309 if ((actionid > actionc) || (actionid == 0)) {
316670eb 310 return SAMPLE_SHUTDOWN;
3e170ce0 311 }
316670eb 312
39236c6e
A
313 /* check the pid filter against the context's current pid.
314 * filter pid == -1 means any pid
315 */
39037602 316 int pid_filter = actionv[actionid - 1].pid_filter;
3e170ce0 317 if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
39236c6e 318 return SAMPLE_CONTINUE;
3e170ce0 319 }
39236c6e
A
320
321 /* the samplers to run */
39037602 322 unsigned int sample_what = actionv[actionid - 1].sample;
316670eb 323
39236c6e 324 /* do the actual sample operation */
3e170ce0 325 return kperf_sample_internal(sbuf, context, sample_what,
39037602
A
326 sample_flags, actionid,
327 actionv[actionid - 1].ucallstack_depth);
316670eb
A
328}
329
/*
 * kdebug trigger entry point: if debugid matches a configured kdebug
 * filter, take a sample of the current thread using the kdebug action.
 * May run at interrupt context; user-space work is always pended to AST.
 */
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_context ctx;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	ctx.cur_thread = current_thread();
	ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));
	ctx.trigger_type = TRIGGER_TYPE_KDEBUG;
	ctx.trigger_id = 0;

	/* interrupts off while using the per-CPU interrupt sample buffer */
	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	/* outside interrupt context, backtrace from the caller's frame */
	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}
364
/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

	/* ~2KB of the stack for the sample since this is called from AST */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	task_t task = get_threadtask(thread);

	/* skip tasks in the middle of (or copied for) exec */
	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	/* make a context, take a sample */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = task_pid(task);

	/* decode the flags to determine what to sample */
	unsigned int sample_what = 0;
	uint32_t flags = kperf_get_thread_flags(thread);

	if (flags & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (flags & T_KPERF_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TH_INFO;
	}

	/* depth was stashed in the thread flags by kperf_ast_set_callstack_depth */
	uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

	/* actionid 0: no per-action config applies on this pended path */
	int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}
410
39037602 411/* register AST bits */
39236c6e 412int
39037602 413kperf_ast_pend(thread_t thread, uint32_t set_flags)
39236c6e 414{
39037602
A
415 /* can only pend on the current thread */
416 if (thread != current_thread()) {
417 panic("pending to non-current thread");
418 }
39236c6e 419
39037602
A
420 /* get our current bits */
421 uint32_t flags = kperf_get_thread_flags(thread);
3e170ce0 422
39037602
A
423 /* see if it's already been done or pended */
424 if (!(flags & set_flags)) {
425 /* set the bit on the thread */
426 flags |= set_flags;
427 kperf_set_thread_flags(thread, flags);
3e170ce0 428
39037602
A
429 /* set the actual AST */
430 act_set_kperf(thread);
431 return 1;
432 }
39236c6e
A
433
434 return 0;
435}
436
39236c6e 437void
39037602 438kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
39236c6e 439{
39037602
A
440 uint32_t ast_flags = kperf_get_thread_flags(thread);
441 uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);
39236c6e 442
39037602
A
443 if (existing_callstack_depth != depth) {
444 ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(depth);
445 ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
3e170ce0 446
39037602 447 kperf_set_thread_flags(thread, ast_flags);
3e170ce0
A
448 }
449}
450
3e170ce0
A
451int
452kperf_kdbg_cswitch_get(void)
453{
39037602 454 return kperf_kdebug_cswitch;
3e170ce0
A
455}
456
457int
458kperf_kdbg_cswitch_set(int newval)
459{
39037602
A
460 kperf_kdebug_cswitch = newval;
461 kperf_on_cpu_update();
3e170ce0
A
462
463 return 0;
39236c6e
A
464}
465
466/*
467 * Action configuration
468 */
39037602 469unsigned int
316670eb
A
470kperf_action_get_count(void)
471{
472 return actionc;
473}
474
475int
3e170ce0 476kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
316670eb 477{
3e170ce0
A
478 if ((actionid > actionc) || (actionid == 0)) {
479 return EINVAL;
480 }
481
482 /* disallow both CPU and thread counters to be sampled in the same
483 * action */
484 if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
316670eb 485 return EINVAL;
3e170ce0 486 }
316670eb 487
3e170ce0 488 actionv[actionid - 1].sample = samplers;
316670eb
A
489
490 return 0;
491}
492
493int
3e170ce0 494kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
316670eb 495{
3e170ce0 496 if ((actionid > actionc)) {
316670eb 497 return EINVAL;
3e170ce0 498 }
316670eb 499
3e170ce0 500 if (actionid == 0) {
39236c6e 501 *samplers_out = 0; /* "NULL" action */
3e170ce0
A
502 } else {
503 *samplers_out = actionv[actionid - 1].sample;
504 }
39236c6e
A
505
506 return 0;
507}
508
509int
3e170ce0 510kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
39236c6e 511{
3e170ce0 512 if ((actionid > actionc) || (actionid == 0)) {
39236c6e 513 return EINVAL;
3e170ce0 514 }
39236c6e 515
3e170ce0 516 actionv[actionid - 1].userdata = userdata;
39236c6e
A
517
518 return 0;
519}
520
521int
3e170ce0 522kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
39236c6e 523{
3e170ce0 524 if ((actionid > actionc)) {
39236c6e 525 return EINVAL;
3e170ce0 526 }
39236c6e 527
3e170ce0 528 if (actionid == 0) {
39236c6e 529 *userdata_out = 0; /* "NULL" action */
3e170ce0
A
530 } else {
531 *userdata_out = actionv[actionid - 1].userdata;
532 }
39236c6e
A
533
534 return 0;
535}
536
537int
3e170ce0 538kperf_action_set_filter(unsigned actionid, int pid)
39236c6e 539{
3e170ce0 540 if ((actionid > actionc) || (actionid == 0)) {
39236c6e 541 return EINVAL;
3e170ce0 542 }
39236c6e 543
3e170ce0 544 actionv[actionid - 1].pid_filter = pid;
39236c6e
A
545
546 return 0;
547}
548
549int
3e170ce0 550kperf_action_get_filter(unsigned actionid, int *pid_out)
39236c6e 551{
3e170ce0 552 if ((actionid > actionc)) {
39236c6e 553 return EINVAL;
3e170ce0 554 }
39236c6e 555
3e170ce0 556 if (actionid == 0) {
39236c6e 557 *pid_out = -1; /* "NULL" action */
3e170ce0
A
558 } else {
559 *pid_out = actionv[actionid - 1].pid_filter;
560 }
316670eb
A
561
562 return 0;
563}
564
39037602
A
565void
566kperf_action_reset(void)
567{
568 for (unsigned int i = 0; i < actionc; i++) {
569 kperf_action_set_samplers(i + 1, 0);
570 kperf_action_set_userdata(i + 1, 0);
571 kperf_action_set_filter(i + 1, -1);
572 kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
573 kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
574 }
575}
576
316670eb
A
/*
 * Grow the action array to count entries, initializing the new entries to
 * defaults (no samplers, any pid, maximum callstack depth).  Shrinking is
 * not supported.  Returns 0, EINVAL, ENOMEM, or an error from kperf_init.
 *
 * NOTE(review): no locking is visible here; presumably the caller
 * serializes configuration -- confirm before calling concurrently.
 */
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array (count <= ACTION_MAX, so no overflow) */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	/* carry over the existing configuration */
	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	/* zero the new tail, then apply non-zero defaults */
	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
	}

	/* publish the new array before freeing the old one */
	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}
39037602
A
638
639int
640kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
641{
642 if ((action_id > actionc) || (action_id == 0)) {
643 return EINVAL;
644 }
645
646 if (depth > MAX_CALLSTACK_FRAMES) {
647 return EINVAL;
648 }
649
650 actionv[action_id - 1].ucallstack_depth = depth;
651
652 return 0;
653}
654
655int
656kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
657{
658 if ((action_id > actionc) || (action_id == 0)) {
659 return EINVAL;
660 }
661
662 if (depth > MAX_CALLSTACK_FRAMES) {
663 return EINVAL;
664 }
665
666 actionv[action_id - 1].kcallstack_depth = depth;
667
668 return 0;
669}
670
671int
672kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t * depth_out)
673{
674 if ((action_id > actionc)) {
675 return EINVAL;
676 }
677
678 assert(depth_out);
679
680 if (action_id == 0) {
681 *depth_out = MAX_CALLSTACK_FRAMES;
682 } else {
683 *depth_out = actionv[action_id - 1].ucallstack_depth;
684 }
685
686 return 0;
687}
688
689int
690kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t * depth_out)
691{
692 if ((action_id > actionc)) {
693 return EINVAL;
694 }
695
696 assert(depth_out);
697
698 if (action_id == 0) {
699 *depth_out = MAX_CALLSTACK_FRAMES;
700 } else {
701 *depth_out = actionv[action_id - 1].kcallstack_depth;
702 }
703
704 return 0;
705}