/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Gathers the data from the different sampler
 * modules and puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* describes a single action to take when a trigger fires */
struct action {
    uint32_t sample;           /* mask of SAMPLER_* bits to collect */
    uint32_t ucallstack_depth; /* maximum user callstack frames */
    uint32_t kcallstack_depth; /* maximum kernel callstack frames */
    uint32_t userdata;         /* opaque tag logged with each sample */
    int pid_filter;            /* only sample this pid; -1 means any pid */
};
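
/*
 * Actions are addressed by a 1-based actionid into the actionv array
 * below; actionid 0 is the "NULL" action, meaning no sampling is
 * configured.
 */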

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

bool
kperf_action_has_non_system(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
        return true;
    } else {
        return false;
    }
}

bool
kperf_action_has_task(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}

bool
kperf_action_has_thread(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}

static void
kperf_system_memory_log(void)
{
    BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
        (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
        (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
        vm_page_speculative_count));
    BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
        (uintptr_t)vm_page_internal_count,
        (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
        (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

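/*
 * Collect the samplers requested by sample_what into sbuf, then log them to
 * the trace buffer between PERF_GEN_EVENT START/END events with interrupts
 * disabled so the records are not split.  When SAMPLE_FLAG_PEND_USER is set,
 * the user-space samplers (user callstack, thread dispatch) are pended to
 * the thread instead of being sampled inline.
 */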
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
    int pended_ucallstack = 0;
    int pended_th_dispatch = 0;
    bool on_idle_thread = false;
    uint32_t userdata = actionid;
    bool task_only = false;

    /* not much point continuing here, but what to do? return
     * shutdown? cut a tracepoint and continue?
     */
    if (sample_what == 0) {
        return SAMPLE_CONTINUE;
    }

    /* callstacks should be explicitly ignored */
    if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
        sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
    }

    if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
        sample_what &= SAMPLER_SYS_MEM;
    }

    assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
        != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
    if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
        sample_what &= SAMPLER_THREAD_MASK;
    }
    if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
        task_only = true;
        sample_what &= SAMPLER_TASK_MASK;
    }

    if (!task_only) {
        context->cur_thread->kperf_pet_gen = kperf_pet_gen;
    }
    bool is_kernel = (context->cur_pid == 0);

    if (actionid && actionid <= actionc) {
        sbuf->kcallstack.kpkc_nframes =
            actionv[actionid - 1].kcallstack_depth;
    } else {
        sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
    }

    if (ucallstack_depth) {
        sbuf->ucallstack.kpuc_nframes = ucallstack_depth;
    } else {
        sbuf->ucallstack.kpuc_nframes = MAX_UCALLSTACK_FRAMES;
    }

    sbuf->kcallstack.kpkc_flags = 0;
    sbuf->ucallstack.kpuc_flags = 0;

    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_sample(&sbuf->th_info, context);

        if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
            /* the 0x40 bit of the run mode marks an idle thread */
            if (sbuf->th_info.kpthi_runmode & 0x40) {
                on_idle_thread = true;
                goto log_sample;
            }
        }
    }

    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
    }
    if (sample_what & SAMPLER_KSTACK) {
        if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
            kperf_continuation_sample(&(sbuf->kcallstack), context);
            /* outside of interrupt context, backtrace the current thread */
        } else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
            kperf_backtrace_sample(&(sbuf->kcallstack), context);
        } else {
            kperf_kcallstack_sample(&(sbuf->kcallstack), context);
        }
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
    }

    /* sensitive ones */
    if (!is_kernel) {
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (sample_what & SAMPLER_USTACK) {
                pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.kpuc_nframes);
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                pended_th_dispatch = kperf_thread_dispatch_pend(context);
            }
        } else {
            if (sample_what & SAMPLER_USTACK) {
                kperf_ucallstack_sample(&(sbuf->ucallstack), context);
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
            }
        }
    }

    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
    }

log_sample:
    /* lookup the user tag, if any */
    if (actionid && (actionid <= actionc)) {
        userdata = actionv[actionid - 1].userdata;
    }

    /* avoid logging if this sample only pended samples */
    if (sample_flags & SAMPLE_FLAG_PEND_USER &&
        !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
        return SAMPLE_CONTINUE;
    }

    /* stash the data into the buffer
     * interrupts off to ensure we don't get split
     */
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
        actionid, userdata, sample_flags);

    if (sample_flags & SAMPLE_FLAG_SYSTEM) {
        if (sample_what & SAMPLER_SYS_MEM) {
            kperf_system_memory_log();
        }
    }
    if (on_idle_thread) {
        goto log_sample_end;
    }

    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_log(&sbuf->th_info);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_log(&(sbuf->th_scheduling));
    }
    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_log(&(sbuf->th_snapshot));
    }
    if (sample_what & SAMPLER_KSTACK) {
        kperf_kcallstack_log(&sbuf->kcallstack);
    }
    if (sample_what & SAMPLER_TH_INSCYC) {
        kperf_thread_inscyc_log(context);
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_log(&(sbuf->tk_snapshot));
    }
    if (sample_what & SAMPLER_TK_INFO) {
        kperf_task_info_log(context);
    }

    /* dump user stuff */
    if (!is_kernel) {
        /* dump meminfo */
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_log(&(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (pended_ucallstack) {
                BUF_INFO(PERF_CS_UPEND);
            }

            if (pended_th_dispatch) {
                BUF_INFO(PERF_TI_DISPPEND);
            }
        } else {
            if (sample_what & SAMPLER_USTACK) {
                kperf_ucallstack_log(&(sbuf->ucallstack));
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                kperf_thread_dispatch_log(&(sbuf->th_dispatch));
            }
        }
    }

    if (sample_what & SAMPLER_PMC_CONFIG) {
        kperf_kpc_config_log(&(sbuf->kpcdata));
    }
    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_log(&(sbuf->kpcdata));
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_log(&(sbuf->kpcdata));
    }

log_sample_end:
    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

    /* intrs back on */
    ml_set_interrupts_enabled(enabled);

    return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
    /* work out what to sample, if anything */
    if ((actionid > actionc) || (actionid == 0)) {
        return SAMPLE_SHUTDOWN;
    }

    /* check the pid filter against the context's current pid.
     * filter pid == -1 means any pid
     */
    int pid_filter = actionv[actionid - 1].pid_filter;
    if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
        return SAMPLE_CONTINUE;
    }

    /* the samplers to run */
    unsigned int sample_what = actionv[actionid - 1].sample;

    /* do the actual sample operation */
    return kperf_sample_internal(sbuf, context, sample_what,
               sample_flags, actionid,
               actionv[actionid - 1].ucallstack_depth);
}

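/*
 * kdebug trigger: invoked when a kdebug tracepoint that has been configured
 * as a kperf trigger fires.  User-space samplers are pended to the thread
 * via SAMPLE_FLAG_PEND_USER rather than being taken inline.
 */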
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
    uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
    struct kperf_sample *sample = NULL;
    kern_return_t kr = KERN_SUCCESS;
    int s;

    if (!kperf_kdebug_should_trigger(debugid)) {
        return;
    }

    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);
    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
        .trigger_type = TRIGGER_TYPE_KDEBUG,
        .trigger_id = 0,
    };

    s = ml_set_interrupts_enabled(0);

    sample = kperf_intr_sample_buffer();

    if (!ml_at_interrupt_context()) {
        sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
        ctx.starting_fp = starting_fp;
    }

    kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

    ml_set_interrupts_enabled(s);
    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

    /* ~2KB of the stack for the sample since this is called from AST */
    struct kperf_sample sbuf;
    memset(&sbuf, 0, sizeof(struct kperf_sample));

    task_t task = get_threadtask(thread);

    if (task_did_exec(task) || task_is_exec_copy(task)) {
        BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
        return;
    }

    /* make a context, take a sample */
    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
    };

    /* decode the flags to determine what to sample */
    unsigned int sample_what = 0;
    uint32_t flags = kperf_get_thread_flags(thread);

    if (flags & T_KPERF_AST_DISPATCH) {
        sample_what |= SAMPLER_TH_DISPATCH;
    }
    if (flags & T_KPERF_AST_CALLSTACK) {
        sample_what |= SAMPLER_USTACK;
        sample_what |= SAMPLER_TH_INFO;
    }

    uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

    int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}
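
/*
 * The T_KPERF_AST_* flags consumed above are set on the thread by
 * kperf_ast_pend() below; the user callstack and thread dispatch pend
 * routines presumably use it when a sample defers user-space work to the
 * AST.
 */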

/*
 * Register AST bits for the current thread.  Returns 1 if the AST was newly
 * pended, 0 if the requested bits were already set.
 */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
    /* can only pend on the current thread */
    if (thread != current_thread()) {
        panic("pending to non-current thread");
    }

    /* get our current bits */
    uint32_t flags = kperf_get_thread_flags(thread);

    /* see if it's already been done or pended */
    if (!(flags & set_flags)) {
        /* set the bit on the thread */
        flags |= set_flags;
        kperf_set_thread_flags(thread, flags);

        /* set the actual AST */
        act_set_kperf(thread);
        return 1;
    }

    return 0;
}
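
/*
 * Illustrative sketch only (the real pend routines live elsewhere): a
 * sampler that wants a user callstack gathered on the way back to user
 * space might do
 *
 *     int did_pend = kperf_ast_pend(thread, T_KPERF_AST_CALLSTACK);
 *     kperf_ast_set_callstack_depth(thread, depth);
 *
 * i.e. mark the work to be done and record the deepest callstack requested.
 */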

void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
    uint32_t ast_flags = kperf_get_thread_flags(thread);
    uint32_t existing_callstack_depth =
        T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

    if (existing_callstack_depth < depth) {
        /* clear the old depth before encoding the deeper one */
        ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_callstack_depth);
        ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
        kperf_set_thread_flags(thread, ast_flags);
    }
}

int
kperf_kdbg_cswitch_get(void)
{
    return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
    kperf_kdebug_cswitch = newval;
    kperf_on_cpu_update();

    return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
    return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    /* disallow both CPU and thread counters to be sampled in the same
     * action */
    if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
        return EINVAL;
    }

    actionv[actionid - 1].sample = samplers;

    return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *samplers_out = 0; /* "NULL" action */
    } else {
        *samplers_out = actionv[actionid - 1].sample;
    }

    return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].userdata = userdata;

    return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *userdata_out = 0; /* "NULL" action */
    } else {
        *userdata_out = actionv[actionid - 1].userdata;
    }

    return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].pid_filter = pid;

    return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
    if ((actionid > actionc)) {
        return EINVAL;
    }

    if (actionid == 0) {
        *pid_out = -1; /* "NULL" action */
    } else {
        *pid_out = actionv[actionid - 1].pid_filter;
    }

    return 0;
}

void
kperf_action_reset(void)
{
    for (unsigned int i = 0; i < actionc; i++) {
        kperf_action_set_samplers(i + 1, 0);
        kperf_action_set_userdata(i + 1, 0);
        kperf_action_set_filter(i + 1, -1);
        kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
        kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
    }
}

int
kperf_action_set_count(unsigned count)
{
    struct action *new_actionv = NULL, *old_actionv = NULL;
    unsigned old_count;

    /* easy no-op */
    if (count == actionc) {
        return 0;
    }

    /* TODO: allow shrinking? */
    if (count < actionc) {
        return EINVAL;
    }

    /* cap it for good measure */
    if (count > ACTION_MAX) {
        return EINVAL;
    }

    /* creating the action array for the first time: initialize the rest
     * of kperf, too
     */
    if (actionc == 0) {
        int r;
        if ((r = kperf_init())) {
            return r;
        }
    }

    /* create a new array */
    new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
    if (new_actionv == NULL) {
        return ENOMEM;
    }

    old_actionv = actionv;
    old_count = actionc;

    if (old_actionv != NULL) {
        memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
    }

    memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

    for (unsigned int i = old_count; i < count; i++) {
        new_actionv[i].pid_filter = -1;
        new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
        new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
    }

    actionv = new_actionv;
    actionc = count;

    if (old_actionv != NULL) {
        kfree(old_actionv, old_count * sizeof(*actionv));
    }

    return 0;
}
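
/*
 * Sketch of a typical configuration sequence using the setters in this file
 * (normally driven from the kperf sysctl interface; the exact call path is
 * outside this file):
 *
 *     kperf_action_set_count(1);
 *     kperf_action_set_samplers(1, SAMPLER_TH_INFO | SAMPLER_KSTACK);
 *     kperf_action_set_filter(1, -1);    // any pid
 *     kperf_action_set_ucallstack_depth(1, MAX_UCALLSTACK_FRAMES);
 */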

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_UCALLSTACK_FRAMES) {
        return EINVAL;
    }

    actionv[action_id - 1].ucallstack_depth = depth;

    return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_KCALLSTACK_FRAMES) {
        return EINVAL;
    }

    actionv[action_id - 1].kcallstack_depth = depth;

    return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if ((action_id > actionc)) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_UCALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].ucallstack_depth;
    }

    return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if ((action_id > actionc)) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_KCALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].kcallstack_depth;
    }

    return 0;
}