/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Actually takes the data from the different
 * modules and puts them in a buffer
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;           /* bitmask of samplers to run */
	uint32_t ucallstack_depth; /* max user callstack frames to record */
	uint32_t kcallstack_depth; /* max kernel callstack frames to record */
	uint32_t userdata;         /* opaque tag logged with each sample */
	int pid_filter;            /* only sample this pid; -1 means any pid */
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

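/* Does the action sample anything beyond system-wide memory counters? */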
bool
kperf_action_has_non_system(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
		return true;
	} else {
		return false;
	}
}

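/* Does the action include any task-scoped samplers? */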
bool
kperf_action_has_task(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}

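/* Does the action include any thread-scoped samplers? */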
bool
kperf_action_has_thread(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}

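/* Emit system-wide memory counters into the trace buffer. */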
static void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

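/*
 * Collect the samplers requested in sample_what into sbuf for the given
 * context, then log them to the trace buffer with interrupts disabled so a
 * sample isn't split.  User-space work (user callstacks, dispatch queue
 * info) may instead be pended to an AST when SAMPLE_FLAG_PEND_USER is set.
 */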
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, uint32_t ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = false;

	/* Nothing was requested, so there's not much point continuing; but
	 * what to do? Return shutdown? Cut a tracepoint and continue?
	 */
	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		task_only = true;
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (!task_only) {
		context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	}
	bool is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	if (ucallstack_depth) {
		sbuf->ucallstack.nframes = ucallstack_depth;
	} else {
		sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
	}

	sbuf->kcallstack.flags = CALLSTACK_VALID;
	sbuf->ucallstack.flags = CALLSTACK_VALID;

	/* an event occurred. Sample everything and dump it in a
	 * buffer.
	 */

	/* collect data from samplers */
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		/* See if we should drop idle thread samples */
		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
			/* outside of interrupt context, backtrace the current thread */
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	/* sensitive ones */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.nframes);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch = kperf_thread_dispatch_pend(context);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_sample(&(sbuf->ucallstack), context);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		/* dump meminfo */
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		} else {
			if (sample_what & SAMPLER_USTACK) {
				kperf_ucallstack_log(&(sbuf->ucallstack));
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				kperf_thread_dispatch_log(&(sbuf->th_dispatch));
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	/* intrs back on */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	           sample_flags, actionid,
	           actionv[actionid - 1].ucallstack_depth);
}

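/*
 * Trigger handler for kdebug events: when the debugid matches the configured
 * kdebug filter, sample the current thread into the interrupt sample buffer,
 * pending user-space work to an AST.
 */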
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
		.trigger_id = 0,
	};

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

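/*
 * Handle the kperf AST: gather the user-space data (user callstack and/or
 * dispatch queue info) that a sampler pended earlier for this thread, then
 * log it.
 */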
/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

	/* ~2KB of the stack for the sample since this is called from AST */
	struct kperf_sample sbuf;
	memset(&sbuf, 0, sizeof(struct kperf_sample));

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	/* make a context, take a sample */
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	/* decode the flags to determine what to sample */
	unsigned int sample_what = 0;
	uint32_t flags = kperf_get_thread_flags(thread);

	if (flags & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (flags & T_KPERF_AST_CALLSTACK) {
		sample_what |= SAMPLER_USTACK;
		sample_what |= SAMPLER_TH_INFO;
	}

	uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

	int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/*
 * Register AST bits on the current thread.  Returns 1 if this call set the
 * flags and raised the AST, 0 if the work was already pended.
 */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
	/* can only pend on the current thread */
	if (thread != current_thread()) {
		panic("pending to non-current thread");
	}

	/* get our current bits */
	uint32_t flags = kperf_get_thread_flags(thread);

	/* see if it's already been done or pended */
	if (!(flags & set_flags)) {
		/* set the bit on the thread */
		flags |= set_flags;
		kperf_set_thread_flags(thread, flags);

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}

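/* Record the requested user callstack depth in the thread's AST flags. */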
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast_flags = kperf_get_thread_flags(thread);
	uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

	if (existing_callstack_depth != depth) {
		/* clear the old encoded depth before setting the new one */
		ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_callstack_depth);
		ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);

		kperf_set_thread_flags(thread, ast_flags);
	}
}

int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}

/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}

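/*
 * Action IDs are 1-based; action 0 is the reserved "NULL" action.  The
 * setters below reject it, while the getters report its defaults.
 */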
int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}

void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
	}
}

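/*
 * Grow the action array to `count` entries (shrinking is not supported).
 * Existing actions are preserved; new entries start with no samplers, no
 * pid filter, and full callstack depths.
 */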
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		int r;
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}

int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_CALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	assert(depth_out);

	if (action_id == 0) {
		*depth_out = MAX_CALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}