/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Called from a trigger. Actually takes the data from the different
 * modules and puts them in a buffer
 */
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>
#define ACTION_MAX (32)

/* the list of different actions to take */
struct action {
	uint32_t sample;
	uint32_t ucallstack_depth;
	uint32_t kcallstack_depth;
	uint32_t userdata;
	int pid_filter;
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;
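
/*
 * Note: action IDs used throughout this file are 1-based; an actionid of
 * zero means "no action".  Entries live at actionv[actionid - 1].
 */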
bool
kperf_action_has_non_system(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
		return true;
	}

	return false;
}
bool
kperf_action_has_task(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_TASK_MASK;
}
bool
kperf_action_has_thread(unsigned int actionid)
{
	if (actionid > actionc) {
		return false;
	}

	return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK;
}
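
/*
 * Emit a snapshot of the system memory counters (free, wired, external,
 * active/inactive/speculative, anonymous, internal, compressions, and
 * compressor-held pages) into the trace buffer.
 */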
void
kperf_system_memory_log(void)
{
	BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
	    (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
	    (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
	    vm_page_speculative_count));
	BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
	    (uintptr_t)vm_page_internal_count,
	    (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
	    (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}
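
/*
 * Gather the user-space samplers (user callstack, thread dispatch, thread
 * info) and then log them as one event, with interrupts disabled so the
 * record is not split in the trace buffer.
 */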
static void
kperf_sample_user_internal(struct kperf_usample *sbuf,
    struct kperf_context *context, unsigned int actionid,
    unsigned int sample_what)
{
	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_sample(&sbuf->ucallstack, context);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_sample(&sbuf->th_dispatch, context);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);
	}

	boolean_t intren = ml_set_interrupts_enabled(FALSE);

	/*
	 * No userdata or sample_flags for this one.
	 */
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, actionid);

	if (sample_what & SAMPLER_USTACK) {
		kperf_ucallstack_log(&sbuf->ucallstack);
	}
	if (sample_what & SAMPLER_TH_DISPATCH) {
		kperf_thread_dispatch_log(&sbuf->th_dispatch);
	}
	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

	ml_set_interrupts_enabled(intren);
}
void
kperf_sample_user(struct kperf_usample *sbuf, struct kperf_context *context,
    unsigned int actionid, unsigned int sample_flags)
{
	if (actionid == 0 || actionid > actionc) {
		return;
	}

	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}
	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}
	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (sample_what == 0) {
		return;
	}

	sbuf->ucallstack.kpuc_nframes = ucallstack_depth ?:
	    MAX_UCALLSTACK_FRAMES;

	kperf_sample_user_internal(sbuf, context, actionid, sample_what);
}
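
/*
 * Core sampling routine: masks the requested samplers according to
 * sample_flags, collects each sampler's data into sbuf, and then emits the
 * records between PERF_GEN_EVENT start/end tracepoints with interrupts
 * disabled.  When SAMPLE_FLAG_PEND_USER is set, user-space samplers are
 * pended to run later from the AST handler instead of being logged here.
 */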
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, unsigned ucallstack_depth)
{
	int pended_ucallstack = 0;
	int pended_th_dispatch = 0;
	bool on_idle_thread = false;
	uint32_t userdata = actionid;
	bool task_only = false;

	if (sample_what == 0) {
		return SAMPLE_CONTINUE;
	}

	/* callstacks should be explicitly ignored */
	if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
		sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
	}

	if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
		sample_what &= SAMPLER_SYS_MEM;
	}

	assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
	    != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
	if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
		sample_what &= SAMPLER_THREAD_MASK;
	}
	if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
		task_only = true;
		sample_what &= SAMPLER_TASK_MASK;
	}

	if (!task_only) {
		context->cur_thread->kperf_pet_gen = kperf_pet_gen;
	}
	bool is_kernel = (context->cur_pid == 0);

	if (actionid && actionid <= actionc) {
		sbuf->kcallstack.kpkc_nframes =
		    actionv[actionid - 1].kcallstack_depth;
	} else {
		sbuf->kcallstack.kpkc_nframes = MAX_KCALLSTACK_FRAMES;
	}

	ucallstack_depth = ucallstack_depth ?: MAX_UCALLSTACK_FRAMES;
	sbuf->kcallstack.kpkc_flags = 0;
	sbuf->usample.ucallstack.kpuc_flags = 0;

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_sample(&sbuf->th_info, context);

		if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
			if (sbuf->th_info.kpthi_runmode & 0x40) {
				on_idle_thread = true;
				goto log_sample;
			}
		}
	}

	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
	}
	if (sample_what & SAMPLER_KSTACK) {
		if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
			kperf_continuation_sample(&(sbuf->kcallstack), context);
		} else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
			/* outside of interrupt context, backtrace the current thread */
			kperf_backtrace_sample(&(sbuf->kcallstack), context);
		} else {
			kperf_kcallstack_sample(&(sbuf->kcallstack), context);
		}
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
	}

	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (sample_what & SAMPLER_USTACK) {
				pended_ucallstack = kperf_ucallstack_pend(context,
				    ucallstack_depth, actionid);
			}

			if (sample_what & SAMPLER_TH_DISPATCH) {
				pended_th_dispatch =
				    kperf_thread_dispatch_pend(context, actionid);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
	}

log_sample:
	/* lookup the user tag, if any */
	if (actionid && (actionid <= actionc)) {
		userdata = actionv[actionid - 1].userdata;
	}

	/* avoid logging if this sample only pended samples */
	if (sample_flags & SAMPLE_FLAG_PEND_USER &&
	    !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) {
		return SAMPLE_CONTINUE;
	}

	/* stash the data into the buffer
	 * interrupts off to ensure we don't get split
	 */
	boolean_t enabled = ml_set_interrupts_enabled(FALSE);

	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
	    actionid, userdata, sample_flags);

	if (sample_flags & SAMPLE_FLAG_SYSTEM) {
		if (sample_what & SAMPLER_SYS_MEM) {
			kperf_system_memory_log();
		}
	}
	if (on_idle_thread) {
		goto log_sample_end;
	}

	if (sample_what & SAMPLER_TH_INFO) {
		kperf_thread_info_log(&sbuf->th_info);
	}
	if (sample_what & SAMPLER_TH_SCHEDULING) {
		kperf_thread_scheduling_log(&(sbuf->th_scheduling));
	}
	if (sample_what & SAMPLER_TH_SNAPSHOT) {
		kperf_thread_snapshot_log(&(sbuf->th_snapshot));
	}
	if (sample_what & SAMPLER_KSTACK) {
		kperf_kcallstack_log(&sbuf->kcallstack);
	}
	if (sample_what & SAMPLER_TH_INSCYC) {
		kperf_thread_inscyc_log(context);
	}
	if (sample_what & SAMPLER_TK_SNAPSHOT) {
		kperf_task_snapshot_log(&(sbuf->tk_snapshot));
	}
	if (sample_what & SAMPLER_TK_INFO) {
		kperf_task_info_log(context);
	}

	/* dump user stuff */
	if (!is_kernel) {
		if (sample_what & SAMPLER_MEMINFO) {
			kperf_meminfo_log(&(sbuf->meminfo));
		}

		if (sample_flags & SAMPLE_FLAG_PEND_USER) {
			if (pended_ucallstack) {
				BUF_INFO(PERF_CS_UPEND);
			}

			if (pended_th_dispatch) {
				BUF_INFO(PERF_TI_DISPPEND);
			}
		}
	}

	if (sample_what & SAMPLER_PMC_CONFIG) {
		kperf_kpc_config_log(&(sbuf->kpcdata));
	}
	if (sample_what & SAMPLER_PMC_THREAD) {
		kperf_kpc_thread_log(&(sbuf->kpcdata));
	} else if (sample_what & SAMPLER_PMC_CPU) {
		kperf_kpc_cpu_log(&(sbuf->kpcdata));
	}

log_sample_end:
	BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
	/* work out what to sample, if anything */
	if ((actionid > actionc) || (actionid == 0)) {
		return SAMPLE_SHUTDOWN;
	}

	/* check the pid filter against the context's current pid.
	 * filter pid == -1 means any pid
	 */
	int pid_filter = actionv[actionid - 1].pid_filter;
	if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
		return SAMPLE_CONTINUE;
	}

	/* the samplers to run */
	unsigned int sample_what = actionv[actionid - 1].sample;
	unsigned int ucallstack_depth = actionv[actionid - 1].ucallstack_depth;

	/* do the actual sample operation */
	return kperf_sample_internal(sbuf, context, sample_what,
	           sample_flags, actionid, ucallstack_depth);
}
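
/*
 * Invoked from the kdebug path when an emitted event matches a configured
 * kperf kdebug trigger.  Runs the configured action against the current
 * thread, pending user-space samplers to the AST handler.
 */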
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
	uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
	struct kperf_sample *sample = NULL;
	kern_return_t kr = KERN_SUCCESS;
	int s;

	if (!kperf_kdebug_should_trigger(debugid)) {
		return;
	}

	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_KDEBUG,
	};

	s = ml_set_interrupts_enabled(0);

	sample = kperf_intr_sample_buffer();

	if (!ml_at_interrupt_context()) {
		sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
		ctx.starting_fp = starting_fp;
	}

	kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

	ml_set_interrupts_enabled(s);
	BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}
/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
	uint32_t ast = thread->kperf_ast;

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, ast);

	struct kperf_usample sbuf = {};

	task_t task = get_threadtask(thread);

	if (task_did_exec(task) || task_is_exec_copy(task)) {
		BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
		return;
	}

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
	};

	unsigned int sample_what = 0;
	if (ast & T_KPERF_AST_DISPATCH) {
		sample_what |= SAMPLER_TH_DISPATCH;
	}
	if (ast & T_KPERF_AST_CALLSTACK) {
		/* TH_INFO for backwards compatibility */
		sample_what |= SAMPLER_USTACK | SAMPLER_TH_INFO;
	}

	sbuf.ucallstack.kpuc_nframes =
	    T_KPERF_GET_CALLSTACK_DEPTH(ast) ?: MAX_UCALLSTACK_FRAMES;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	kperf_sample_user_internal(&sbuf, &ctx, actionid, sample_what);

	BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END);
}
int
kperf_ast_pend(thread_t thread, uint32_t set_flags, unsigned int set_actionid)
{
	if (thread != current_thread()) {
		panic("kperf: pending AST to non-current thread");
	}

	uint32_t ast = thread->kperf_ast;
	unsigned int actionid = T_KPERF_GET_ACTIONID(ast);
	uint32_t flags = ast & T_KPERF_AST_ALL;

	if ((flags | set_flags) != flags || actionid != set_actionid) {
		ast &= ~T_KPERF_SET_ACTIONID(actionid);
		ast |= T_KPERF_SET_ACTIONID(set_actionid);
		ast |= set_flags;

		thread->kperf_ast = ast;

		/* set the actual AST */
		act_set_kperf(thread);
		return 1;
	}

	return 0;
}
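
/*
 * Raise the user callstack depth recorded in the thread's kperf AST; the
 * recorded depth is never shrunk by this call.
 */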
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
	uint32_t ast = thread->kperf_ast;
	uint32_t existing_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast);
	if (existing_depth < depth) {
		ast &= ~T_KPERF_SET_CALLSTACK_DEPTH(existing_depth);
		ast |= T_KPERF_SET_CALLSTACK_DEPTH(depth);
		thread->kperf_ast = ast;
	}
}
int
kperf_kdbg_cswitch_get(void)
{
	return kperf_kdebug_cswitch;
}
int
kperf_kdbg_cswitch_set(int newval)
{
	kperf_kdebug_cswitch = newval;
	kperf_on_cpu_update();

	return 0;
}
/*
 * Action configuration
 */
unsigned int
kperf_action_get_count(void)
{
	return actionc;
}
int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	/* disallow both CPU and thread counters to be sampled in the same
	 * action */
	if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
		return EINVAL;
	}

	actionv[actionid - 1].sample = samplers;

	return 0;
}
int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*samplers_out = 0; /* "NULL" action */
	} else {
		*samplers_out = actionv[actionid - 1].sample;
	}

	return 0;
}
int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].userdata = userdata;

	return 0;
}
int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*userdata_out = 0; /* "NULL" action */
	} else {
		*userdata_out = actionv[actionid - 1].userdata;
	}

	return 0;
}
int
kperf_action_set_filter(unsigned actionid, int pid)
{
	if ((actionid > actionc) || (actionid == 0)) {
		return EINVAL;
	}

	actionv[actionid - 1].pid_filter = pid;

	return 0;
}
int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
	if ((actionid > actionc)) {
		return EINVAL;
	}

	if (actionid == 0) {
		*pid_out = -1; /* "NULL" action */
	} else {
		*pid_out = actionv[actionid - 1].pid_filter;
	}

	return 0;
}
void
kperf_action_reset(void)
{
	for (unsigned int i = 0; i < actionc; i++) {
		kperf_action_set_samplers(i + 1, 0);
		kperf_action_set_userdata(i + 1, 0);
		kperf_action_set_filter(i + 1, -1);
		kperf_action_set_ucallstack_depth(i + 1, MAX_UCALLSTACK_FRAMES);
		kperf_action_set_kcallstack_depth(i + 1, MAX_KCALLSTACK_FRAMES);
	}
}
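
/*
 * Resize the action table.  The table only grows (up to ACTION_MAX);
 * existing entries are preserved and new entries start with default
 * callstack depths and no pid filter.
 */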
int
kperf_action_set_count(unsigned count)
{
	struct action *new_actionv = NULL, *old_actionv = NULL;
	unsigned old_count;
	int r = 0;

	/* easy no-op */
	if (count == actionc) {
		return 0;
	}

	/* TODO: allow shrinking? */
	if (count < actionc) {
		return EINVAL;
	}

	/* cap it for good measure */
	if (count > ACTION_MAX) {
		return EINVAL;
	}

	/* creating the action array for the first time. create a few
	 * more things, too.
	 */
	if (actionc == 0) {
		if ((r = kperf_init())) {
			return r;
		}
	}

	/* create a new array */
	new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
	if (new_actionv == NULL) {
		return ENOMEM;
	}

	old_actionv = actionv;
	old_count = actionc;

	if (old_actionv != NULL) {
		memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
	}

	memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

	for (unsigned int i = old_count; i < count; i++) {
		new_actionv[i].pid_filter = -1;
		new_actionv[i].ucallstack_depth = MAX_UCALLSTACK_FRAMES;
		new_actionv[i].kcallstack_depth = MAX_KCALLSTACK_FRAMES;
	}

	actionv = new_actionv;
	actionc = count;

	if (old_actionv != NULL) {
		kfree(old_actionv, old_count * sizeof(*actionv));
	}

	return 0;
}
int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_UCALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].ucallstack_depth = depth;

	return 0;
}
int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
	if ((action_id > actionc) || (action_id == 0)) {
		return EINVAL;
	}

	if (depth > MAX_KCALLSTACK_FRAMES) {
		return EINVAL;
	}

	actionv[action_id - 1].kcallstack_depth = depth;

	return 0;
}
int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	if (action_id == 0) {
		*depth_out = MAX_UCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].ucallstack_depth;
	}

	return 0;
}
int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
	if ((action_id > actionc)) {
		return EINVAL;
	}

	if (action_id == 0) {
		*depth_out = MAX_KCALLSTACK_FRAMES;
	} else {
		*depth_out = actionv[action_id - 1].kcallstack_depth;
	}

	return 0;
}