/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */
31 #include <chud/chud_xnu.h>
32 #include <mach/mach_types.h>
33 #include <kern/thread.h>
34 #include <kern/backtrace.h>
35 #include <vm/vm_map.h>
36 #include <kperf/buffer.h>
37 #include <kperf/context.h>
38 #include <kperf/callstack.h>
39 #include <kperf/ast.h>
40 #include <sys/errno.h>
42 #if defined(__arm__) || defined(__arm64__)
43 #include <arm/cpu_data.h>
44 #include <arm/cpu_data_internal.h>
48 callstack_fixup_user(struct callstack
*cs
, thread_t thread
)
50 uint64_t fixup_val
= 0;
51 assert(cs
->nframes
< MAX_CALLSTACK_FRAMES
);
53 #if defined(__x86_64__)
56 x86_saved_state_t
*state
;
58 state
= get_user_regs(thread
);
63 user_64
= is_saved_state64(state
);
65 sp_user
= saved_state64(state
)->isf
.rsp
;
67 sp_user
= saved_state32(state
)->uesp
;
70 if (thread
== current_thread()) {
71 (void)copyin(sp_user
, (char *)&fixup_val
,
72 user_64
? sizeof(uint64_t) : sizeof(uint32_t));
74 (void)vm_map_read_user(get_task_map(get_threadtask(thread
)), sp_user
,
75 &fixup_val
, user_64
? sizeof(uint64_t) : sizeof(uint32_t));
78 #elif defined(__arm64__) || defined(__arm__)
80 struct arm_saved_state
*state
= get_user_regs(thread
);
85 /* encode thumb mode into low bit of PC */
86 if (get_saved_state_cpsr(state
) & PSR_TF
) {
87 cs
->frames
[0] |= 1ULL;
90 fixup_val
= get_saved_state_lr(state
);
93 #error "callstack_fixup_user: unsupported architecture"
97 cs
->frames
[cs
->nframes
++] = fixup_val
;
100 #if defined(__x86_64__)
102 __attribute__((used
))
104 interrupted_kernel_sp_value(uintptr_t *sp_val
)
106 x86_saved_state_t
*state
;
110 uintptr_t top
, bottom
;
112 state
= current_cpu_datap()->cpu_int_state
;
117 state_64
= is_saved_state64(state
);
120 cs
= saved_state64(state
)->isf
.cs
;
122 cs
= saved_state32(state
)->cs
;
124 /* return early if interrupted a thread in user space */
125 if ((cs
& SEL_PL
) == SEL_PL_U
) {
130 sp
= saved_state64(state
)->isf
.rsp
;
132 sp
= saved_state32(state
)->uesp
;
135 /* make sure the stack pointer is pointing somewhere in this stack */
136 bottom
= current_thread()->kernel_stack
;
137 top
= bottom
+ kernel_stack_size
;
138 if (sp
>= bottom
&& sp
< top
) {
142 *sp_val
= *(uintptr_t *)sp
;
146 #elif defined(__arm64__)
148 __attribute__((used
))
150 interrupted_kernel_lr(uintptr_t *lr
)
152 struct arm_saved_state
*state
;
154 state
= getCpuDatap()->cpu_int_state
;
156 /* return early if interrupted a thread in user space */
157 if (PSR64_IS_USER(get_saved_state_cpsr(state
))) {
161 *lr
= get_saved_state_lr(state
);
165 #elif defined(__arm__)
167 __attribute__((used
))
169 interrupted_kernel_lr(uintptr_t *lr
)
171 struct arm_saved_state
*state
;
173 state
= getCpuDatap()->cpu_int_state
;
175 /* return early if interrupted a thread in user space */
176 if (PSR_IS_USER(get_saved_state_cpsr(state
))) {
180 *lr
= get_saved_state_lr(state
);
184 #else /* defined(__arm__) */
185 #error "interrupted_kernel_{sp,lr}: unsupported architecture"
186 #endif /* !defined(__arm__) */
190 callstack_fixup_interrupted(struct callstack
*cs
)
192 uintptr_t fixup_val
= 0;
193 assert(cs
->nframes
< MAX_CALLSTACK_FRAMES
);
196 * Only provide arbitrary data on development or debug kernels.
198 #if DEVELOPMENT || DEBUG
199 #if defined(__x86_64__)
200 (void)interrupted_kernel_sp_value(&fixup_val
);
201 #elif defined(__arm64__) || defined(__arm__)
202 (void)interrupted_kernel_lr(&fixup_val
);
203 #endif /* defined(__x86_64__) */
204 #endif /* DEVELOPMENT || DEBUG */
206 assert(cs
->flags
& CALLSTACK_KERNEL
);
207 cs
->frames
[cs
->nframes
++] = fixup_val
;
211 kperf_continuation_sample(struct callstack
*cs
, struct kperf_context
*context
)
216 assert(context
!= NULL
);
218 thread
= context
->cur_thread
;
219 assert(thread
!= NULL
);
220 assert(thread
->continuation
!= NULL
);
222 cs
->flags
= CALLSTACK_CONTINUATION
| CALLSTACK_VALID
| CALLSTACK_KERNEL
;
224 cs
->flags
|= CALLSTACK_64BIT
;
228 cs
->frames
[0] = VM_KERNEL_UNSLIDE(thread
->continuation
);
232 kperf_backtrace_sample(struct callstack
*cs
, struct kperf_context
*context
)
235 assert(context
!= NULL
);
236 assert(context
->cur_thread
== current_thread());
238 cs
->flags
= CALLSTACK_KERNEL
| CALLSTACK_KERNEL_WORDS
;
240 cs
->flags
|= CALLSTACK_64BIT
;
243 BUF_VERB(PERF_CS_BACKTRACE
| DBG_FUNC_START
, 1);
245 cs
->nframes
= backtrace_frame((uintptr_t *)&(cs
->frames
), cs
->nframes
- 1,
246 context
->starting_fp
);
247 if (cs
->nframes
> 0) {
248 cs
->flags
|= CALLSTACK_VALID
;
250 * Fake the value pointed to by the stack pointer or the link
251 * register for symbolicators.
253 cs
->frames
[cs
->nframes
+ 1] = 0;
257 BUF_VERB(PERF_CS_BACKTRACE
| DBG_FUNC_END
, cs
->nframes
);
261 kperf_kcallstack_sample(struct callstack
*cs
, struct kperf_context
*context
)
266 assert(context
!= NULL
);
267 assert(cs
->nframes
<= MAX_CALLSTACK_FRAMES
);
269 thread
= context
->cur_thread
;
270 assert(thread
!= NULL
);
272 BUF_INFO(PERF_CS_KSAMPLE
| DBG_FUNC_START
, (uintptr_t)thread_tid(thread
),
275 cs
->flags
= CALLSTACK_KERNEL
;
278 cs
->flags
|= CALLSTACK_64BIT
;
281 if (ml_at_interrupt_context()) {
282 assert(thread
== current_thread());
283 cs
->flags
|= CALLSTACK_KERNEL_WORDS
;
284 cs
->nframes
= backtrace_interrupted((uintptr_t *)cs
->frames
,
286 if (cs
->nframes
!= 0) {
287 callstack_fixup_interrupted(cs
);
291 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
295 kr
= chudxnu_thread_get_callstack64_kperf(thread
, cs
->frames
,
296 &cs
->nframes
, FALSE
);
297 if (kr
== KERN_SUCCESS
) {
298 cs
->flags
|= CALLSTACK_VALID
;
299 } else if (kr
== KERN_RESOURCE_SHORTAGE
) {
300 cs
->flags
|= CALLSTACK_VALID
;
301 cs
->flags
|= CALLSTACK_TRUNCATED
;
307 if (cs
->nframes
== 0) {
308 BUF_INFO(PERF_CS_ERROR
, ERR_GETSTACK
);
311 BUF_INFO(PERF_CS_KSAMPLE
| DBG_FUNC_END
, (uintptr_t)thread_tid(thread
), cs
->flags
, cs
->nframes
);
315 kperf_ucallstack_sample(struct callstack
*cs
, struct kperf_context
*context
)
318 bool user_64
= false;
322 assert(context
!= NULL
);
323 assert(cs
->nframes
<= MAX_CALLSTACK_FRAMES
);
324 assert(ml_get_interrupts_enabled() == TRUE
);
326 thread
= context
->cur_thread
;
327 assert(thread
!= NULL
);
329 BUF_INFO(PERF_CS_USAMPLE
| DBG_FUNC_START
, (uintptr_t)thread_tid(thread
),
334 err
= backtrace_thread_user(thread
, (uintptr_t *)cs
->frames
,
335 cs
->nframes
- 1, &cs
->nframes
, &user_64
);
336 cs
->flags
|= CALLSTACK_KERNEL_WORDS
;
338 cs
->flags
|= CALLSTACK_64BIT
;
341 if (!err
|| err
== EFAULT
) {
342 callstack_fixup_user(cs
, thread
);
343 cs
->flags
|= CALLSTACK_VALID
;
346 BUF_INFO(PERF_CS_ERROR
, ERR_GETSTACK
, err
);
349 BUF_INFO(PERF_CS_USAMPLE
| DBG_FUNC_END
, (uintptr_t)thread_tid(thread
),
350 cs
->flags
, cs
->nframes
);
/*
 * Fetch frame `frame` from a word-sized backtrace for logging, or 0 if
 * the index is past the end (the emitters below always read batches of
 * 4, so the last batch can run past nframes).  Kernel addresses are
 * unslid before leaving the kernel.
 */
static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
	if (frame >= n_frames) {
		return 0;
	}
	return kern ? VM_KERNEL_UNSLIDE(bt[frame]) : bt[frame];
}
/*
 * Fetch frame `frame` from a 64-bit backtrace for logging, or 0 if the
 * index is past the end (batches of 4 may overrun nframes).
 */
static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame >= n_frames) {
		return 0;
	}
	return (uintptr_t)bt[frame];
}
378 callstack_log(struct callstack
*cs
, uint32_t hcode
, uint32_t dcode
)
380 BUF_VERB(PERF_CS_LOG
| DBG_FUNC_START
, cs
->flags
, cs
->nframes
);
382 /* framing information for the stack */
383 BUF_DATA(hcode
, cs
->flags
, cs
->nframes
);
385 /* how many batches of 4 */
386 unsigned int nframes
= cs
->nframes
;
387 unsigned int n
= nframes
/ 4;
388 unsigned int ovf
= nframes
% 4;
393 bool kern
= cs
->flags
& CALLSTACK_KERNEL
;
395 if (cs
->flags
& CALLSTACK_KERNEL_WORDS
) {
396 uintptr_t *frames
= (uintptr_t *)cs
->frames
;
397 for (unsigned int i
= 0; i
< n
; i
++) {
398 unsigned int j
= i
* 4;
400 scrub_word(frames
, nframes
, j
+ 0, kern
),
401 scrub_word(frames
, nframes
, j
+ 1, kern
),
402 scrub_word(frames
, nframes
, j
+ 2, kern
),
403 scrub_word(frames
, nframes
, j
+ 3, kern
));
406 for (unsigned int i
= 0; i
< n
; i
++) {
407 uint64_t *frames
= cs
->frames
;
408 unsigned int j
= i
* 4;
410 scrub_frame(frames
, nframes
, j
+ 0),
411 scrub_frame(frames
, nframes
, j
+ 1),
412 scrub_frame(frames
, nframes
, j
+ 2),
413 scrub_frame(frames
, nframes
, j
+ 3));
417 BUF_VERB(PERF_CS_LOG
| DBG_FUNC_END
, cs
->flags
, cs
->nframes
);
421 kperf_kcallstack_log( struct callstack
*cs
)
423 callstack_log(cs
, PERF_CS_KHDR
, PERF_CS_KDATA
);
427 kperf_ucallstack_log( struct callstack
*cs
)
429 callstack_log(cs
, PERF_CS_UHDR
, PERF_CS_UDATA
);
433 kperf_ucallstack_pend(struct kperf_context
* context
, uint32_t depth
)
435 int did_pend
= kperf_ast_pend(context
->cur_thread
, T_KPERF_AST_CALLSTACK
);
436 kperf_ast_set_callstack_depth(context
->cur_thread
, depth
);