/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */

#include <chud/chud_xnu.h>
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>
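
/*
 * Append one extra frame to a user callstack: the word at the user stack
 * pointer, which symbolicators can use as a return-address hint. The current
 * thread is read with copyin(); other threads go through vm_map_read_user().
 * Read errors are ignored and leave the appended frame as 0.
 */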
static void
callstack_fixup_user(struct callstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		             user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		                       &fixup_val,
		                       user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->frames[cs->nframes++] = fixup_val;
}
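
/*
 * Read the word at the interrupted kernel context's stack pointer so it can
 * be reported as an extra frame. x86_64 only; gives up if the interrupt
 * landed in user space or if the saved stack pointer fails the kernel-stack
 * bounds check.
 */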
#if defined(__x86_64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
	x86_saved_state_t *state;
	uintptr_t sp;
	bool state_64;
	uint64_t cs;
	uintptr_t top, bottom;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		sp = saved_state64(state)->isf.rsp;
	} else {
		sp = saved_state32(state)->uesp;
	}

	/* make sure the stack pointer is pointing somewhere in this stack */
	bottom = current_thread()->kernel_stack;
	top = bottom + kernel_stack_size;
	if (sp >= bottom && sp < top) {
		return KERN_FAILURE;
	}

	*sp_val = *(uintptr_t *)sp;
	return KERN_SUCCESS;
}

#else /* defined(__x86_64__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* defined(__x86_64__) */
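
/*
 * Append a fixup frame to an interrupt-time kernel callstack. The raw stack
 * value is only exposed on DEVELOPMENT or DEBUG kernels; release kernels
 * record 0 instead.
 */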
static void
callstack_fixup_interrupted(struct callstack *cs)
{
	uintptr_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	cs->frames[cs->nframes++] = fixup_val ?
	    VM_KERNEL_UNSLIDE_OR_PERM(fixup_val) : 0;
}
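
/*
 * Record a single-frame "callstack" for a thread that is blocked with a
 * continuation: the only frame is the unslid continuation function.
 */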
void
kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);

	thread = context->cur_thread;
	assert(thread != NULL);
	assert(thread->continuation != NULL);

	cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	cs->nframes = 1;
	cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}
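
/*
 * Backtrace the current thread's kernel stack starting from the frame pointer
 * provided in the sampling context, reserving the last slot as a zero
 * placeholder for symbolicators.
 */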
void
kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
{
	assert(cs != NULL);
	assert(context != NULL);
	assert(context->cur_thread == current_thread());

	cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

	cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
	                              context->starting_fp);
	if (cs->nframes > 0) {
		cs->flags |= CALLSTACK_VALID;
		/*
		 * Fake the value pointed to by the stack pointer or the link
		 * register for symbolicators.
		 */
		cs->frames[cs->nframes + 1] = 0;
		cs->nframes += 1;
	}

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
}
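
/*
 * Sample a kernel callstack for the thread in the sampling context. Callers
 * pass the frame capacity in cs->nframes; on return it holds the number of
 * frames captured. At interrupt context the interrupted stack is walked
 * directly, otherwise the legacy CHUD backtracer handles the thread.
 */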
void
kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	         cs->nframes);

	cs->flags = CALLSTACK_KERNEL;

#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		assert(thread == current_thread());
		cs->flags |= CALLSTACK_KERNEL_WORDS;
		cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
		                                    cs->nframes - 1);
		if (cs->nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
		                                          &cs->nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			cs->flags |= CALLSTACK_VALID;
			cs->flags |= CALLSTACK_TRUNCATED;
		}
	}

	if (cs->nframes == 0) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread), cs->flags, cs->nframes);
}
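
/*
 * Sample the user callstack for the thread in the sampling context. Must be
 * called with interrupts enabled, since reading user memory can fault. A
 * stack cut short by EFAULT is still marked valid once the fixup frame is
 * appended.
 */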
void
kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;
	bool user_64 = false;
	int err;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
	assert(ml_get_interrupts_enabled() == TRUE);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	         cs->nframes);

	err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
	                            cs->nframes - 1, &cs->nframes, &user_64);
	cs->flags |= CALLSTACK_KERNEL_WORDS;
	if (user_64) {
		cs->flags |= CALLSTACK_64BIT;
	}

	if (!err || err == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->flags |= CALLSTACK_VALID;
	} else {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	         cs->flags, cs->nframes);
}
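
/*
 * Bounds-checked frame accessors for logging: out-of-range indexes read as 0,
 * and kernel frames are unslid before being emitted.
 */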
static inline uintptr_t
scrub_kernel_frame(uintptr_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return VM_KERNEL_UNSLIDE(bt[frame]);
	} else {
		return 0;
	}
}

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return (uintptr_t)(bt[frame]);
	} else {
		return 0;
	}
}
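
/*
 * Emit a callstack to the kperf trace buffer: a header event carrying the
 * flags and frame count, followed by the frames packed four per data event.
 */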
static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);

	/* framing information for the stack */
	BUF_DATA(hcode, cs->flags, cs->nframes);

	/* how many batches of 4 */
	unsigned int n = cs->nframes / 4;
	unsigned int ovf = cs->nframes % 4;
	if (ovf != 0) {
		n++;
	}

	if (cs->flags & CALLSTACK_KERNEL_WORDS) {
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 0),
			    scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 1),
			    scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 2),
			    scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 3));
		}
	} else {
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_frame(cs->frames, cs->nframes, j + 0),
			    scrub_frame(cs->frames, cs->nframes, j + 1),
			    scrub_frame(cs->frames, cs->nframes, j + 2),
			    scrub_frame(cs->frames, cs->nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
}
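
/* Wrappers that select the kernel or user header and data debug codes. */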
void
kperf_kcallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
}

void
kperf_ucallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
}
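
/*
 * Ask for the user callstack to be gathered at the thread's next AST rather
 * than immediately, recording the requested depth. Returns the value from
 * kperf_ast_pend().
 */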
int
kperf_ucallstack_pend(struct kperf_context *context, uint32_t depth)
{
	int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
	kperf_ast_set_callstack_depth(context->cur_thread, depth);

	return did_pend;
}