]>
git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/backtrace.c
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 #include <kern/assert.h>
33 #include <kern/backtrace.h>
34 #include <kern/thread.h>
35 #include <sys/errno.h>
36 #include <vm/vm_map.h>
39 uint32_t __attribute__((noinline
))
40 backtrace(uintptr_t *bt
, uint32_t max_frames
)
42 return backtrace_frame(bt
, max_frames
, __builtin_frame_address(0));
46 * This function captures a backtrace from the current stack and returns the
47 * number of frames captured, limited by max_frames and starting at start_frame.
48 * It's fast because it does no checking to make sure there isn't bad data.
49 * Since it's only called from threads that we're going to keep executing,
50 * if there's bad data we were going to die eventually. If this function is
51 * inlined, it doesn't record the frame of the function it's inside (because
52 * there's no stack frame).
/*
 * NOTE(review): this capture of the file is fragmentary -- the declarations of
 * fp and next_fp, the loop epilogue, and the final return statement are
 * missing from the visible text.  The comments below annotate only what is
 * visible; confirm against the full source before relying on them.
 */
54 uint32_t __attribute__((noinline
/* not_tail_called keeps this function's own frame on the stack during the walk */
,not_tail_called
))
55 backtrace_frame(uintptr_t *bt
, uint32_t max_frames
, void *start_frame
)
57 thread_t thread
= current_thread();
60 uint32_t frame_index
= 0;
/* [bottom, top) bounds of this thread's kernel stack, used to validate frame pointers */
61 uintptr_t top
, bottom
;
64 assert(max_frames
> 0);
67 bottom
= thread
->kernel_stack
;
68 top
= bottom
+ kernel_stack_size
;
/* Bail out if the starting frame pointer lies outside the kernel stack. */
70 if ((uintptr_t)fp
>= top
|| (uintptr_t)fp
< bottom
) {
/* Walk the frame-pointer chain until it ends or the output buffer is full. */
74 while (fp
!= NULL
&& frame_index
< max_frames
) {
/* The saved previous frame pointer is the word the current frame pointer points at. */
75 next_fp
= (uintptr_t *)*fp
;
78 * If the frame pointer is 0, backtracing has reached the top of
79 * the stack and there is no return address. Some stacks might not
80 * have set this up, so bounds check, as well.
82 if (next_fp
== NULL
||
83 (uintptr_t)next_fp
>= top
||
84 (uintptr_t)next_fp
< bottom
)
89 /* return address is one word higher than frame pointer */
90 bt
[frame_index
++] = *(fp
+ 1);
92 /* stacks grow down; backtracing should be moving to higher addresses */
102 #if defined(__x86_64__)
/*
 * Recover the interrupted kernel PC and frame pointer from the CPU's saved
 * interrupt-entry register state (x86_64 only; other architectures #error out
 * below).  NOTE(review): fragmentary capture -- the return type, the
 * declarations of state_64 and cs, and several branches/returns are missing
 * from the visible text.
 */
105 interrupted_kernel_pc_fp(uintptr_t *pc
, uintptr_t *fp
)
107 x86_saved_state_t
*state
;
/* Register state saved at interrupt entry for the current CPU. */
111 state
= current_cpu_datap()->cpu_int_state
;
116 state_64
= is_saved_state64(state
);
/* Read the saved code-segment selector to learn the interrupted privilege level. */
119 cs
= saved_state64(state
)->isf
.cs
;
121 cs
= saved_state32(state
)->cs
;
123 /* return early if interrupted a thread in user space */
124 if ((cs
& SEL_PL
) == SEL_PL_U
) {
/* 64-bit saved state: the interrupted PC/FP are rip and rbp. */
129 *pc
= saved_state64(state
)->isf
.rip
;
130 *fp
= saved_state64(state
)->rbp
;
/* 32-bit saved state: use eip and ebp instead. */
132 *pc
= saved_state32(state
)->eip
;
133 *fp
= saved_state32(state
)->ebp
;
138 #else /* defined(__arm__) */
139 #error "interrupted_kernel_pc_fp: unsupported architecture"
140 #endif /* !defined(__arm__) */
/*
 * Capture a backtrace of the kernel code that was interrupted; must be called
 * from interrupt context (asserted below).  NOTE(review): fragmentary capture
 * -- the return type, the declarations of kr/pc/fp, and the early-return
 * bodies are missing from the visible text.
 */
143 backtrace_interrupted(uintptr_t *bt
, uint32_t max_frames
)
150 assert(max_frames
> 0);
151 assert(ml_at_interrupt_context() == TRUE
);
/* Pull the interrupted PC and frame pointer out of the saved interrupt state. */
153 kr
= interrupted_kernel_pc_fp(&pc
, (uintptr_t)&fp
);
154 if (kr
!= KERN_SUCCESS
) {
159 if (max_frames
== 1) {
/* Slot 0 holds the interrupted PC; walk the remaining frames starting at fp. */
163 return backtrace_frame(bt
+ 1, max_frames
- 1, fp
);
/*
 * Capture a backtrace of the current thread's user-space stack.
 * NOTE(review): fragmentary capture -- the return type, the trailing
 * parameter(s), and the closing of the body are missing; the visible text
 * shows only that this delegates to backtrace_thread_user for
 * current_thread().
 */
167 backtrace_user(uintptr_t *bt
, uint32_t max_frames
, uint32_t *frames_out
,
170 return backtrace_thread_user(current_thread(), bt
, max_frames
, frames_out
,
/*
 * Capture a backtrace of the given thread's user-space stack into bt, writing
 * the frame count to frames_out and whether the thread was 64-bit to
 * user_64_out.  Requires interrupts enabled (copyin may fault).
 * NOTE(review): fragmentary capture -- the return type, the declarations of
 * user_64/frame_size/frame/err, several error-handling branches, and the
 * final return are missing from the visible text.
 */
175 backtrace_thread_user(void *thread
, uintptr_t *bt
, uint32_t max_frames
,
176 uint32_t *frames_out
, bool *user_64_out
)
179 uintptr_t pc
, fp
, next_fp
;
180 vm_map_t map
, old_map
;
181 uint32_t frame_index
= 0;
185 assert(ml_get_interrupts_enabled() == TRUE
);
186 if (!ml_get_interrupts_enabled()) {
191 assert(max_frames
> 0);
192 assert(frames_out
!= NULL
);
193 assert(user_64_out
!= NULL
);
195 #if defined(__x86_64__)
197 /* don't allow a malformed user stack to copyin arbitrary kernel data */
198 #define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))
/* Starting PC/FP come from the thread's saved user register state. */
200 x86_saved_state_t
*state
= get_user_regs(thread
);
206 user_64
= is_saved_state64(state
);
208 pc
= saved_state64(state
)->isf
.rip
;
209 fp
= saved_state64(state
)->rbp
;
211 pc
= saved_state32(state
)->eip
;
212 fp
= saved_state32(state
)->ebp
;
215 #else /* defined(__arm__) */
216 #error "backtrace_thread_user: unsupported architecture"
217 #endif /* !defined(__arm__) */
219 /* switch to the correct map, for copyin */
220 if (thread
!= current_thread()) {
/* Take a reference on the target task's map; released via vm_map_deallocate below. */
221 map
= get_task_map_reference(get_threadtask(thread
));
225 old_map
= vm_map_switch(map
);
/* A user frame record is two words: saved FP then return address; word size depends on bitness. */
240 frame_size
= 2 * (user_64
? sizeof(uint64_t) : sizeof(uint32_t));
242 bt
[frame_index
++] = pc
;
244 if (INVALID_USER_FP(fp
)) {
/* Walk the user frame-pointer chain, copying each frame record in from user space. */
248 while (fp
!= 0 && frame_index
< max_frames
) {
249 err
= copyin(fp
, (char *)&frame
, frame_size
);
254 next_fp
= user_64
? frame
.u64
.fp
: frame
.u32
.fp
;
256 if (INVALID_USER_FP(next_fp
)) {
260 bt
[frame_index
++] = user_64
? frame
.u64
.ret
: frame
.u32
.ret
;
262 /* stacks grow down; backtracing should be moving to higher addresses */
/* Restore the previous map and drop the reference taken above. */
271 (void)vm_map_switch(old_map
);
272 vm_map_deallocate(map
);
275 *user_64_out
= user_64
;
276 *frames_out
= frame_index
;
278 #undef INVALID_USER_FP