/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>
#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */

#if XNU_MONITOR
#define IN_PPLSTK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
	((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
#endif /* XNU_MONITOR */
unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0),
	           was_truncated_out);
}
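/*
 * Illustrative usage (editorial sketch, not part of the original source): a
 * typical caller captures into a fixed-size buffer and checks the truncation
 * flag afterwards.  The buffer size and the printf() reporting below are
 * assumptions made only for this sketch.
 *
 *	uintptr_t frames[48] = { 0 };
 *	bool truncated = false;
 *	unsigned int n = backtrace(frames, 48, &truncated);
 *	for (unsigned int i = 0; i < n; i++) {
 *		printf("frame %u: 0x%lx\n", i, (unsigned long)frames[i]);
 *	}
 *	if (truncated) {
 *		printf("backtrace did not fit in 48 frames\n");
 *	}
 */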
/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It's fast because it does no checking to make sure there
 * isn't bad data.  Since it's only called from threads that we're going to
 * keep executing, if there's bad data we were going to die eventually.  If
 * this function is inlined, it doesn't record the frame of the function it's
 * inside (because there's no stack frame).
 */
unsigned int __attribute__((noinline, not_tail_called))
backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
    bool *was_truncated_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	unsigned int frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))
	in_valid_stack = IN_STK_BOUNDS(fp);
#if XNU_MONITOR
	in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
#endif /* XNU_MONITOR */

	if (!in_valid_stack) {
		fp = NULL;
	}
	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		uintptr_t ret_addr = *(fp + 1); /* return address is one word higher than frame pointer */

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds check, as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);
#if XNU_MONITOR
		in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
#endif /* XNU_MONITOR */

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}
#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */
		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
#if XNU_MONITOR
			bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
			bool fp_in_kstack = IN_STK_BOUNDS(fp);
			bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(next_fp);
			bool next_fp_in_kstack = IN_STK_BOUNDS(next_fp);
			/*
			 * This check is verbose; it is basically checking whether
			 * we are switching between the kernel stack and the cpu
			 * stack.  If so, we ignore the fact that fp has switched
			 * directions (as it is a symptom of switching stacks).
			 */
			if (((fp_in_pplstack) && (next_fp_in_kstack)) ||
			    ((fp_in_kstack) && (next_fp_in_pplstack))) {
				/* switching stacks; tolerate the reversed direction */
			} else {
				break;
			}
#else /* XNU_MONITOR */
			break;
#endif /* !XNU_MONITOR */
		}

		fp = next_fp;
	}
	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != NULL && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}
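/*
 * For illustration (an editorial sketch, not code from this file): the walker
 * above relies on the standard frame-pointer chain, in which each frame
 * record is two machine words stored at the frame pointer -- the saved caller
 * frame pointer followed by the return address:
 *
 *	struct frame_record {
 *		uintptr_t prev_fp;   // read as *fp
 *		uintptr_t ret_addr;  // read as *(fp + 1)
 *	};
 *
 * The struct name is hypothetical; the code reads the two words directly and
 * expects successive records to appear at strictly higher addresses.
 */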
#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}

	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}
	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}

	return KERN_SUCCESS;
}
#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);

	return KERN_SUCCESS;
}
#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);

	return KERN_SUCCESS;
}
#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */
unsigned int
backtrace_interrupted(uintptr_t *bt, unsigned int max_frames,
    bool *was_truncated_out)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);
	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp,
	           was_truncated_out) + 1;
}
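/*
 * Editorial note (not in the original source): on success the buffer is laid
 * out as bt[0] = the interrupted PC and bt[1..] = the frames reachable from
 * the interrupted frame pointer, which is why the backtrace_frame() result is
 * incremented by one above.
 */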
unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames,
	           error_out, user_64_out, was_truncated_out, true);
}
unsigned int
backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out,
    __unused bool faults_permitted)
{
	bool user_64 = false;
	uintptr_t pc = 0, fp = 0, next_fp = 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int err = 0;
	size_t frame_size = 0;

	assert(max_frames > 0);
	assert((max_frames == 1) || (faults_permitted == true));
#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))
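	/*
	 * Illustration (editorial assumption, not original source): the
	 * canonical-address check rejects kernel-half pointers, e.g. a forged
	 * frame pointer such as 0xffffff8000000000 fails IS_USERADDR64_CANONICAL(),
	 * so the copyin loop below never follows a frame chain into kernel
	 * address space.
	 */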
	x86_saved_state_t *state = get_user_regs(thread);

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}
#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

	/*
	 * ARM expects stack frames to be aligned to 16 bytes; the check below
	 * only rejects frame pointers that are not 4-byte aligned.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)
#elif defined(__arm__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes; the check below
	 * only rejects frame pointers that are not 4-byte aligned.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);

	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);
#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	bt[frame_index++] = pc;
	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		goto out;
	}
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);
	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		old_map = vm_map_switch(map);
	}
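	/*
	 * Editorial note: copyin() resolves user addresses against the current
	 * thread's address map, so walking another thread's user stack requires
	 * switching to that task's map first and restoring the previous map once
	 * the walk finishes (see the switch back below).
	 */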
	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			if (was_truncated_out) {
				*was_truncated_out = true;
			}
			goto out;
		}
		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}
		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */
		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}

		fp = next_fp;
	}

out:
	if (map != NULL) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}
	if (fp != 0 && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}
	if (user_64_out) {
		*user_64_out = user_64;
	}
	if (error_out) {
		*error_out = err;
	}

	return frame_index;
#undef INVALID_USER_FP
}
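/*
 * Illustrative usage (editorial sketch, not part of the original source):
 * gathering a user backtrace for the current thread.  The buffer size is an
 * assumption of the sketch.
 *
 *	uintptr_t frames[64] = { 0 };
 *	int error = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_user(frames, 64, &error, &user_64,
 *	    &truncated);
 *	if (error == 0) {
 *		// n frames captured; user_64 reports the process's register width
 *	}
 */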