/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */
unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0),
	           was_truncated_out);
}
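
/*
 * Example (hypothetical caller, not part of this file): capture up to
 * eight kernel frames, starting at this function's caller.
 *
 *	uintptr_t frames[8] = { 0 };
 *	bool truncated = false;
 *	unsigned int n = backtrace(frames, 8, &truncated);
 *
 * frames[0..n-1] then hold return addresses, and truncated reports whether
 * the buffer was too small for the full stack.
 */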
/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It's fast because it does no checking to make sure there
 * isn't bad data.  Since it's only called from threads that we're going to
 * keep executing, if there's bad data we were going to die eventually.  If
 * this function is inlined, it doesn't record the frame of the function it's
 * inside (because there's no stack frame).
 */
unsigned int __attribute__((noinline, not_tail_called))
backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
    bool *was_truncated_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	unsigned int frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		uintptr_t ret_addr = *(fp + 1); /* return address is one word higher than frame pointer */

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds check, as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != NULL && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}
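
/*
 * Example (hypothetical, not part of this file): skip the current function
 * by handing backtrace_frame the current frame pointer, the same way
 * backtrace() does above.
 *
 *	uintptr_t frames[16] = { 0 };
 *	bool truncated = false;
 *	unsigned int n = backtrace_frame(frames, 16,
 *	    __builtin_frame_address(0), &truncated);
 */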
#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}
#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}
#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */
unsigned int
backtrace_interrupted(uintptr_t *bt, unsigned int max_frames,
    bool *was_truncated_out)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp,
	           was_truncated_out) + 1;
}
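
/*
 * Example (hypothetical interrupt handler, not part of this file):
 *
 *	uintptr_t frames[32] = { 0 };
 *	unsigned int n = 0;
 *	if (ml_at_interrupt_context()) {
 *		n = backtrace_interrupted(frames, 32, NULL);
 *	}
 *
 * A return value of 0 means the interrupt arrived from user space or no
 * saved state was available.
 */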
unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames,
	           error_out, user_64_out, was_truncated_out, true);
}
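
/*
 * Example (hypothetical caller, not part of this file): sample the current
 * thread's user stack.
 *
 *	uintptr_t frames[64] = { 0 };
 *	int error = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_user(frames, 64, &error, &user_64,
 *	    &truncated);
 *
 * On success error is 0, frames[0] is the user PC, and the remaining
 * entries are return addresses copied in from user frames, which are 8
 * bytes wide when user_64 is true and 4 bytes wide otherwise.
 */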
unsigned int
backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out,
    __unused bool faults_permitted)
{
	bool user_64 = false;
	uintptr_t pc = 0, fp = 0, next_fp = 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int err = 0;
	size_t frame_size = 0;

	assert(max_frames > 0);
	assert((max_frames == 1) || (faults_permitted == true));

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		err = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		err = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

	/* ARM expects stack frames to be aligned to 16 bytes */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

#elif defined(__arm__)

	/* ARM expects stack frames to be aligned to 16 bytes */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		err = EINVAL;
		goto out;
	}

	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		goto out;
	}

	/* a user frame is two words: the saved frame pointer and return address */
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			err = EINVAL;
			goto out;
		}
		old_map = vm_map_switch(map);
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			if (was_truncated_out) {
				*was_truncated_out = true;
			}
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != 0 && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	if (user_64_out) {
		*user_64_out = user_64;
	}
	if (error_out) {
		*error_out = err;
	}

	return frame_index;
#undef INVALID_USER_FP
}
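
/*
 * Example (hypothetical caller, not part of this file): backtrace another
 * thread, which switches to that thread's VM map for the copyin.
 *
 *	uintptr_t frames[32] = { 0 };
 *	int error = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_thread_user(other_thread, frames, 32,
 *	    &error, &user_64, &truncated, true);
 *
 * other_thread stands in for a thread_t the caller already holds a
 * reference on; interrupts must be enabled, since copyin can fault.
 */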