]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/backtrace.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / osfmk / kern / backtrace.c
CommitLineData
39037602 1/*
f427ee49 2 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
39037602
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <stddef.h>
30#include <stdint.h>
31
32#include <kern/assert.h>
33#include <kern/backtrace.h>
f427ee49 34#include <kern/cambria_layout.h>
39037602
A
35#include <kern/thread.h>
36#include <sys/errno.h>
37#include <vm/vm_map.h>
38
5ba3f43e
A
39#if defined(__arm__) || defined(__arm64__)
40#include <arm/cpu_data.h>
41#include <arm/cpu_data_internal.h>
42#endif
43
cb323159
A
44#if defined(HAS_APPLE_PAC)
45#include <ptrauth.h>
46#endif
39037602 47
2a1bd2d3
A
48#if XNU_MONITOR
49#define IN_PPLSTK_BOUNDS(__addr) \
50 (((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
51 ((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
52#endif
d9a64523 53
cb323159
A
/*
 * Capture a kernel backtrace of the calling thread into bt, starting at the
 * caller's frame.  Must not be inlined so that __builtin_frame_address(0)
 * names a real frame to start the walk from.  Returns the number of frames
 * captured (bounded by max_frames).
 */
unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
{
	void *start = __builtin_frame_address(0);

	return backtrace_frame(bt, max_frames, start, was_truncated_out);
}
60
61/*
62 * This function captures a backtrace from the current stack and returns the
63 * number of frames captured, limited by max_frames and starting at start_frame.
64 * It's fast because it does no checking to make sure there isn't bad data.
65 * Since it's only called from threads that we're going to keep executing,
66 * if there's bad data we were going to die eventually. If this function is
67 * inlined, it doesn't record the frame of the function it's inside (because
68 * there's no stack frame).
69 */
cb323159
A
70unsigned int __attribute__((noinline, not_tail_called))
71backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
72 bool *was_truncated_out)
39037602
A
73{
74 thread_t thread = current_thread();
75 uintptr_t *fp;
cb323159 76 unsigned int frame_index = 0;
39037602 77 uintptr_t top, bottom;
5ba3f43e 78 bool in_valid_stack;
39037602
A
79
80 assert(bt != NULL);
81 assert(max_frames > 0);
82
83 fp = start_frame;
84 bottom = thread->kernel_stack;
85 top = bottom + kernel_stack_size;
86
5ba3f43e
A
87#define IN_STK_BOUNDS(__addr) \
88 (((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
89 ((uintptr_t)(__addr) < (uintptr_t)top))
90
91 in_valid_stack = IN_STK_BOUNDS(fp);
2a1bd2d3
A
92#if XNU_MONITOR
93 in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
94#endif /* XNU_MONITOR */
5ba3f43e
A
95
96 if (!in_valid_stack) {
39037602
A
97 fp = NULL;
98 }
99
100 while (fp != NULL && frame_index < max_frames) {
5ba3f43e 101 uintptr_t *next_fp = (uintptr_t *)*fp;
d9a64523 102 uintptr_t ret_addr = *(fp + 1); /* return address is one word higher than frame pointer */
39037602
A
103
104 /*
105 * If the frame pointer is 0, backtracing has reached the top of
106 * the stack and there is no return address. Some stacks might not
107 * have set this up, so bounds check, as well.
108 */
5ba3f43e 109 in_valid_stack = IN_STK_BOUNDS(next_fp);
2a1bd2d3
A
110#if XNU_MONITOR
111 in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
112#endif /* XNU_MONITOR */
5ba3f43e 113
0a7de745 114 if (next_fp == NULL || !in_valid_stack) {
39037602
A
115 break;
116 }
117
cb323159
A
118#if defined(HAS_APPLE_PAC)
119 /* return addresses signed by arm64e ABI */
120 bt[frame_index++] = (uintptr_t) ptrauth_strip((void *)ret_addr, ptrauth_key_return_address);
121#else /* defined(HAS_APPLE_PAC) */
d9a64523 122 bt[frame_index++] = ret_addr;
cb323159 123#endif /* !defined(HAS_APPLE_PAC) */
39037602
A
124
125 /* stacks grow down; backtracing should be moving to higher addresses */
126 if (next_fp <= fp) {
2a1bd2d3
A
127#if XNU_MONITOR
128 bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
129 bool fp_in_kstack = IN_STK_BOUNDS(fp);
130 bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
131 bool next_fp_in_kstack = IN_STK_BOUNDS(fp);
132
133 /*
134 * This check is verbose; it is basically checking whether
135 * we are switching between the kernel stack and the cpu
136 * stack. If so, we ignore the fact that fp has switched
137 * directions (as it is a symptom of switching stacks).
138 */
139 if (((fp_in_pplstack) && (next_fp_in_kstack)) ||
140 ((fp_in_kstack) && (next_fp_in_pplstack))) {
141 break;
142 }
143#else /* XNU_MONITOR */
39037602 144 break;
2a1bd2d3 145#endif /* !XNU_MONITOR */
39037602
A
146 }
147 fp = next_fp;
148 }
149
cb323159
A
150 /* NULL-terminate the list, if space is available */
151 if (frame_index != max_frames) {
152 bt[frame_index] = 0;
153 }
154
155 if (fp != NULL && frame_index == max_frames && was_truncated_out) {
156 *was_truncated_out = true;
157 }
158
39037602 159 return frame_index;
5ba3f43e 160#undef IN_STK_BOUNDS
39037602
A
161}
162
163#if defined(__x86_64__)
164
165static kern_return_t
166interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
167{
168 x86_saved_state_t *state;
169 bool state_64;
170 uint64_t cs;
171
172 state = current_cpu_datap()->cpu_int_state;
173 if (!state) {
174 return KERN_FAILURE;
175 }
176
177 state_64 = is_saved_state64(state);
178
179 if (state_64) {
180 cs = saved_state64(state)->isf.cs;
181 } else {
182 cs = saved_state32(state)->cs;
183 }
184 /* return early if interrupted a thread in user space */
185 if ((cs & SEL_PL) == SEL_PL_U) {
186 return KERN_FAILURE;
187 }
188
189 if (state_64) {
190 *pc = saved_state64(state)->isf.rip;
191 *fp = saved_state64(state)->rbp;
192 } else {
193 *pc = saved_state32(state)->eip;
194 *fp = saved_state32(state)->ebp;
195 }
196 return KERN_SUCCESS;
197}
198
5ba3f43e
A
199#elif defined(__arm64__)
200
201static kern_return_t
202interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
203{
204 struct arm_saved_state *state;
205 bool state_64;
206
207 state = getCpuDatap()->cpu_int_state;
208 if (!state) {
209 return KERN_FAILURE;
210 }
211 state_64 = is_saved_state64(state);
212
213 /* return early if interrupted a thread in user space */
214 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
215 return KERN_FAILURE;
216 }
217
218 *pc = get_saved_state_pc(state);
219 *fp = get_saved_state_fp(state);
220 return KERN_SUCCESS;
221}
222
223#elif defined(__arm__)
224
225static kern_return_t
226interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
227{
228 struct arm_saved_state *state;
229
230 state = getCpuDatap()->cpu_int_state;
231 if (!state) {
232 return KERN_FAILURE;
233 }
234
235 /* return early if interrupted a thread in user space */
236 if (PSR_IS_USER(get_saved_state_cpsr(state))) {
237 return KERN_FAILURE;
238 }
239
240 *pc = get_saved_state_pc(state);
241 *fp = get_saved_state_fp(state);
242 return KERN_SUCCESS;
243}
244
39037602
A
245#else /* defined(__arm__) */
246#error "interrupted_kernel_pc_fp: unsupported architecture"
247#endif /* !defined(__arm__) */
248
cb323159
A
249unsigned int
250backtrace_interrupted(uintptr_t *bt, unsigned int max_frames,
251 bool *was_truncated_out)
39037602
A
252{
253 uintptr_t pc;
5ba3f43e 254 uintptr_t fp;
39037602
A
255 kern_return_t kr;
256
257 assert(bt != NULL);
258 assert(max_frames > 0);
259 assert(ml_at_interrupt_context() == TRUE);
260
5ba3f43e 261 kr = interrupted_kernel_pc_fp(&pc, &fp);
39037602
A
262 if (kr != KERN_SUCCESS) {
263 return 0;
264 }
265
266 bt[0] = pc;
267 if (max_frames == 1) {
268 return 1;
269 }
270
cb323159 271 return backtrace_frame(bt + 1, max_frames - 1, (void *)fp,
f427ee49 272 was_truncated_out) + 1;
39037602
A
273}
274
ea3f0419 275unsigned int
cb323159 276backtrace_user(uintptr_t *bt, unsigned int max_frames,
ea3f0419 277 int *error_out, bool *user_64_out, bool *was_truncated_out)
39037602 278{
cb323159 279 return backtrace_thread_user(current_thread(), bt, max_frames,
f427ee49 280 error_out, user_64_out, was_truncated_out, true);
39037602
A
281}
282
/*
 * Walk the user-space frame-pointer chain of the given thread, storing the
 * saved PC and up to max_frames - 1 return addresses into bt.
 *
 * Frames are read with copyin (after switching to the target task's map when
 * thread is not the current thread), so a bad user frame pointer produces a
 * copyin error rather than a fault on kernel data.  Returns the number of
 * entries written to bt; on success *error_out (if non-NULL) receives the
 * last copyin error (0 if none), *user_64_out reports whether the thread's
 * saved state was 64-bit, and *was_truncated_out is set when the walk was cut
 * short by max_frames or a copyin failure.  Returns EINVAL when the thread
 * has no saved user register state.
 */
unsigned int
backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out, __unused bool faults_permitted)
{
	bool user_64;
	uintptr_t pc = 0, fp = 0, next_fp = 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int err = 0;
	size_t frame_size = 0;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert((max_frames == 1) || (faults_permitted == true));

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

	/*
	 * ARM expects stack frames to be aligned to 16 bytes.
	 * NOTE(review): the mask below only rejects FPs that are not 4-byte
	 * aligned, which is weaker than the 16-byte claim — confirm intent.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

#elif defined(__arm__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes.
	 * NOTE(review): as above, the mask only enforces 4-byte alignment.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	/* the interrupted PC is always the first entry */
	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	/* copyin may fault; interrupts must be enabled to take that fault */
	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		goto out;
	}

	/* on-stack image of one user frame record: saved FP then return address */
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	/* two words per frame record: 16 bytes for 64-bit, 8 for 32-bit */
	frame_size = 2 * (user_64 ? 8 : 4);

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			goto out;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			/* an unreadable frame ends the walk; report it as truncation */
			if (was_truncated_out) {
				*was_truncated_out = true;
			}
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	/* restore the previous map before touching the caller's out-params */
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != 0 && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	if (user_64_out) {
		*user_64_out = user_64;
	}
	if (error_out) {
		*error_out = err;
	}

	return frame_index;
#undef INVALID_USER_FP
}