[apple/xnu.git] / osfmk / kern / backtrace.c
/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0),
	    was_truncated_out);
}
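
/*
 * Example usage (illustrative sketch, hypothetical caller): capture up to
 * eight kernel return addresses from the current stack and note whether the
 * trace was cut short.
 *
 *	uintptr_t frames[8] = { 0 };
 *	bool truncated = false;
 *	unsigned int n = backtrace(frames, 8, &truncated);
 *	for (unsigned int i = 0; i < n; i++) {
 *		printf("frame %u: 0x%lx\n", i, frames[i]);
 *	}
 */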

/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It's fast because it does no checking to make sure there
 * isn't bad data.  Since it's only called from threads that we're going to
 * keep executing, if there's bad data we were going to die eventually.  If
 * this function is inlined, it doesn't record the frame of the function
 * it's inside (because there's no stack frame).  The output buffer is
 * 0-terminated when space allows, and *was_truncated_out is set if the walk
 * stopped at max_frames with frames still remaining.
 */
unsigned int __attribute__((noinline, not_tail_called))
backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
    bool *was_truncated_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	unsigned int frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		uintptr_t ret_addr = *(fp + 1); /* return address is one word higher than frame pointer */
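
		/*
		 * Frame record layout assumed by this walk; the x86-64 and
		 * ARM frame-pointer conventions both store it this way:
		 *
		 *	fp[1]: return address in the caller (higher address)
		 *	fp[0]: caller's frame pointer (next_fp)
		 */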

		/*
		 * If the frame pointer is 0, backtracing has reached the top
		 * of the stack and there is no return address.  Some stacks
		 * might not have set this up, so bounds-check as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != NULL && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}

#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */

unsigned int
backtrace_interrupted(uintptr_t *bt, unsigned int max_frames,
    bool *was_truncated_out)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp,
	    was_truncated_out) + 1;
}
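
/*
 * Example usage (illustrative sketch, hypothetical caller): sample the
 * interrupted kernel stack from interrupt context, e.g. from a profiling
 * timer handler.
 *
 *	uintptr_t frames[16] = { 0 };
 *	bool truncated = false;
 *	unsigned int n = backtrace_interrupted(frames, 16, &truncated);
 *
 * n is 0 when the interrupt arrived from user space or no saved state was
 * available.
 */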

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames,
	    error_out, user_64_out, was_truncated_out, true);
}
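
/*
 * Example usage (illustrative sketch, hypothetical caller): walk the
 * current thread's user stack, noting the task's address width and any
 * copyin error.
 *
 *	uintptr_t frames[32] = { 0 };
 *	int err = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_user(frames, 32, &err, &user_64,
 *	    &truncated);
 */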

unsigned int
backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out,
    __unused bool faults_permitted)
{
	bool user_64;
	uintptr_t pc = 0, fp = 0, next_fp = 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int err = 0;
	size_t frame_size = 0;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert((max_frames == 1) || (faults_permitted == true));

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

	/*
	 * The ABI aligns stack frames to 16 bytes, but only word (4-byte)
	 * alignment is enforced here.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

#elif defined(__arm__)

	/*
	 * As on arm64, only word (4-byte) frame pointer alignment is
	 * enforced, although the ABI aligns frames more strictly.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		goto out;
	}

	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

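	/*
	 * The union above mirrors the frame record at the base of each user
	 * frame: the saved frame pointer followed by the return address, so
	 * a 64-bit record occupies 16 bytes and a 32-bit record 8 bytes.
	 */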
	frame_size = 2 * (user_64 ? 8 : 4);

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			goto out;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			if (was_truncated_out) {
				*was_truncated_out = true;
			}
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		/* return addresses signed by arm64e ABI */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != 0 && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	if (user_64_out) {
		*user_64_out = user_64;
	}
	if (error_out) {
		*error_out = err;
	}

	return frame_index;
#undef INVALID_USER_FP
}
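
/*
 * Example usage (illustrative sketch; target_thread is a hypothetical
 * thread_t): trace another thread, which exercises the vm_map_switch()
 * path above so copyin reads the target task's address space.
 *
 *	uintptr_t frames[32] = { 0 };
 *	int err = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_thread_user(target_thread, frames, 32,
 *	    &err, &user_64, &truncated, true);
 */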