/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

uint32_t __attribute__((noinline))
backtrace(uintptr_t *bt, uint32_t max_frames)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0));
}
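
/*
 * Example usage (a hypothetical sketch, not part of the original file):
 * capture up to 16 frames from the current thread's kernel stack and log
 * them.  Assumes the kernel printf is available in the calling context.
 */
#if 0
static void
example_log_current_backtrace(void)
{
	uintptr_t frames[16];
	uint32_t nframes = backtrace(frames, 16);

	for (uint32_t i = 0; i < nframes; i++) {
		printf("frame %u: %#lx\n", i, (unsigned long)frames[i]);
	}
}
#endif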

/*
 * This function captures a backtrace from the current stack, starting at
 * start_frame, and returns the number of frames captured, up to max_frames.
 * It is fast because it does not validate the frame data it reads.  Since it
 * is only called from threads that will keep executing, bad data would have
 * crashed the thread eventually anyway.  If this function is inlined, it
 * does not record a frame for the function it is inlined into, because that
 * function has no separate stack frame.
 */
uint32_t __attribute__((noinline, not_tail_called))
backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	uint32_t frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		uintptr_t ret_addr = *(fp + 1); /* return address is one word higher than frame pointer */

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might
		 * not have set this up, so bounds check as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

		bt[frame_index++] = ret_addr;

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}
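
/*
 * Illustration (for exposition; not part of the original file): the loop in
 * backtrace_frame treats each frame as a two-word record, with the caller's
 * saved frame pointer at the address fp points to and the return address one
 * word above it:
 *
 *	higher addresses
 *	+--------------------+
 *	|   return address   |  <- fp + 1
 *	+--------------------+
 *	| caller's saved fp  |  <- fp
 *	+--------------------+
 *	lower addresses
 */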

#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if the interrupted thread was in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if the interrupted thread was in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if the interrupted thread was in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */

uint32_t
backtrace_interrupted(uintptr_t *bt, uint32_t max_frames)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp) + 1;
}
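
/*
 * Example usage (a hypothetical sketch, not part of the original file): from
 * interrupt context, such as a profiling timer handler, sample the kernel
 * stack of the interrupted thread.  backtrace_interrupted returns 0 when the
 * interrupted context was in user space or no saved state is available.
 */
#if 0
static void
example_sample_interrupted_thread(void)
{
	uintptr_t frames[32];
	uint32_t nframes = backtrace_interrupted(frames, 32);

	if (nframes > 0) {
		/* record frames[0 .. nframes) into a sample buffer */
	}
}
#endif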

int
backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out,
    bool *user_64_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames, frames_out,
	    user_64_out);
}
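
/*
 * Example usage (a hypothetical sketch, not part of the original file):
 * capture the current thread's user stack.  Interrupts must be enabled,
 * since copyin can fault.
 */
#if 0
static void
example_log_user_backtrace(void)
{
	uintptr_t frames[32];
	uint32_t nframes = 0;
	bool user_64 = false;

	if (backtrace_user(frames, 32, &nframes, &user_64) == 0) {
		printf("captured %u %s-bit user frames\n", nframes,
		    user_64 ? "64" : "32");
	}
}
#endif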

int
backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames,
    uint32_t *frames_out, bool *user_64_out)
{
	bool user_64;
	uintptr_t pc, fp, next_fp;
	vm_map_t map = NULL, old_map = NULL;
	uint32_t frame_index = 0;
	int err = 0;
	size_t frame_size;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(frames_out != NULL);
	assert(user_64_out != NULL);

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);

	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes, but the check
	 * below only requires 4-byte alignment of the frame pointer.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#elif defined(__arm__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes, but the check
	 * below only requires 4-byte alignment of the frame pointer.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	if (max_frames == 0) {
		goto out;
	}

	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		return EINVAL;
	}

	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? sizeof(uint64_t) : sizeof(uint32_t));

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			return EINVAL;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

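	/*
	 * Walk the user stack: copy in each two-word frame record, record its
	 * return address, and advance to the caller's frame until the frame
	 * pointer becomes invalid or the output buffer is full.
	 */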
	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
		bt[frame_index++] = ret_addr;

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	*user_64_out = user_64;
	*frames_out = frame_index;
	return err;
#undef INVALID_USER_FP
}