/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

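/*
 * Capture a backtrace of the current thread's kernel stack into bt, up to
 * max_frames return addresses, beginning with the caller of this function.
 * Returns the number of frames captured.
 */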
uint32_t __attribute__((noinline))
backtrace(uintptr_t *bt, uint32_t max_frames)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0));
}

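/*
 * Illustrative usage (not from this file): capture up to 8 frames of the
 * current kernel stack and log them.  Any kernel logging routine could stand
 * in for printf.
 *
 *	uintptr_t frames[8];
 *	uint32_t n = backtrace(frames, 8);
 *	for (uint32_t i = 0; i < n; i++) {
 *		printf("frame %u: 0x%lx\n", i, (unsigned long)frames[i]);
 *	}
 */
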
/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It is fast because it does minimal validation of the frame
 * data: since it is only called on threads that will continue executing,
 * corrupt frame data would have caused a crash eventually anyway.  If this
 * function is inlined, it does not record a frame for the function it is
 * inlined into (because that function has no separate stack frame).
 */
uint32_t __attribute__((noinline,not_tail_called))
backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	uint32_t frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

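/* true if __addr lies within the bounds of the current kernel stack */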
#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds-check the frame pointer as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

		/* return address is one word higher than frame pointer */
		bt[frame_index++] = *(fp + 1);

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}

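/*
 * Fetch the PC and FP that were saved when the current CPU was interrupted.
 * Fails if no interrupt state is available, or if the interrupted context
 * was in user space.
 */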
#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if we interrupted a user-space thread */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if we interrupted a user-space thread */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if we interrupted a user-space thread */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */

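/*
 * Capture a backtrace of the kernel stack that was interrupted on this CPU.
 * bt[0] is the interrupted PC; the remaining frames are walked from the
 * interrupted FP.  Returns the number of frames captured, which is 0 if the
 * interrupt state is unavailable or belonged to user space.
 */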
uint32_t
backtrace_interrupted(uintptr_t *bt, uint32_t max_frames)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp);
}

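/*
 * Capture a backtrace of the current thread's user stack.  See
 * backtrace_thread_user for the parameters and return value.
 */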
int
backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out,
	bool *user_64_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames, frames_out,
		user_64_out);
}

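/*
 * Walk the user stack of the given thread, copying in each frame, and store
 * up to max_frames return addresses in bt.  Returns 0 on success or an errno
 * value on failure; on success, *frames_out is set to the number of frames
 * captured and *user_64_out to whether the thread was running 64-bit code.
 */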
int
backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames,
	uint32_t *frames_out, bool *user_64_out)
{
	bool user_64;
	uintptr_t pc, fp, next_fp;
	vm_map_t map, old_map;
	uint32_t frame_index = 0;
	int err = 0;
	size_t frame_size;

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		return EINVAL;
	}

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(frames_out != NULL);
	assert(user_64_out != NULL);

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);

	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes; only reject
	 * frame pointers that are not at least word-aligned here.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#elif defined(__arm__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes; only reject
	 * frame pointers that are not at least word-aligned here.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			return EINVAL;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

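	/*
	 * Layout of a user stack frame as copied in below: the saved frame
	 * pointer followed by the return address, at the thread's word size.
	 */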
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;
	frame_size = 2 * (user_64 ? sizeof(uint64_t) : sizeof(uint32_t));

	bt[frame_index++] = pc;

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		bt[frame_index++] = user_64 ? frame.u64.ret : frame.u32.ret;

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	*user_64_out = user_64;
	*frames_out = frame_index;
	return err;
#undef INVALID_USER_FP
}
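
/*
 * Illustrative usage (not from this file): capture the current thread's user
 * backtrace.  The buffer size and error handling here are assumptions.
 *
 *	uintptr_t frames[32];
 *	uint32_t nframes = 0;
 *	bool user_64 = false;
 *	int error = backtrace_user(frames, 32, &nframes, &user_64);
 *
 * On success (error == 0), the first nframes entries of frames hold user
 * return addresses, and user_64 reports the thread's register width.
 */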