/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>


uint32_t __attribute__((noinline))
backtrace(uintptr_t *bt, uint32_t max_frames)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0));
}
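
/*
 * Example (illustrative only; `frames` and `n` are hypothetical local
 * names, not part of this interface): capture up to 16 frames of the
 * current kernel stack.
 *
 *	uintptr_t frames[16];
 *	uint32_t n = backtrace(frames, 16);
 *
 * frames[0] through frames[n - 1] then hold return addresses, most
 * recent call first.
 */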

/*
 * This function captures a backtrace from the current stack, starting at
 * start_frame, and returns the number of frames captured, limited by
 * max_frames.  It's fast because it does no validation of the frame data.
 * Since it's only called from threads that we're going to keep executing,
 * bad data would have crashed the kernel eventually anyway.  If this
 * function is inlined, it doesn't record the frame of the function it's
 * inside (because there's no stack frame).
 */
uint32_t __attribute__((noinline,not_tail_called))
backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	uintptr_t *next_fp;
	uint32_t frame_index = 0;
	uintptr_t top, bottom;

	assert(bt != NULL);
	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

	if ((uintptr_t)fp >= top || (uintptr_t)fp < bottom) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		next_fp = (uintptr_t *)*fp;

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds check as well.
		 */
		if (next_fp == NULL ||
		    (uintptr_t)next_fp >= top ||
		    (uintptr_t)next_fp < bottom)
		{
			break;
		}

		/* return address is one word higher than frame pointer */
		bt[frame_index++] = *(fp + 1);

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	return frame_index;
}
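
/*
 * Illustrative layout of one frame record, as assumed by the loop in
 * backtrace_frame above (frame-pointer-based calling convention):
 *
 *	fp[0]	caller's saved frame pointer (next_fp)
 *	fp[1]	return address into the caller (one word above fp)
 */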

#if defined(__x86_64__)

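/*
 * Retrieve the PC and frame pointer of the kernel context that was
 * interrupted on this CPU.  Fails if no saved interrupt state is
 * available or if the interrupt arrived while running in user space.
 */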
static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if we interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#else /* defined(__x86_64__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__x86_64__) */

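/*
 * Capture a backtrace of the kernel context that was interrupted on the
 * current CPU.  Must be called from interrupt context.  Returns the
 * number of frames written to bt; returns 0 if the interrupted state is
 * unavailable or was in user space.
 */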
uint32_t
backtrace_interrupted(uintptr_t *bt, uint32_t max_frames)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp);
}
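
/*
 * Example (illustrative only; `sample` and MAX_SAMPLE_FRAMES are
 * hypothetical names, not part of this interface): a profiling
 * interrupt handler could record the interrupted kernel stack with
 *
 *	uintptr_t sample[MAX_SAMPLE_FRAMES];
 *	uint32_t n = backtrace_interrupted(sample, MAX_SAMPLE_FRAMES);
 */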

int
backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out,
	bool *user_64_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames, frames_out,
		user_64_out);
}

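/*
 * Capture a backtrace of the given thread's user stack by copying frame
 * records in with copyin, temporarily switching to that thread's map if
 * it isn't the current thread.  On success, returns 0, stores the number
 * of frames captured in frames_out, and sets user_64_out to whether the
 * thread is running 64-bit; otherwise returns an errno value.
 */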
int
backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames,
	uint32_t *frames_out, bool *user_64_out)
{
	bool user_64;
	uintptr_t pc, fp, next_fp;
	vm_map_t map, old_map;
	uint32_t frame_index = 0;
	int err = 0;
	size_t frame_size;

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		return EINVAL;
	}

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(frames_out != NULL);
	assert(user_64_out != NULL);

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);

	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#else /* defined(__x86_64__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__x86_64__) */

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			return EINVAL;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

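	/*
	 * One frame record as laid out on the user stack: the saved frame
	 * pointer followed by the return address, at the pointer width of
	 * the target process.
	 */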
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;
	frame_size = 2 * (user_64 ? sizeof(uint64_t) : sizeof(uint32_t));

	bt[frame_index++] = pc;

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		bt[frame_index++] = user_64 ? frame.u64.ret : frame.u32.ret;

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	*user_64_out = user_64;
	*frames_out = frame_index;
	return err;
#undef INVALID_USER_FP
}
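
/*
 * Example (illustrative only; `frames`, `nframes`, and `user_64` are
 * hypothetical local names, not part of this interface): sample the
 * current thread's user stack.
 *
 *	uintptr_t frames[32];
 *	uint32_t nframes;
 *	bool user_64;
 *	int err = backtrace_user(frames, 32, &nframes, &user_64);
 *	if (err == 0) {
 *		// frames[0] is the user PC; frames[1 .. nframes - 1] are
 *		// return addresses copied in from the user stack
 *	}
 */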