/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */

#include <chud/chud_xnu.h>
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>

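/*
 * Append the word at the user stack pointer as the final "frame" of a user
 * callstack, so symbolicators can recover a return address that a leaf
 * function has not yet saved.  Falls back to a zero frame if the thread's
 * saved state cannot be read.
 */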
static void
callstack_fixup_user(struct callstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
			user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
			&fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->frames[cs->nframes++] = fixup_val;
}

#if defined(__x86_64__)

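/*
 * Fetch the kernel stack pointer saved by the interrupt trampoline and read
 * the word it points to, so interrupted kernel callstacks can receive the
 * same fixup frame as user callstacks.
 */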
__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
	x86_saved_state_t *state;
	uintptr_t sp;
	bool state_64;
	uint64_t cs;
	uintptr_t top, bottom;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		sp = saved_state64(state)->isf.rsp;
	} else {
		sp = saved_state32(state)->uesp;
	}

	/* make sure the stack pointer is pointing somewhere in this stack */
	bottom = current_thread()->kernel_stack;
	top = bottom + kernel_stack_size;
	if (sp < bottom || sp >= top) {
		return KERN_FAILURE;
	}

	*sp_val = *(uintptr_t *)sp;
	return KERN_SUCCESS;
}

#else /* defined(__x86_64__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* !defined(__x86_64__) */

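/*
 * Append a fixup frame to an interrupted kernel callstack.  The raw stack
 * value is only exposed on development and debug kernels; release kernels
 * always log a zero frame here.
 */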
static void
callstack_fixup_interrupted(struct callstack *cs)
{
	uintptr_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	cs->frames[cs->nframes++] = fixup_val ?
		VM_KERNEL_UNSLIDE_OR_PERM(fixup_val) : 0;
}

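/*
 * A thread blocked on a continuation has no stack to walk, so record the
 * continuation function itself as a single-frame kernel callstack.
 */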
void
kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);

	thread = context->cur_thread;
	assert(thread != NULL);
	assert(thread->continuation != NULL);

	cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	cs->nframes = 1;
	cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}

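/*
 * Walk the current thread's kernel stack starting from the frame pointer in
 * the context.  On entry, cs->nframes holds the caller's frame limit; one
 * slot is reserved for the trailing zero fixup frame.
 */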
void
kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
{
	assert(cs != NULL);
	assert(context != NULL);
	assert(context->cur_thread == current_thread());

	cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

	cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
		context->starting_fp);
	if (cs->nframes > 0) {
		cs->flags |= CALLSTACK_VALID;
		/*
		 * Fake the value pointed to by the stack pointer or the link
		 * register for symbolicators.
		 */
		cs->frames[cs->nframes] = 0;
		cs->nframes += 1;
	}

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
}

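/*
 * Sample a kernel callstack for the thread in the context.  At interrupt
 * context, only the interrupted (current) thread can be walked; other
 * threads fall back to the legacy CHUD backtracer.
 */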
void
kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
		cs->nframes);

	cs->flags = CALLSTACK_KERNEL;

#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		assert(thread == current_thread());
		cs->flags |= CALLSTACK_KERNEL_WORDS;
		cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
			cs->nframes - 1);
		if (cs->nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
			&cs->nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			cs->flags |= CALLSTACK_VALID;
			cs->flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->nframes = 0;
		}
	}

	if (cs->nframes == 0) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
		cs->flags, cs->nframes);
}

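/*
 * Sample a user callstack.  Must be called with interrupts enabled, since
 * walking the user stack can fault.  EFAULT from the backtracer still yields
 * a valid (if truncated) callstack.
 */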
void
kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;
	bool user_64 = false;
	int err;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
	assert(ml_get_interrupts_enabled() == TRUE);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
		cs->nframes);

	cs->flags = 0;

	err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
		cs->nframes - 1, &cs->nframes, &user_64);
	cs->flags |= CALLSTACK_KERNEL_WORDS;
	if (user_64) {
		cs->flags |= CALLSTACK_64BIT;
	}

	if (!err || err == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->flags |= CALLSTACK_VALID;
	} else {
		cs->nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
		cs->flags, cs->nframes);
}

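/*
 * Scrub frames before logging: out-of-range indices become zero padding, and
 * kernel addresses are unslid so the KASLR slide is not exposed.
 */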
static inline uintptr_t
scrub_kernel_frame(uintptr_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return VM_KERNEL_UNSLIDE(bt[frame]);
	} else {
		return 0;
	}
}

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return (uintptr_t)(bt[frame]);
	} else {
		return 0;
	}
}

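/*
 * Emit a callstack to the trace buffer: one header tracepoint carrying the
 * flags and frame count, then the frames in batches of four per data
 * tracepoint, with the final batch zero-padded.
 */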
static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);

	/* framing information for the stack */
	BUF_DATA(hcode, cs->flags, cs->nframes);

	/* how many batches of 4 */
	unsigned int n = cs->nframes / 4;
	unsigned int ovf = cs->nframes % 4;
	if (ovf != 0) {
		n++;
	}
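	/* e.g. 10 frames -> n = 3 batches: frames 0-3, 4-7, then 8-9 plus two zeros */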

	if (cs->flags & CALLSTACK_KERNEL_WORDS) {
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
				scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 0),
				scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 1),
				scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 2),
				scrub_kernel_frame((uintptr_t *)cs->frames, cs->nframes, j + 3));
		}
	} else {
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
				scrub_frame(cs->frames, cs->nframes, j + 0),
				scrub_frame(cs->frames, cs->nframes, j + 1),
				scrub_frame(cs->frames, cs->nframes, j + 2),
				scrub_frame(cs->frames, cs->nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
}

void
kperf_kcallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
}

void
kperf_ucallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
}

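/*
 * User stacks cannot be walked safely from interrupt context, so pend an AST
 * on the thread; the callstack is collected and logged when the thread next
 * crosses back into user space.
 */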
int
kperf_ucallstack_pend(struct kperf_context *context, uint32_t depth)
{
	int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
	kperf_ast_set_callstack_depth(context->cur_thread, depth);

	return did_pend;
}
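
/*
 * Typical flow for a sampler built on these routines (a minimal sketch;
 * sample_thread is hypothetical and not part of this file).  Note that
 * cs.nframes is initialized to the frame capacity before sampling:
 *
 *	static void
 *	sample_thread(struct kperf_context *context)
 *	{
 *		struct callstack cs = { .nframes = MAX_CALLSTACK_FRAMES };
 *
 *		kperf_kcallstack_sample(&cs, context);
 *		kperf_kcallstack_log(&cs);
 *
 *		(void)kperf_ucallstack_pend(context, MAX_CALLSTACK_FRAMES);
 *	}
 *
 * Kernel stacks are sampled and logged in place; user stacks are pended and
 * collected on the thread's next return to user space.
 */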