]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kperf/callstack.c
xnu-4570.31.3.tar.gz
[apple/xnu.git] / osfmk / kperf / callstack.c
CommitLineData
316670eb
A
1/*
2 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* Collect kernel callstacks */
30
39037602 31#include <chud/chud_xnu.h>
316670eb 32#include <mach/mach_types.h>
316670eb 33#include <kern/thread.h>
39037602
A
34#include <kern/backtrace.h>
35#include <vm/vm_map.h>
316670eb
A
36#include <kperf/buffer.h>
37#include <kperf/context.h>
38#include <kperf/callstack.h>
39#include <kperf/ast.h>
39037602
A
40#include <sys/errno.h>
41
5ba3f43e
A
42#if defined(__arm__) || defined(__arm64__)
43#include <arm/cpu_data.h>
44#include <arm/cpu_data_internal.h>
45#endif
316670eb
A
46
47static void
39037602 48callstack_fixup_user(struct callstack *cs, thread_t thread)
316670eb 49{
39037602
A
50 uint64_t fixup_val = 0;
51 assert(cs->nframes < MAX_CALLSTACK_FRAMES);
52
53#if defined(__x86_64__)
54 user_addr_t sp_user;
55 bool user_64;
56 x86_saved_state_t *state;
316670eb 57
39037602
A
58 state = get_user_regs(thread);
59 if (!state) {
60 goto out;
61 }
316670eb 62
39037602
A
63 user_64 = is_saved_state64(state);
64 if (user_64) {
65 sp_user = saved_state64(state)->isf.rsp;
66 } else {
67 sp_user = saved_state32(state)->uesp;
68 }
316670eb 69
39037602
A
70 if (thread == current_thread()) {
71 (void)copyin(sp_user, (char *)&fixup_val,
72 user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
73 } else {
74 (void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
75 &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
76 }
77
5ba3f43e
A
78#elif defined(__arm64__) || defined(__arm__)
79
80 struct arm_saved_state *state = get_user_regs(thread);
81 if (!state) {
82 goto out;
83 }
84
85 /* encode thumb mode into low bit of PC */
86 if (get_saved_state_cpsr(state) & PSR_TF) {
87 cs->frames[0] |= 1ULL;
88 }
89
90 fixup_val = get_saved_state_lr(state);
91
39037602
A
92#else
93#error "callstack_fixup_user: unsupported architecture"
316670eb 94#endif
39037602
A
95
96out:
97 cs->frames[cs->nframes++] = fixup_val;
98}
99
100#if defined(__x86_64__)
101
102__attribute__((used))
103static kern_return_t
104interrupted_kernel_sp_value(uintptr_t *sp_val)
105{
106 x86_saved_state_t *state;
107 uintptr_t sp;
108 bool state_64;
109 uint64_t cs;
110 uintptr_t top, bottom;
111
112 state = current_cpu_datap()->cpu_int_state;
113 if (!state) {
114 return KERN_FAILURE;
316670eb 115 }
39037602
A
116
117 state_64 = is_saved_state64(state);
118
119 if (state_64) {
120 cs = saved_state64(state)->isf.cs;
121 } else {
122 cs = saved_state32(state)->cs;
123 }
124 /* return early if interrupted a thread in user space */
125 if ((cs & SEL_PL) == SEL_PL_U) {
126 return KERN_FAILURE;
316670eb
A
127 }
128
39037602
A
129 if (state_64) {
130 sp = saved_state64(state)->isf.rsp;
131 } else {
132 sp = saved_state32(state)->uesp;
133 }
316670eb 134
39037602
A
135 /* make sure the stack pointer is pointing somewhere in this stack */
136 bottom = current_thread()->kernel_stack;
137 top = bottom + kernel_stack_size;
138 if (sp >= bottom && sp < top) {
139 return KERN_FAILURE;
316670eb 140 }
39037602
A
141
142 *sp_val = *(uintptr_t *)sp;
143 return KERN_SUCCESS;
144}
145
5ba3f43e
A
146#elif defined(__arm64__)
147
148__attribute__((used))
149static kern_return_t
150interrupted_kernel_lr(uintptr_t *lr)
151{
152 struct arm_saved_state *state;
153
154 state = getCpuDatap()->cpu_int_state;
155
156 /* return early if interrupted a thread in user space */
157 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
158 return KERN_FAILURE;
159 }
160
161 *lr = get_saved_state_lr(state);
162 return KERN_SUCCESS;
163}
164
165#elif defined(__arm__)
166
167__attribute__((used))
168static kern_return_t
169interrupted_kernel_lr(uintptr_t *lr)
170{
171 struct arm_saved_state *state;
172
173 state = getCpuDatap()->cpu_int_state;
174
175 /* return early if interrupted a thread in user space */
176 if (PSR_IS_USER(get_saved_state_cpsr(state))) {
177 return KERN_FAILURE;
178 }
179
180 *lr = get_saved_state_lr(state);
181 return KERN_SUCCESS;
182}
183
39037602
A
184#else /* defined(__arm__) */
185#error "interrupted_kernel_{sp,lr}: unsupported architecture"
186#endif /* !defined(__arm__) */
187
188
189static void
190callstack_fixup_interrupted(struct callstack *cs)
191{
192 uintptr_t fixup_val = 0;
193 assert(cs->nframes < MAX_CALLSTACK_FRAMES);
194
195 /*
196 * Only provide arbitrary data on development or debug kernels.
197 */
198#if DEVELOPMENT || DEBUG
199#if defined(__x86_64__)
200 (void)interrupted_kernel_sp_value(&fixup_val);
5ba3f43e
A
201#elif defined(__arm64__) || defined(__arm__)
202 (void)interrupted_kernel_lr(&fixup_val);
39037602
A
203#endif /* defined(__x86_64__) */
204#endif /* DEVELOPMENT || DEBUG */
205
5ba3f43e
A
206 assert(cs->flags & CALLSTACK_KERNEL);
207 cs->frames[cs->nframes++] = fixup_val;
39037602
A
208}
209
210void
211kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
212{
213 thread_t thread;
214
215 assert(cs != NULL);
216 assert(context != NULL);
217
218 thread = context->cur_thread;
219 assert(thread != NULL);
220 assert(thread->continuation != NULL);
221
222 cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
223#ifdef __LP64__
224 cs->flags |= CALLSTACK_64BIT;
225#endif
226
227 cs->nframes = 1;
228 cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
229}
230
231void
232kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
233{
234 assert(cs != NULL);
235 assert(context != NULL);
236 assert(context->cur_thread == current_thread());
237
238 cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
239#ifdef __LP64__
240 cs->flags |= CALLSTACK_64BIT;
241#endif
242
243 BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);
244
245 cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
246 context->starting_fp);
247 if (cs->nframes > 0) {
316670eb 248 cs->flags |= CALLSTACK_VALID;
39037602
A
249 /*
250 * Fake the value pointed to by the stack pointer or the link
251 * register for symbolicators.
252 */
253 cs->frames[cs->nframes + 1] = 0;
254 cs->nframes += 1;
316670eb 255 }
39037602
A
256
257 BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
258}
259
260void
261kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
262{
263 thread_t thread;
264
265 assert(cs != NULL);
266 assert(context != NULL);
267 assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
268
269 thread = context->cur_thread;
270 assert(thread != NULL);
271
272 BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
273 cs->nframes);
274
275 cs->flags = CALLSTACK_KERNEL;
276
277#ifdef __LP64__
278 cs->flags |= CALLSTACK_64BIT;
279#endif
280
281 if (ml_at_interrupt_context()) {
282 assert(thread == current_thread());
283 cs->flags |= CALLSTACK_KERNEL_WORDS;
284 cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
285 cs->nframes - 1);
286 if (cs->nframes != 0) {
287 callstack_fixup_interrupted(cs);
288 }
289 } else {
290 /*
291 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
292 * other threads.
293 */
294 kern_return_t kr;
295 kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
296 &cs->nframes, FALSE);
297 if (kr == KERN_SUCCESS) {
298 cs->flags |= CALLSTACK_VALID;
299 } else if (kr == KERN_RESOURCE_SHORTAGE) {
300 cs->flags |= CALLSTACK_VALID;
301 cs->flags |= CALLSTACK_TRUNCATED;
302 } else {
303 cs->nframes = 0;
304 }
316670eb
A
305 }
306
39037602
A
307 if (cs->nframes == 0) {
308 BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
316670eb
A
309 }
310
39037602 311 BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread), cs->flags, cs->nframes);
316670eb
A
312}
313
314void
39037602 315kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
316670eb 316{
39037602
A
317 thread_t thread;
318 bool user_64 = false;
319 int err;
320
321 assert(cs != NULL);
322 assert(context != NULL);
323 assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
324 assert(ml_get_interrupts_enabled() == TRUE);
325
326 thread = context->cur_thread;
327 assert(thread != NULL);
328
329 BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
330 cs->nframes);
331
332 cs->flags = 0;
333
334 err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
335 cs->nframes - 1, &cs->nframes, &user_64);
336 cs->flags |= CALLSTACK_KERNEL_WORDS;
337 if (user_64) {
338 cs->flags |= CALLSTACK_64BIT;
339 }
340
341 if (!err || err == EFAULT) {
342 callstack_fixup_user(cs, thread);
343 cs->flags |= CALLSTACK_VALID;
344 } else {
345 cs->nframes = 0;
346 BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
347 }
348
349 BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
350 cs->flags, cs->nframes);
316670eb
A
351}
352
39037602 353static inline uintptr_t
5ba3f43e 354scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
316670eb 355{
39037602 356 if (frame < n_frames) {
5ba3f43e
A
357 if (kern) {
358 return VM_KERNEL_UNSLIDE(bt[frame]);
359 } else {
360 return bt[frame];
361 }
39037602
A
362 } else {
363 return 0;
364 }
365}
366
/*
 * Return frame `frame` of a 64-bit backtrace, narrowed to uintptr_t;
 * out-of-range indices yield 0 (pads partial batches of 4).
 */
static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	return (frame < n_frames) ? (uintptr_t)(bt[frame]) : 0;
}
376
377static void
39037602 378callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
316670eb 379{
39037602 380 BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);
316670eb 381
39037602
A
382 /* framing information for the stack */
383 BUF_DATA(hcode, cs->flags, cs->nframes);
316670eb 384
39037602 385 /* how many batches of 4 */
5ba3f43e
A
386 unsigned int nframes = cs->nframes;
387 unsigned int n = nframes / 4;
388 unsigned int ovf = nframes % 4;
39037602 389 if (ovf != 0) {
316670eb 390 n++;
39037602 391 }
316670eb 392
5ba3f43e
A
393 bool kern = cs->flags & CALLSTACK_KERNEL;
394
39037602 395 if (cs->flags & CALLSTACK_KERNEL_WORDS) {
5ba3f43e 396 uintptr_t *frames = (uintptr_t *)cs->frames;
39037602
A
397 for (unsigned int i = 0; i < n; i++) {
398 unsigned int j = i * 4;
399 BUF_DATA(dcode,
5ba3f43e
A
400 scrub_word(frames, nframes, j + 0, kern),
401 scrub_word(frames, nframes, j + 1, kern),
402 scrub_word(frames, nframes, j + 2, kern),
403 scrub_word(frames, nframes, j + 3, kern));
39037602
A
404 }
405 } else {
406 for (unsigned int i = 0; i < n; i++) {
5ba3f43e 407 uint64_t *frames = cs->frames;
39037602
A
408 unsigned int j = i * 4;
409 BUF_DATA(dcode,
5ba3f43e
A
410 scrub_frame(frames, nframes, j + 0),
411 scrub_frame(frames, nframes, j + 1),
412 scrub_frame(frames, nframes, j + 2),
413 scrub_frame(frames, nframes, j + 3));
39037602 414 }
316670eb 415 }
39037602
A
416
417 BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
316670eb
A
418}
419
420void
421kperf_kcallstack_log( struct callstack *cs )
422{
39037602 423 callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
316670eb
A
424}
425
426void
427kperf_ucallstack_log( struct callstack *cs )
428{
39037602 429 callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
316670eb
A
430}
431
432int
39037602 433kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth)
316670eb 434{
39037602
A
435 int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
436 kperf_ast_set_callstack_depth(context->cur_thread, depth);
316670eb 437
39037602
A
438 return did_pend;
439}