/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */

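/*
 * kperf collects kernel and user callstacks into fixed-size kp_kcallstack
 * and kp_ucallstack buffers, marks them with CALLSTACK_* flags (valid,
 * kernel, 64-bit, truncated), and emits them to the trace buffer four
 * frames per event.
 */
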
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

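/*
 * Append one "fixup" entry to a sampled user callstack: the word at the
 * user stack pointer on x86_64 (the return address when the thread is in
 * a frameless leaf function) or the saved link register on ARM.
 * Symbolicators use this trailing value to recover the leaf frame.
 */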
static void
callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->kpuc_nframes < MAX_UCALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__) || defined(__arm__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (get_saved_state_cpsr(state) & PSR_TF) {
		cs->kpuc_frames[0] |= 1ULL;
	}

	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->kpuc_frames[cs->kpuc_nframes++] = fixup_val;
}

#if defined(__x86_64__)

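/*
 * Read the word at the stack pointer of the kernel state interrupted on
 * this CPU.  Used only for the fixup entry on DEVELOPMENT and DEBUG
 * kernels; fails if there is no saved interrupt state or if user space
 * was interrupted.
 */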
__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
	x86_saved_state_t *state;
	uintptr_t sp;
	bool state_64;
	uint64_t cs;
	uintptr_t top, bottom;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		sp = saved_state64(state)->isf.rsp;
	} else {
		sp = saved_state32(state)->uesp;
	}

	/* make sure the stack pointer is pointing somewhere in this stack */
	bottom = current_thread()->kernel_stack;
	top = bottom + kernel_stack_size;
	if (sp >= bottom && sp < top) {
		return KERN_FAILURE;
	}

	*sp_val = *(uintptr_t *)sp;
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* !defined(__arm__) */

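/*
 * Append the fixup entry to an interrupt-time kernel callstack so that it
 * has the same layout as other samples.  Release kernels always append 0;
 * only DEVELOPMENT and DEBUG kernels read a real value.
 */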
static void
callstack_fixup_interrupted(struct kp_kcallstack *cs)
{
	uintptr_t fixup_val = 0;
	assert(cs->kpkc_nframes < MAX_KCALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__) || defined(__arm__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->kpkc_flags & CALLSTACK_KERNEL);
	cs->kpkc_frames[cs->kpkc_nframes++] = fixup_val;
}

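/*
 * A thread blocked with a continuation has no stack to walk, so report a
 * single-frame callstack holding just the unslid continuation function
 * pointer.
 */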
void
kperf_continuation_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);

	thread = context->cur_thread;
	assert(thread != NULL);
	assert(thread->continuation != NULL);

	cs->kpkc_flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	cs->kpkc_nframes = 1;
	cs->kpkc_frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}

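/*
 * Backtrace the current thread from the frame pointer recorded in the
 * context, leaving one slot free so a zeroed fixup entry can stand in for
 * the value normally read from the stack pointer or link register.
 */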
void
kperf_backtrace_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	assert(cs != NULL);
	assert(context != NULL);
	assert(context->cur_thread == current_thread());

	cs->kpkc_flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

	bool trunc = false;
	cs->kpkc_nframes = backtrace_frame(cs->kpkc_word_frames,
	    cs->kpkc_nframes - 1, context->starting_fp, &trunc);
	if (cs->kpkc_nframes > 0) {
		cs->kpkc_flags |= CALLSTACK_VALID;
		/*
		 * Fake the value pointed to by the stack pointer or the link
		 * register for symbolicators.
		 */
		cs->kpkc_word_frames[cs->kpkc_nframes + 1] = 0;
		cs->kpkc_nframes += 1;
	}
	if (trunc) {
		cs->kpkc_flags |= CALLSTACK_TRUNCATED;
	}

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->kpkc_nframes);
}

kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
    uint64_t *callStack, mach_msg_type_number_t *count,
    boolean_t user_only);

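/*
 * Sample a kernel callstack.  At interrupt context, the state interrupted
 * on this CPU is walked directly with backtrace_interrupted(); any other
 * thread is handled by the legacy CHUD backtracer defined below.
 */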
void
kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->kpkc_nframes <= MAX_KCALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->kpkc_nframes);

	cs->kpkc_flags = CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->kpkc_flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		assert(thread == current_thread());
		cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS;
		bool trunc = false;
		cs->kpkc_nframes = backtrace_interrupted(
			cs->kpkc_word_frames, cs->kpkc_nframes - 1, &trunc);
		if (cs->kpkc_nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
		if (trunc) {
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		}
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread,
		    cs->kpkc_frames, &cs->kpkc_nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->kpkc_flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			cs->kpkc_flags |= CALLSTACK_VALID;
			cs->kpkc_flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->kpkc_nframes = 0;
		}
	}

	if (!(cs->kpkc_flags & CALLSTACK_VALID)) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpkc_flags, cs->kpkc_nframes);
}

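/*
 * Sample a user callstack.  Interrupts must be enabled because the walk
 * may fault in user stack pages.  An EFAULT from backtrace_thread_user()
 * still yields a valid (if truncated) callstack once the fixup entry is
 * appended.
 */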
void
kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context)
{
	assert(ml_get_interrupts_enabled() == TRUE);

	thread_t thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread), cs->kpuc_nframes);

	bool user64 = false;
	bool trunc = false;
	int err = backtrace_thread_user(thread, cs->kpuc_frames,
	    cs->kpuc_nframes - 1, &cs->kpuc_nframes, &user64, &trunc);
	cs->kpuc_flags = CALLSTACK_KERNEL_WORDS;
	if (user64) {
		cs->kpuc_flags |= CALLSTACK_64BIT;
	}
	if (trunc) {
		cs->kpuc_flags |= CALLSTACK_TRUNCATED;
	}

	if (!err || err == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->kpuc_flags |= CALLSTACK_VALID;
	} else {
		cs->kpuc_nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->kpuc_flags, cs->kpuc_nframes);
}

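/*
 * Bounds-checked accessors for logging: indices past the end of the
 * callstack read as 0, and kernel words are unslid before being emitted,
 * so frames can always be logged in groups of four.
 */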
static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
	if (frame < n_frames) {
		if (kern) {
			return VM_KERNEL_UNSLIDE(bt[frame]);
		} else {
			return bt[frame];
		}
	} else {
		return 0;
	}
}

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return (uintptr_t)(bt[frame]);
	} else {
		return 0;
	}
}

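/*
 * Emit a header event followed by the frames, four per event.  For
 * example, 10 frames produce 10 / 4 = 2 full events plus one more for the
 * 2 overflow frames, padded with zeros by the scrub helpers.
 */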
static void
callstack_log(uint32_t hdrid, uint32_t dataid, void *vframes,
    unsigned int nframes, unsigned int flags)
{
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, flags, nframes);

	BUF_DATA(hdrid, flags, nframes);

	unsigned int nevts = nframes / 4;
	unsigned int ovf = nframes % 4;
	if (ovf != 0) {
		nevts++;
	}

	bool kern = flags & CALLSTACK_KERNEL;

	if (flags & CALLSTACK_KERNEL_WORDS) {
		uintptr_t *frames = vframes;
		for (unsigned int i = 0; i < nevts; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dataid,
			    scrub_word(frames, nframes, j + 0, kern),
			    scrub_word(frames, nframes, j + 1, kern),
			    scrub_word(frames, nframes, j + 2, kern),
			    scrub_word(frames, nframes, j + 3, kern));
		}
	} else {
		for (unsigned int i = 0; i < nevts; i++) {
			uint64_t *frames = vframes;
			unsigned int j = i * 4;
			BUF_DATA(dataid,
			    scrub_frame(frames, nframes, j + 0),
			    scrub_frame(frames, nframes, j + 1),
			    scrub_frame(frames, nframes, j + 2),
			    scrub_frame(frames, nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, flags, nframes);
}

void
kperf_kcallstack_log(struct kp_kcallstack *cs)
{
	callstack_log(PERF_CS_KHDR, PERF_CS_KDATA, cs->kpkc_frames,
	    cs->kpkc_nframes, cs->kpkc_flags);
}

void
kperf_ucallstack_log(struct kp_ucallstack *cs)
{
	callstack_log(PERF_CS_UHDR, PERF_CS_UDATA, cs->kpuc_frames,
	    cs->kpuc_nframes, cs->kpuc_flags);
}

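/*
 * User stacks cannot be walked from interrupt context (the walk may need
 * to fault pages in), so pend an AST and collect the callstack when the
 * thread next returns to user space.  A depth below 2 leaves no room for
 * even one frame plus the fixup entry.
 */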
int
kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth,
    unsigned int actionid)
{
	if (depth < 2) {
		panic("HUH");
	}
	kperf_ast_set_callstack_depth(context->cur_thread, depth);
	return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK,
	    actionid);
}

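/*
 * Helpers for the legacy CHUD backtracer below: chudxnu_kern_read() is a
 * nofault kernel copy, and chudxnu_task_read() reads user memory with
 * copyin() or through the task's VM map, never from interrupt context.
 */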
static kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
	       KERN_SUCCESS : KERN_FAILURE;
}

static kern_return_t
chudxnu_task_read(
	task_t task,
	void *kernaddr,
	uint64_t usraddr,
	vm_size_t size)
{
	//ppc version ported to arm
	kern_return_t ret = KERN_SUCCESS;

	if (ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't look at tasks on interrupt stack
	}

	if (current_task() == task) {
		if (copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}

static inline uint64_t
chudxnu_vm_unslide( uint64_t ptr, int kaddr )
{
	if (!kaddr) {
		return ptr;
	}

	return VM_KERNEL_UNSLIDE(ptr);
}

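/*
 * Legacy CHUD backtracer, one implementation per architecture.  Each
 * variant walks the saved frame pointer chain with the nofault read
 * helpers, records unslid return addresses, and appends fixup values at
 * the end of the buffer: LR (and optionally SP) on ARM, the stack pointer
 * value on x86_64.
 */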
#if __arm__
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)
#define CS_FLAG_EXTRASP 1 // capture extra sp register
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint32_t kernStackMin = thread->kernel_stack;
	uint32_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	uint32_t frame[2];
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t supervisor = FALSE;
	struct arm_saved_state *state = NULL;
	uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);

	bufferMaxIndex = *count;
	//get thread state
	if (user_only) {
		state = find_user_regs(thread);
	} else {
		state = find_kern_regs(thread);
	}

	if (!state) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* make sure it is safe to dereference before you do it */
	supervisor = ARM_SUPERVISOR_MODE(state->cpsr);

	/* can't take a kernel callstack if we've got a user frame */
	if (!user_only && !supervisor) {
		return KERN_FAILURE;
	}

	/*
	 * Reserve space for saving LR (and sometimes SP) at the end of the
	 * backtrace.
	 */
	if (flags & CS_FLAG_EXTRASP) {
		bufferMaxIndex -= 2;
	} else {
		bufferMaxIndex -= 1;
	}

	if (bufferMaxIndex < 2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = (uint64_t)state->pc; /* r15 */
	if (state->cpsr & PSR_TF) {
		currPC |= 1ULL; /* encode thumb mode into low bit of PC */
	}
	currLR = (uint64_t)state->lr; /* r14 */
	currSP = (uint64_t)state->sp; /* r13 */

	fp = (uint32_t *)state->r[7]; /* frame pointer */
	topfp = fp;

	bufferIndex = 0; // start with a stack of size zero
	buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, supervisor); // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while (bufferIndex < bufferMaxIndex) {
		pc = 0ULL;
		/*
		 * Below the frame pointer, the following values are saved:
		 * -> FP
		 */

		/*
		 * Note that we read the pc even for the first stack frame
		 * (which, in theory, is always empty because the callee fills
		 * it in just before it lowers the stack).  However, if we
		 * catch the program in between filling in the return address
		 * and lowering the stack, we want to still have a valid
		 * backtrace.  FixupStack correctly disregards this value if
		 * necessary.
		 */

		if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
			/* frame pointer is invalid - stop backtracing */
			pc = 0ULL;
			break;
		}

		if (supervisor) {
			if (((uint32_t)fp > kernStackMax) ||
			    ((uint32_t)fp < kernStackMin)) {
				kr = KERN_FAILURE;
			} else {
				kr = chudxnu_kern_read(&frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame[1];
					nextFramePointer = (uint32_t *) (frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}
		} else {
			kr = chudxnu_task_read(task,
			    &frame,
			    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
			    sizeof(frame));
			if (kr == KERN_SUCCESS) {
				pc = (uint64_t) frame[1];
				nextFramePointer = (uint32_t *) (frame[0]);
			} else {
				pc = 0ULL;
				nextFramePointer = 0ULL;
				kr = KERN_FAILURE;
			}
		}

		if (kr != KERN_SUCCESS) {
			pc = 0ULL;
			break;
		}

		if (nextFramePointer) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(pc, supervisor);
			prevPC = pc;
		}

		if (nextFramePointer < fp) {
			break;
		} else {
			fp = nextFramePointer;
		}
	}

	if (bufferIndex >= bufferMaxIndex) {
		bufferIndex = bufferMaxIndex;
		kr = KERN_RESOURCE_SHORTAGE;
	} else {
		kr = KERN_SUCCESS;
	}

	// Save link register and R13 (sp) at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, supervisor);
	if (flags & CS_FLAG_EXTRASP) {
		buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, supervisor);
	}

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
}
#elif __arm64__

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped the program as it was saving values into the previous
// stack frame, etc.) after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0) (optional)

#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)

#define CS_FLAG_EXTRASP 1 // capture extra sp register

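/*
 * The 64-bit walk below mirrors the 32-bit one, with one addition: on
 * arm64e (HAS_APPLE_PAC), return addresses loaded from the stack are
 * signed, so they are stripped with ptrauth_strip(...,
 * ptrauth_key_return_address) before being recorded.
 */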
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr = KERN_SUCCESS;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t kernel = FALSE;
	struct arm_saved_state *sstate = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);
	bufferMaxIndex = *count;
	//get thread state
	if (user_only) {
		sstate = find_user_regs(thread);
	} else {
		sstate = find_kern_regs(thread);
	}

	if (!sstate) {
		*count = 0;
		return KERN_FAILURE;
	}

	if (is_saved_state64(sstate)) {
		struct arm_saved_state64 *state = NULL;
		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
		uint64_t frame[2];

		state = saved_state64(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR64_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = state->pc;
		currLR = state->lr;
		currSP = state->sp;

		fp = (uint64_t *)state->fp; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint64_t)fp > kernStackMax) ||
				    ((uint64_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
						/* return addresses on stack will be signed by arm64e ABI */
						pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
						pc = frame[1];
#endif
						nextFramePointer = (uint64_t *)frame[0];
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
#if defined(HAS_APPLE_PAC)
					/* return addresses on stack will be signed by arm64e ABI */
					pc = (uint64_t)ptrauth_strip((void *)frame[1], ptrauth_key_return_address);
#else
					pc = frame[1];
#endif
					nextFramePointer = (uint64_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			kr = KERN_SUCCESS;
		}

		// Save link register and SP at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	} else {
		struct arm_saved_state32 *state = NULL;
		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

		/* 64-bit kernel stacks, 32-bit user stacks */
		uint64_t frame[2];
		uint32_t frame32[2];

		state = saved_state32(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = ARM_SUPERVISOR_MODE(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel) {
			return KERN_FAILURE;
		}

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = (uint64_t)state->pc; /* r15 */
		if (state->cpsr & PSR_TF) {
			currPC |= 1ULL; /* encode thumb mode into low bit of PC */
		}
		currLR = (uint64_t)state->lr; /* r14 */
		currSP = (uint64_t)state->sp; /* r13 */

		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint32_t)fp > kernStackMax) ||
				    ((uint32_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = (uint64_t)frame[1];
						nextFramePointer = (uint32_t *) (frame[0]);
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame32,
				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
				    sizeof(frame32));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame32[1];
					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp) {
				break;
			} else {
				fp = nextFramePointer;
			}
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			/* ignore all other failures */
			kr = KERN_SUCCESS;
		}

		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
		}
	}

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
}
#elif __x86_64__

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

typedef struct _cframe64_t {
	uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
	uint64_t caller;
	uint64_t args[0];
} cframe64_t;

typedef struct _cframe_t {
	uint32_t prev; // this is really a user32-space pointer to the previous frame
	uint32_t caller;
	uint32_t args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

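/*
 * For user addresses, VALID_STACK_ADDRESS64 also rejects pointers in the
 * non-canonical "hole": anything above 0x00007FFFFFFFFFFF and below
 * 0xFFFF800000000000 can never be a valid frame pointer and is not read.
 */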
static kern_return_t
do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE; // no frames traced
	}
	if (!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

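/*
 * do_backtrace32() and do_backtrace64() walk saved 32-bit and 64-bit
 * frame chains, respectively, using the nofault read helpers so that a
 * bad frame pointer terminates the walk instead of faulting.
 */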
static kern_return_t
do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (ct >= max_idx) {
		return KERN_RESOURCE_SHORTAGE; // no frames traced
	}
	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord; // promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord; // promote 32 bit address

		if (prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t
do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (*start_idx >= max_idx) {
		return KERN_RESOURCE_SHORTAGE; // no frames traced
	}
	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

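/*
 * Pick the most privileged register state available for the thread
 * (interrupt state, then saved kernel state, then user state) and
 * dispatch to the matching backtracer, appending the value at the stack
 * pointer as the fixup entry.
 */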
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	boolean_t kern_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL; // kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if (ml_at_interrupt_context()) {
		if (user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if (is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if (!ml_at_interrupt_context() && kernel_task == task) {
		if (!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if (!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if (is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if (supervisor) {
		// the caller only wants a user callstack.
		if (user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if (!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if (kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */
		if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
	} else if (regs64) {
		currPC = regs64->isf.rip;
	} else if (regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if (u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if (u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if (!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if (kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}

		if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if (regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if (regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if (u_regs64 && !kern_only) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if (u_regs32 && !kern_only) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}

__private_extern__
kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t is_user)
{
	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}
#else /* !__arm__ && !__arm64__ && !__x86_64__ */
#error kperf: unsupported architecture
#endif /* !__arm__ && !__arm64__ && !__x86_64__ */