/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif
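
/*
 * Append a "fixup" word to a sampled user callstack so symbolicators can
 * repair the top frame: the word at the user stack pointer (a just-pushed
 * return address) on x86_64, or the saved LR on ARM.  On 32-bit ARM the
 * Thumb bit is also folded into the low bit of the PC in frame 0.
 */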

static void
callstack_fixup_user(struct callstack *cs, thread_t thread)
{
    uint64_t fixup_val = 0;
    assert(cs->nframes < MAX_CALLSTACK_FRAMES);

#if defined(__x86_64__)
    user_addr_t sp_user;
    bool user_64;
    x86_saved_state_t *state;

    state = get_user_regs(thread);
    if (!state) {
        goto out;
    }

    user_64 = is_saved_state64(state);
    if (user_64) {
        sp_user = saved_state64(state)->isf.rsp;
    } else {
        sp_user = saved_state32(state)->uesp;
    }

    if (thread == current_thread()) {
        (void)copyin(sp_user, (char *)&fixup_val,
            user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
    } else {
        (void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
            &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
    }

#elif defined(__arm64__) || defined(__arm__)

    struct arm_saved_state *state = get_user_regs(thread);
    if (!state) {
        goto out;
    }

    /* encode thumb mode into low bit of PC */
    if (get_saved_state_cpsr(state) & PSR_TF) {
        cs->frames[0] |= 1ULL;
    }

    fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
    cs->frames[cs->nframes++] = fixup_val;
}

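/*
 * Recover a single word of interrupted kernel state for the fixup slot:
 * the value at the interrupted stack pointer on x86_64, or the interrupted
 * LR on ARM.  Both helpers fail if the interrupt arrived from user space.
 */
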
#if defined(__x86_64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
    x86_saved_state_t *state;
    uintptr_t sp;
    bool state_64;
    uint64_t cs;
    uintptr_t top, bottom;

    state = current_cpu_datap()->cpu_int_state;
    if (!state) {
        return KERN_FAILURE;
    }

    state_64 = is_saved_state64(state);

    if (state_64) {
        cs = saved_state64(state)->isf.cs;
    } else {
        cs = saved_state32(state)->cs;
    }
    /* return early if interrupted a thread in user space */
    if ((cs & SEL_PL) == SEL_PL_U) {
        return KERN_FAILURE;
    }

    if (state_64) {
        sp = saved_state64(state)->isf.rsp;
    } else {
        sp = saved_state32(state)->uesp;
    }

    /* make sure the stack pointer is pointing somewhere in this stack */
    bottom = current_thread()->kernel_stack;
    top = bottom + kernel_stack_size;
    if (sp < bottom || sp >= top) {
        return KERN_FAILURE;
    }

    *sp_val = *(uintptr_t *)sp;
    return KERN_SUCCESS;
}

#elif defined(__arm64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
    struct arm_saved_state *state;

    state = getCpuDatap()->cpu_int_state;
    if (!state) {
        return KERN_FAILURE;
    }

    /* return early if interrupted a thread in user space */
    if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
        return KERN_FAILURE;
    }

    *lr = get_saved_state_lr(state);
    return KERN_SUCCESS;
}

#elif defined(__arm__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
    struct arm_saved_state *state;

    state = getCpuDatap()->cpu_int_state;
    if (!state) {
        return KERN_FAILURE;
    }

    /* return early if interrupted a thread in user space */
    if (PSR_IS_USER(get_saved_state_cpsr(state))) {
        return KERN_FAILURE;
    }

    *lr = get_saved_state_lr(state);
    return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* !defined(__arm__) */


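/*
 * Terminate an interrupt-time kernel callstack with a fixup word.  The word
 * carries raw kernel state, so it is only populated on DEVELOPMENT or DEBUG
 * kernels; release kernels log a zero instead.
 */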
static void
callstack_fixup_interrupted(struct callstack *cs)
{
    uintptr_t fixup_val = 0;
    assert(cs->nframes < MAX_CALLSTACK_FRAMES);

    /*
     * Only provide arbitrary data on development or debug kernels.
     */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
    (void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__) || defined(__arm__)
    (void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

    assert(cs->flags & CALLSTACK_KERNEL);
    cs->frames[cs->nframes++] = fixup_val;
}

void
kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
{
    thread_t thread;

    assert(cs != NULL);
    assert(context != NULL);

    thread = context->cur_thread;
    assert(thread != NULL);
    assert(thread->continuation != NULL);

    cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
    cs->flags |= CALLSTACK_64BIT;
#endif

    cs->nframes = 1;
    cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}

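/*
 * Sample the current thread's kernel stack by walking frame pointers upwards
 * from context->starting_fp.  A zero fixup word is appended so the resulting
 * stack has the same shape as those captured by the other kernel paths.
 */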
void
kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
{
    assert(cs != NULL);
    assert(context != NULL);
    assert(context->cur_thread == current_thread());

    cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
    cs->flags |= CALLSTACK_64BIT;
#endif

    BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

    cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
        context->starting_fp);
    if (cs->nframes > 0) {
        cs->flags |= CALLSTACK_VALID;
        /*
         * Fake the value pointed to by the stack pointer or the link
         * register for symbolicators.
         */
        cs->frames[cs->nframes] = 0;
        cs->nframes += 1;
    }

    BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
}

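/*
 * Forward declaration for the legacy CHUD backtracer defined per-architecture
 * at the bottom of this file; it is only used to walk the kernel stacks of
 * threads other than the current one.
 */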
kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
    uint64_t *callStack, mach_msg_type_number_t *count,
    boolean_t user_only);

void
kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
{
    thread_t thread;

    assert(cs != NULL);
    assert(context != NULL);
    assert(cs->nframes <= MAX_CALLSTACK_FRAMES);

    thread = context->cur_thread;
    assert(thread != NULL);

    BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
        cs->nframes);

    cs->flags = CALLSTACK_KERNEL;

#ifdef __LP64__
    cs->flags |= CALLSTACK_64BIT;
#endif

    if (ml_at_interrupt_context()) {
        assert(thread == current_thread());
        cs->flags |= CALLSTACK_KERNEL_WORDS;
        cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
            cs->nframes - 1);
        if (cs->nframes != 0) {
            callstack_fixup_interrupted(cs);
        }
    } else {
        /*
         * Rely on legacy CHUD backtracer to backtrace kernel stacks on
         * other threads.
         */
        kern_return_t kr;
        kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
            &cs->nframes, FALSE);
        if (kr == KERN_SUCCESS) {
            cs->flags |= CALLSTACK_VALID;
        } else if (kr == KERN_RESOURCE_SHORTAGE) {
            cs->flags |= CALLSTACK_VALID;
            cs->flags |= CALLSTACK_TRUNCATED;
        } else {
            cs->nframes = 0;
        }
    }

    if (cs->nframes == 0) {
        BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
    }

    BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread), cs->flags, cs->nframes);
}

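/*
 * Sample a thread's user callstack.  This must run with interrupts enabled,
 * since backtrace_thread_user may fault in user pages.  A walk that faults
 * part-way through (EFAULT) is still fixed up and logged as valid.
 */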
void
kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
{
    thread_t thread;
    bool user_64 = false;
    int err;

    assert(cs != NULL);
    assert(context != NULL);
    assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
    assert(ml_get_interrupts_enabled() == TRUE);

    thread = context->cur_thread;
    assert(thread != NULL);

    BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
        cs->nframes);

    cs->flags = 0;

    err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
        cs->nframes - 1, &cs->nframes, &user_64);
    cs->flags |= CALLSTACK_KERNEL_WORDS;
    if (user_64) {
        cs->flags |= CALLSTACK_64BIT;
    }

    if (!err || err == EFAULT) {
        callstack_fixup_user(cs, thread);
        cs->flags |= CALLSTACK_VALID;
    } else {
        cs->nframes = 0;
        BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
    }

    BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
        cs->flags, cs->nframes);
}

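/*
 * Scrub a frame before it is logged: un-slide kernel addresses so they match
 * symbol files, and zero out slots past the end of the stack so that partial
 * batches of four are well-defined.
 */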
static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
    if (frame < n_frames) {
        if (kern) {
            return VM_KERNEL_UNSLIDE(bt[frame]);
        } else {
            return bt[frame];
        }
    } else {
        return 0;
    }
}

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
    if (frame < n_frames) {
        return (uintptr_t)(bt[frame]);
    } else {
        return 0;
    }
}

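/*
 * Emit a callstack to the trace buffer: a header event carrying (flags,
 * nframes), then the frames in batches of four.  A 6-frame stack, for
 * example, produces two data events, the second padded with two zeros.
 */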
static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
    BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);

    /* framing information for the stack */
    BUF_DATA(hcode, cs->flags, cs->nframes);

    /* how many batches of 4 */
    unsigned int nframes = cs->nframes;
    unsigned int n = nframes / 4;
    unsigned int ovf = nframes % 4;
    if (ovf != 0) {
        n++;
    }

    bool kern = cs->flags & CALLSTACK_KERNEL;

    if (cs->flags & CALLSTACK_KERNEL_WORDS) {
        uintptr_t *frames = (uintptr_t *)cs->frames;
        for (unsigned int i = 0; i < n; i++) {
            unsigned int j = i * 4;
            BUF_DATA(dcode,
                scrub_word(frames, nframes, j + 0, kern),
                scrub_word(frames, nframes, j + 1, kern),
                scrub_word(frames, nframes, j + 2, kern),
                scrub_word(frames, nframes, j + 3, kern));
        }
    } else {
        for (unsigned int i = 0; i < n; i++) {
            uint64_t *frames = cs->frames;
            unsigned int j = i * 4;
            BUF_DATA(dcode,
                scrub_frame(frames, nframes, j + 0),
                scrub_frame(frames, nframes, j + 1),
                scrub_frame(frames, nframes, j + 2),
                scrub_frame(frames, nframes, j + 3));
        }
    }

    BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
}

void
kperf_kcallstack_log( struct callstack *cs )
{
    callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
}

void
kperf_ucallstack_log( struct callstack *cs )
{
    callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
}

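/*
 * User stacks cannot be safely walked from here, so pend an AST on the target
 * thread instead; the callstack is collected when that thread next crosses
 * the user/kernel boundary, where copyin is safe.
 */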
int
kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth)
{
    int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
    kperf_ast_set_callstack_depth(context->cur_thread, depth);

    return did_pend;
}

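/*
 * Fault-safe reads for the CHUD backtracers below: kernel reads go through
 * ml_nofault_copy, while task reads use copyin for the current task and
 * vm_map_read_user for any other (and never run at interrupt context).
 */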
static kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
           KERN_SUCCESS : KERN_FAILURE;
}

static kern_return_t
chudxnu_task_read(
    task_t task,
    void *kernaddr,
    uint64_t usraddr,
    vm_size_t size)
{
    // ppc version ported to arm
    kern_return_t ret = KERN_SUCCESS;

    if (ml_at_interrupt_context()) {
        return KERN_FAILURE; // can't look at tasks on interrupt stack
    }

    if (current_task() == task) {
        if (copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}

static inline uint64_t
chudxnu_vm_unslide( uint64_t ptr, int kaddr )
{
    if (!kaddr) {
        return ptr;
    }

    return VM_KERNEL_UNSLIDE(ptr);
}

#if __arm__
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)
#define CS_FLAG_EXTRASP 1 // capture extra sp register
static kern_return_t
chudxnu_thread_get_callstack64_internal(
    thread_t thread,
    uint64_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only,
    int flags)
{
    kern_return_t kr;
    task_t task;
    uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint32_t kernStackMin = thread->kernel_stack;
    uint32_t kernStackMax = kernStackMin + kernel_stack_size;
    uint64_t *buffer = callStack;
    uint32_t frame[2];
    int bufferIndex = 0;
    int bufferMaxIndex = 0;
    boolean_t supervisor = FALSE;
    struct arm_saved_state *state = NULL;
    uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
    uint64_t pc = 0ULL;

    task = get_threadtask(thread);

    bufferMaxIndex = *count;
    // get thread state
    if (user_only) {
        state = find_user_regs(thread);
    } else {
        state = find_kern_regs(thread);
    }

    if (!state) {
        *count = 0;
        return KERN_FAILURE;
    }

    /* make sure it is safe to dereference before you do it */
    supervisor = ARM_SUPERVISOR_MODE(state->cpsr);

    /* can't take a kernel callstack if we've got a user frame */
    if (!user_only && !supervisor) {
        return KERN_FAILURE;
    }

    /*
     * Reserve space for saving LR (and sometimes SP) at the end of the
     * backtrace.
     */
    if (flags & CS_FLAG_EXTRASP) {
        bufferMaxIndex -= 2;
    } else {
        bufferMaxIndex -= 1;
    }

    if (bufferMaxIndex < 2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = (uint64_t)state->pc; /* r15 */
    if (state->cpsr & PSR_TF) {
        currPC |= 1ULL; /* encode thumb mode into low bit of PC */
    }
    currLR = (uint64_t)state->lr; /* r14 */
    currSP = (uint64_t)state->sp; /* r13 */

    fp = (uint32_t *)state->r[7]; /* frame pointer */
    topfp = fp;

    bufferIndex = 0; // start with a stack of size zero
    buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, supervisor); // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while (bufferIndex < bufferMaxIndex) {
        pc = 0ULL;
        /*
         * Below the frame pointer, the following values are saved:
         * -> FP
         */

        /*
         * Note that we read the pc even for the first stack frame
         * (which, in theory, is always empty because the callee fills
         * it in just before it lowers the stack).  However, if we
         * catch the program in between filling in the return address
         * and lowering the stack, we want to still have a valid
         * backtrace.  FixupStack correctly disregards this value if
         * necessary.
         */

        if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
            /* frame pointer is invalid - stop backtracing */
            pc = 0ULL;
            break;
        }

        if (supervisor) {
            if (((uint32_t)fp > kernStackMax) ||
                ((uint32_t)fp < kernStackMin)) {
                kr = KERN_FAILURE;
            } else {
                kr = chudxnu_kern_read(&frame,
                    (vm_offset_t)fp,
                    (vm_size_t)sizeof(frame));
                if (kr == KERN_SUCCESS) {
                    pc = (uint64_t)frame[1];
                    nextFramePointer = (uint32_t *) (frame[0]);
                } else {
                    pc = 0ULL;
                    nextFramePointer = 0ULL;
                    kr = KERN_FAILURE;
                }
            }
        } else {
            kr = chudxnu_task_read(task,
                &frame,
                (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
                sizeof(frame));
            if (kr == KERN_SUCCESS) {
                pc = (uint64_t) frame[1];
                nextFramePointer = (uint32_t *) (frame[0]);
            } else {
                pc = 0ULL;
                nextFramePointer = 0ULL;
                kr = KERN_FAILURE;
            }
        }

        if (kr != KERN_SUCCESS) {
            pc = 0ULL;
            break;
        }

        if (nextFramePointer) {
            buffer[bufferIndex++] = chudxnu_vm_unslide(pc, supervisor);
            prevPC = pc;
        }

        if (nextFramePointer < fp) {
            break;
        } else {
            fp = nextFramePointer;
        }
    }

    if (bufferIndex >= bufferMaxIndex) {
        bufferIndex = bufferMaxIndex;
        kr = KERN_RESOURCE_SHORTAGE;
    } else {
        kr = KERN_SUCCESS;
    }

    // Save link register and R13 (sp) at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, supervisor);
    if (flags & CS_FLAG_EXTRASP) {
        buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, supervisor);
    }

    *count = bufferIndex;
    return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
    thread_t thread,
    uint64_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
}
#elif __arm64__


// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
// after sampling has finished.
//
// For an N-entry callstack:
//
// [0] current pc
// [1..N-3] stack frames (including current one)
// [N-2] current LR (the return address if we're in a leaf function)
// [N-1] current SP (optional; only captured with CS_FLAG_EXTRASP)
//
//
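// e.g. with CS_FLAG_EXTRASP set, a successful walk produces
//      { pc, ret1, ret2, ..., lr, sp } and *count covers every entry.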
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)

#define CS_FLAG_EXTRASP 1 // capture extra sp register

static kern_return_t
chudxnu_thread_get_callstack64_internal(
    thread_t thread,
    uint64_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only,
    int flags)
{
    kern_return_t kr = KERN_SUCCESS;
    task_t task;
    uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + kernel_stack_size;
    uint64_t *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = 0;
    boolean_t kernel = FALSE;
    struct arm_saved_state *sstate = NULL;
    uint64_t pc = 0ULL;

    task = get_threadtask(thread);
    bufferMaxIndex = *count;
    // get thread state
    if (user_only) {
        sstate = find_user_regs(thread);
    } else {
        sstate = find_kern_regs(thread);
    }

    if (!sstate) {
        *count = 0;
        return KERN_FAILURE;
    }

    if (is_saved_state64(sstate)) {
        struct arm_saved_state64 *state = NULL;
        uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
        uint64_t frame[2];

        state = saved_state64(sstate);

        /* make sure it is safe to dereference before you do it */
        kernel = PSR64_IS_KERNEL(state->cpsr);

        /* can't take a kernel callstack if we've got a user frame */
        if (!user_only && !kernel) {
            return KERN_FAILURE;
        }

        /*
         * Reserve space for saving LR (and sometimes SP) at the end of the
         * backtrace.
         */
        if (flags & CS_FLAG_EXTRASP) {
            bufferMaxIndex -= 2;
        } else {
            bufferMaxIndex -= 1;
        }

        if (bufferMaxIndex < 2) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }

        currPC = state->pc;
        currLR = state->lr;
        currSP = state->sp;

        fp = (uint64_t *)state->fp; /* frame pointer */
        topfp = fp;

        bufferIndex = 0; // start with a stack of size zero
        buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

        BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

        // Now, fill buffer with stack backtraces.
        while (bufferIndex < bufferMaxIndex) {
            pc = 0ULL;
            /*
             * Below the frame pointer, the following values are saved:
             * -> FP
             */

            /*
             * Note that we read the pc even for the first stack frame
             * (which, in theory, is always empty because the callee fills
             * it in just before it lowers the stack).  However, if we
             * catch the program in between filling in the return address
             * and lowering the stack, we want to still have a valid
             * backtrace.  FixupStack correctly disregards this value if
             * necessary.
             */

            if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
                /* frame pointer is invalid - stop backtracing */
                pc = 0ULL;
                break;
            }

            if (kernel) {
                if (((uint64_t)fp > kernStackMax) ||
                    ((uint64_t)fp < kernStackMin)) {
                    kr = KERN_FAILURE;
                } else {
                    kr = chudxnu_kern_read(&frame,
                        (vm_offset_t)fp,
                        (vm_size_t)sizeof(frame));
                    if (kr == KERN_SUCCESS) {
                        pc = frame[1];
                        nextFramePointer = (uint64_t *)frame[0];
                    } else {
                        pc = 0ULL;
                        nextFramePointer = 0ULL;
                        kr = KERN_FAILURE;
                    }
                }
            } else {
                kr = chudxnu_task_read(task,
                    &frame,
                    (vm_offset_t)fp,
                    (vm_size_t)sizeof(frame));
                if (kr == KERN_SUCCESS) {
                    pc = frame[1];
                    nextFramePointer = (uint64_t *)(frame[0]);
                } else {
                    pc = 0ULL;
                    nextFramePointer = 0ULL;
                    kr = KERN_FAILURE;
                }
            }

            if (kr != KERN_SUCCESS) {
                pc = 0ULL;
                break;
            }

            if (nextFramePointer) {
                buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
                prevPC = pc;
            }

            if (nextFramePointer < fp) {
                break;
            } else {
                fp = nextFramePointer;
            }
        }

        BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

        if (bufferIndex >= bufferMaxIndex) {
            bufferIndex = bufferMaxIndex;
            kr = KERN_RESOURCE_SHORTAGE;
        } else {
            kr = KERN_SUCCESS;
        }

        // Save link register and SP at bottom of stack (used for later fixup).
        buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
        if (flags & CS_FLAG_EXTRASP) {
            buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
        }
    } else {
        struct arm_saved_state32 *state = NULL;
        uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

        /* 64-bit kernel stacks, 32-bit user stacks */
        uint64_t frame[2];
        uint32_t frame32[2];

        state = saved_state32(sstate);

        /* make sure it is safe to dereference before you do it */
        kernel = ARM_SUPERVISOR_MODE(state->cpsr);

        /* can't take a kernel callstack if we've got a user frame */
        if (!user_only && !kernel) {
            return KERN_FAILURE;
        }

        /*
         * Reserve space for saving LR (and sometimes SP) at the end of the
         * backtrace.
         */
        if (flags & CS_FLAG_EXTRASP) {
            bufferMaxIndex -= 2;
        } else {
            bufferMaxIndex -= 1;
        }

        if (bufferMaxIndex < 2) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }

        currPC = (uint64_t)state->pc; /* r15 */
        if (state->cpsr & PSR_TF) {
            currPC |= 1ULL; /* encode thumb mode into low bit of PC */
        }
        currLR = (uint64_t)state->lr; /* r14 */
        currSP = (uint64_t)state->sp; /* r13 */

        fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
        topfp = fp;

        bufferIndex = 0; // start with a stack of size zero
        buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

        BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

        // Now, fill buffer with stack backtraces.
        while (bufferIndex < bufferMaxIndex) {
            pc = 0ULL;
            /*
             * Below the frame pointer, the following values are saved:
             * -> FP
             */

            /*
             * Note that we read the pc even for the first stack frame
             * (which, in theory, is always empty because the callee fills
             * it in just before it lowers the stack).  However, if we
             * catch the program in between filling in the return address
             * and lowering the stack, we want to still have a valid
             * backtrace.  FixupStack correctly disregards this value if
             * necessary.
             */

            if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
                /* frame pointer is invalid - stop backtracing */
                pc = 0ULL;
                break;
            }

            if (kernel) {
                if (((uint32_t)fp > kernStackMax) ||
                    ((uint32_t)fp < kernStackMin)) {
                    kr = KERN_FAILURE;
                } else {
                    kr = chudxnu_kern_read(&frame,
                        (vm_offset_t)fp,
                        (vm_size_t)sizeof(frame));
                    if (kr == KERN_SUCCESS) {
                        pc = (uint64_t)frame[1];
                        nextFramePointer = (uint32_t *) (frame[0]);
                    } else {
                        pc = 0ULL;
                        nextFramePointer = 0ULL;
                        kr = KERN_FAILURE;
                    }
                }
            } else {
                kr = chudxnu_task_read(task,
                    &frame32,
                    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
                    sizeof(frame32));
                if (kr == KERN_SUCCESS) {
                    pc = (uint64_t)frame32[1];
                    nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
                } else {
                    pc = 0ULL;
                    nextFramePointer = 0ULL;
                    kr = KERN_FAILURE;
                }
            }

            if (kr != KERN_SUCCESS) {
                pc = 0ULL;
                break;
            }

            if (nextFramePointer) {
                buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
                prevPC = pc;
            }

            if (nextFramePointer < fp) {
                break;
            } else {
                fp = nextFramePointer;
            }
        }

        BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

        /* clamp callstack size to max */
        if (bufferIndex >= bufferMaxIndex) {
            bufferIndex = bufferMaxIndex;
            kr = KERN_RESOURCE_SHORTAGE;
        } else {
            /* ignore all other failures */
            kr = KERN_SUCCESS;
        }

        // Save link register and R13 (sp) at bottom of stack (used for later fixup).
        buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
        if (flags & CS_FLAG_EXTRASP) {
            buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
        }
    }

    *count = bufferIndex;
    return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
    thread_t thread,
    uint64_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 );
}
#elif __x86_64__

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
    ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

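/*
 * The "hole" is the non-canonical half of the x86_64 address space: valid
 * user pointers sit at or below 0x00007FFFFFFFFFFF and kernel pointers at or
 * above 0xFFFF800000000000, so nothing in between can be a real frame.
 */
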
typedef struct _cframe64_t {
    uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
    uint64_t caller;
    uint64_t args[0];
} cframe64_t;


typedef struct _cframe_t {
    uint32_t prev; // this is really a user32-space pointer to the previous frame
    uint32_t caller;
    uint32_t args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

static kern_return_t
do_kernel_backtrace(
    thread_t thread,
    struct x86_kernel_state *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx)
{
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

#if __LP64__
    uint64_t currPC = 0ULL;
    uint64_t currFP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
    if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
#else
    uint32_t currPC = 0U;
    uint32_t currFP = 0U;
    uint32_t prevPC = 0U;
    uint32_t prevFP = 0U;
    if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
    if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
#endif

    if (*start_idx >= max_idx) {
        return KERN_RESOURCE_SHORTAGE; // no frames traced
    }
    if (!currPC) {
        return KERN_FAILURE;
    }

    frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

    // build a backtrace of this kernel state
#if __LP64__
    while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
        // this is the address where caller lives in the user thread
        uint64_t caller = currFP + sizeof(uint64_t);
#else
    while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
        uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if (!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

        if (kr != KERN_SUCCESS || !currPC) {
            currPC = 0UL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
            prevPC = currPC;
        }
        if (prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}



static kern_return_t
do_backtrace32(
    task_t task,
    thread_t thread,
    x86_saved_state32_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint32_t tmpWord = 0UL;
    uint64_t currPC = (uint64_t) regs->eip;
    uint64_t currFP = (uint64_t) regs->ebp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if (ct >= max_idx) {
        return KERN_RESOURCE_SHORTAGE; // no frames traced
    }
    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 32 bit state.
    while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

        if (!currFP) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if (supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        }

        if (kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        currPC = (uint64_t) tmpWord; // promote 32 bit address

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        }
        prevFP = (uint64_t) tmpWord; // promote 32 bit address

        if (prevFP) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

static kern_return_t
do_backtrace64(
    task_t task,
    thread_t thread,
    x86_saved_state64_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint64_t currPC = regs->isf.rip;
    uint64_t currFP = regs->rbp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if (*start_idx >= max_idx) {
        return KERN_RESOURCE_SHORTAGE; // no frames traced
    }
    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 64 bit state.
    while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
        // this is the address where caller lives in the user thread
        uint64_t caller = currFP + sizeof(uint64_t);

        if (!currFP) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if (supervisor) {
            kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
        }

        if (kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
        }

        if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

static kern_return_t
chudxnu_thread_get_callstack64_internal(
    thread_t thread,
    uint64_t *callstack,
    mach_msg_type_number_t *count,
    boolean_t user_only,
    boolean_t kern_only)
{
    kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
    boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
    x86_saved_state_t *tagged_regs = NULL; // kernel register state
    x86_saved_state64_t *regs64 = NULL;
    x86_saved_state32_t *regs32 = NULL;
    x86_saved_state32_t *u_regs32 = NULL;
    x86_saved_state64_t *u_regs64 = NULL;
    struct x86_kernel_state *kregs = NULL;

    if (ml_at_interrupt_context()) {
        if (user_only) {
            /* can't backtrace user state on interrupt stack. */
            return KERN_FAILURE;
        }

        /* backtracing at interrupt context? */
        if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            /*
             * Locate the registers for the interrupted thread, assuming it is
             * current_thread().
             */
            tagged_regs = current_cpu_datap()->cpu_int_state;

            if (is_saved_state64(tagged_regs)) {
                /* 64 bit registers */
                regs64 = saved_state64(tagged_regs);
                supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
            } else {
                /* 32 bit registers */
                regs32 = saved_state32(tagged_regs);
                supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
            }
        }
    }

    if (!ml_at_interrupt_context() && kernel_task == task) {
        if (!thread->kernel_stack) {
            return KERN_FAILURE;
        }

        // Kernel thread not at interrupt context
        kregs = (struct x86_kernel_state *)NULL;

        // nofault read of the thread->kernel_stack pointer
        if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
            return KERN_FAILURE;
        }

        // Adjust to find the saved kernel state
        kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

        supervisor = TRUE;
    } else if (!tagged_regs) {
        /*
         * not at interrupt context, or tracing a different thread than
         * current_thread() at interrupt context
         */
        tagged_regs = USER_STATE(thread);
        if (is_saved_state64(tagged_regs)) {
            /* 64 bit registers */
            regs64 = saved_state64(tagged_regs);
            supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
        } else {
            /* 32 bit registers */
            regs32 = saved_state32(tagged_regs);
            supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
        }
    }

    *count = 0;

    if (supervisor) {
        // the caller only wants a user callstack.
        if (user_only) {
            // bail - we've only got kernel state
            return KERN_FAILURE;
        }
    } else {
        // regs32(64) is not in supervisor mode.
        u_regs32 = regs32;
        u_regs64 = regs64;
        regs32 = NULL;
        regs64 = NULL;
    }

    if (user_only) {
        /* we only want to backtrace the user mode */
        if (!(u_regs32 || u_regs64)) {
            /* no user state to look at */
            return KERN_FAILURE;
        }
    }

    /*
     * Order of preference for top of stack:
     * 64 bit kernel state (not likely)
     * 32 bit kernel state
     * 64 bit user land state
     * 32 bit user land state
     */

    if (kregs) {
        /*
         * nofault read of the registers from the kernel stack (as they can
         * disappear on the fly).
         */

        if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
            return KERN_FAILURE;
        }
    } else if (regs64) {
        currPC = regs64->isf.rip;
    } else if (regs32) {
        currPC = (uint64_t) regs32->eip;
    } else if (u_regs64) {
        currPC = u_regs64->isf.rip;
    } else if (u_regs32) {
        currPC = (uint64_t) u_regs32->eip;
    }

    if (!currPC) {
        /* no top of the stack, bail out */
        return KERN_FAILURE;
    }

    bufferIndex = 0;

    if (bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    /* backtrace kernel */
    if (kregs) {
        addr64_t address = 0ULL;
        size_t size = 0UL;

        // do the backtrace
        kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

        // and do a nofault read of (r|e)sp
        uint64_t rsp = 0ULL;
        size = sizeof(uint64_t);

        if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
            address = 0ULL;
        }

        if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t)rsp;
        }
    } else if (regs64) {
        uint64_t rsp = 0ULL;

        // backtrace the 64bit side.
        kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
            bufferMaxIndex - 1, TRUE);

        if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }
    } else if (regs32) {
        uint32_t esp = 0UL;

        // backtrace the 32bit side.
        kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
            bufferMaxIndex - 1, TRUE);

        if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    } else if (u_regs64 && !kern_only) {
        /* backtrace user land */
        uint64_t rsp = 0ULL;

        kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
            bufferMaxIndex - 1, FALSE);

        if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }
    } else if (u_regs32 && !kern_only) {
        uint32_t esp = 0UL;

        kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
            bufferMaxIndex - 1, FALSE);

        if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    }

    *count = bufferIndex;
    return kr;
}

__private_extern__
kern_return_t
chudxnu_thread_get_callstack64_kperf(
    thread_t thread,
    uint64_t *callstack,
    mach_msg_type_number_t *count,
    boolean_t is_user)
{
    return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}
#else /* !__arm__ && !__arm64__ && !__x86_64__ */
#error kperf: unsupported architecture
#endif /* !__arm__ && !__arm64__ && !__x86_64__ */