/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

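/*
 * Samplers leave the final frame slot free for a fixup value that helps
 * symbolicators recover the caller of a leaf function: the word at the
 * user stack pointer on x86, or the saved link register on ARM.
 */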
static void
callstack_fixup_user(struct callstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__) || defined(__arm__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (get_saved_state_cpsr(state) & PSR_TF) {
		cs->frames[0] |= 1ULL;
	}

	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->frames[cs->nframes++] = fixup_val;
}

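/*
 * The interrupted-kernel helpers below read a value out of the interrupted
 * thread's saved state.  They are only called on DEVELOPMENT or DEBUG
 * kernels (see callstack_fixup_interrupted), so they are presumably marked
 * __attribute__((used)) to keep builds where the call is compiled out from
 * discarding them.
 */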
#if defined(__x86_64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
	x86_saved_state_t *state;
	uintptr_t sp;
	bool state_64;
	uint64_t cs;
	uintptr_t top, bottom;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		sp = saved_state64(state)->isf.rsp;
	} else {
		sp = saved_state32(state)->uesp;
	}

	/* make sure the stack pointer is pointing somewhere in this stack */
	bottom = current_thread()->kernel_stack;
	top = bottom + kernel_stack_size;
	if (sp < bottom || sp >= top) {
		return KERN_FAILURE;
	}

	*sp_val = *(uintptr_t *)sp;
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* !defined(__arm__) */

static void
callstack_fixup_interrupted(struct callstack *cs)
{
	uintptr_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__) || defined(__arm__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->flags & CALLSTACK_KERNEL);
	cs->frames[cs->nframes++] = fixup_val;
}

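/*
 * A thread blocked with a continuation has no stack to walk, so record the
 * continuation function itself as a one-frame callstack.
 */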
void
kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);

	thread = context->cur_thread;
	assert(thread != NULL);
	assert(thread->continuation != NULL);

	cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	cs->nframes = 1;
	cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}

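/*
 * Backtrace the current thread's kernel stack synchronously, starting from
 * the frame pointer provided in the context.
 */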
void
kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
{
	assert(cs != NULL);
	assert(context != NULL);
	assert(context->cur_thread == current_thread());

	cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

	cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
	    context->starting_fp);
	if (cs->nframes > 0) {
		cs->flags |= CALLSTACK_VALID;
		/*
		 * Fake the value pointed to by the stack pointer or the link
		 * register for symbolicators.
		 */
		cs->frames[cs->nframes] = 0;
		cs->nframes += 1;
	}

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
}

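/*
 * Forward declaration for the legacy CHUD backtracer defined later in this
 * file.
 */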
kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
    uint64_t *callStack, mach_msg_type_number_t *count,
    boolean_t user_only);

void
kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->nframes);

	cs->flags = CALLSTACK_KERNEL;

#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		assert(thread == current_thread());
		cs->flags |= CALLSTACK_KERNEL_WORDS;
		cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
		    cs->nframes - 1);
		if (cs->nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
	} else {
		/*
		 * Rely on legacy CHUD backtracer to backtrace kernel stacks on
		 * other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
		    &cs->nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			cs->flags |= CALLSTACK_VALID;
			cs->flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->nframes = 0;
		}
	}

	if (cs->nframes == 0) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread), cs->flags, cs->nframes);
}

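/*
 * User stacks are sampled from thread context with interrupts enabled,
 * since walking them may fault on pageable memory.
 */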
void
kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;
	bool user_64 = false;
	int err;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
	assert(ml_get_interrupts_enabled() == TRUE);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->nframes);

	cs->flags = 0;

	err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
	    cs->nframes - 1, &cs->nframes, &user_64);
	cs->flags |= CALLSTACK_KERNEL_WORDS;
	if (user_64) {
		cs->flags |= CALLSTACK_64BIT;
	}

	if (!err || err == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->flags |= CALLSTACK_VALID;
	} else {
		cs->nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->flags, cs->nframes);
}

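/*
 * Scrub the frames before they are emitted to the trace buffer: kernel
 * addresses are unslid so the KASLR slide does not leak, and out-of-range
 * slots are zero-filled.
 */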
static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
	if (frame < n_frames) {
		if (kern) {
			return VM_KERNEL_UNSLIDE(bt[frame]);
		} else {
			return bt[frame];
		}
	} else {
		return 0;
	}
}

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return (uintptr_t)(bt[frame]);
	} else {
		return 0;
	}
}

static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);

	/* framing information for the stack */
	BUF_DATA(hcode, cs->flags, cs->nframes);

	/* how many batches of 4 */
	unsigned int nframes = cs->nframes;
	unsigned int n = nframes / 4;
	unsigned int ovf = nframes % 4;
	if (ovf != 0) {
		n++;
	}

	bool kern = cs->flags & CALLSTACK_KERNEL;

	if (cs->flags & CALLSTACK_KERNEL_WORDS) {
		uintptr_t *frames = (uintptr_t *)cs->frames;
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_word(frames, nframes, j + 0, kern),
			    scrub_word(frames, nframes, j + 1, kern),
			    scrub_word(frames, nframes, j + 2, kern),
			    scrub_word(frames, nframes, j + 3, kern));
		}
	} else {
		for (unsigned int i = 0; i < n; i++) {
			uint64_t *frames = cs->frames;
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_frame(frames, nframes, j + 0),
			    scrub_frame(frames, nframes, j + 1),
			    scrub_frame(frames, nframes, j + 2),
			    scrub_frame(frames, nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
}

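/*
 * Worked example: a 6-frame callstack is emitted as one header record plus
 * two data records; the second record carries frames 4 and 5 and two
 * zero-padded slots from the scrub helpers above.
 */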
void
kperf_kcallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
}

void
kperf_ucallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
}

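/*
 * Pend an AST on the target thread so its user callstack is collected at
 * the next boundary back to user space, where it is safe to fault.
 */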
int
kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth)
{
	int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
	kperf_ast_set_callstack_depth(context->cur_thread, depth);

	return did_pend;
}

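/*
 * Fault-safe reads for the legacy CHUD backtracers: chudxnu_kern_read
 * copies from kernel memory without taking a fault, and chudxnu_task_read
 * copies in from a task's address space.
 */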
static kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return ((ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
	    KERN_SUCCESS : KERN_FAILURE);
}

static kern_return_t
chudxnu_task_read(
	task_t task,
	void *kernaddr,
	uint64_t usraddr,
	vm_size_t size)
{
	// ppc version ported to arm
	kern_return_t ret = KERN_SUCCESS;

	if (ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't look at tasks on interrupt stack
	}

	if (current_task() == task) {
		thread_t cur_thr = current_thread();
		vm_offset_t recover_handler = cur_thr->recover;

		if (copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}

static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr, int kaddr)
{
	if (!kaddr)
		return ptr;

	return VM_KERNEL_UNSLIDE(ptr);
}

#if __arm__
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)
#define CS_FLAG_EXTRASP 1 // capture extra sp register
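/*
 * Output layout: PC in slot 0, then the walked frames, then the current LR,
 * and (with CS_FLAG_EXTRASP) the current SP at the very end.
 */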
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint32_t kernStackMin = thread->kernel_stack;
	uint32_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	uint32_t frame[2];
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t supervisor = FALSE;
	struct arm_saved_state *state = NULL;
	uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);

	bufferMaxIndex = *count;
	// get thread state
	if (user_only)
		state = find_user_regs(thread);
	else
		state = find_kern_regs(thread);

	if (!state) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* make sure it is safe to dereference before you do it */
	supervisor = ARM_SUPERVISOR_MODE(state->cpsr);

	/* can't take a kernel callstack if we've got a user frame */
	if (!user_only && !supervisor)
		return KERN_FAILURE;

	/*
	 * Reserve space for saving LR (and sometimes SP) at the end of the
	 * backtrace.
	 */
	if (flags & CS_FLAG_EXTRASP) {
		bufferMaxIndex -= 2;
	} else {
		bufferMaxIndex -= 1;
	}

	if (bufferMaxIndex < 2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = (uint64_t)state->pc; /* r15 */
	if (state->cpsr & PSR_TF)
		currPC |= 1ULL; /* encode thumb mode into low bit of PC */

	currLR = (uint64_t)state->lr; /* r14 */
	currSP = (uint64_t)state->sp; /* r13 */

	fp = (uint32_t *)state->r[7]; /* frame pointer */
	topfp = fp;

	bufferIndex = 0; // start with a stack of size zero
	buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, supervisor); // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while (bufferIndex < bufferMaxIndex) {
		pc = 0ULL;
		/*
		 * Below the frame pointer, the following values are saved:
		 * -> FP
		 */

		/*
		 * Note that we read the pc even for the first stack frame
		 * (which, in theory, is always empty because the callee fills
		 * it in just before it lowers the stack).  However, if we
		 * catch the program between filling in the return address and
		 * lowering the stack, we still want a valid backtrace.
		 * FixupStack correctly disregards this value if necessary.
		 */

		if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
			/* frame pointer is invalid - stop backtracing */
			pc = 0ULL;
			break;
		}

		if (supervisor) {
			if (((uint32_t)fp > kernStackMax) ||
			    ((uint32_t)fp < kernStackMin)) {
				kr = KERN_FAILURE;
			} else {
				kr = chudxnu_kern_read(&frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame[1];
					nextFramePointer = (uint32_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}
		} else {
			kr = chudxnu_task_read(task,
			    &frame,
			    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
			    sizeof(frame));
			if (kr == KERN_SUCCESS) {
				pc = (uint64_t)frame[1];
				nextFramePointer = (uint32_t *)(frame[0]);
			} else {
				pc = 0ULL;
				nextFramePointer = 0ULL;
				kr = KERN_FAILURE;
			}
		}

		if (kr != KERN_SUCCESS) {
			pc = 0ULL;
			break;
		}

		if (nextFramePointer) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(pc, supervisor);
			prevPC = pc;
		}

		if (nextFramePointer < fp)
			break;
		else
			fp = nextFramePointer;
	}

	if (bufferIndex >= bufferMaxIndex) {
		bufferIndex = bufferMaxIndex;
		kr = KERN_RESOURCE_SHORTAGE;
	} else {
		kr = KERN_SUCCESS;
	}

	// Save link register and R13 (sp) at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, supervisor);
	if (flags & CS_FLAG_EXTRASP)
		buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, supervisor);

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal(thread, callStack, count, user_only, 0);
}
#elif __arm64__
// chudxnu_thread_get_callstack gathers a raw callstack along with any
// information needed to fix it up later (in case we stopped the program as
// it was saving values into the previous stack frame, etc.) after sampling
// has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0) (optional)

#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)

#define CS_FLAG_EXTRASP 1 // capture extra sp register

static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr = KERN_SUCCESS;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t kernel = FALSE;
	struct arm_saved_state *sstate = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);
	bufferMaxIndex = *count;
	// get thread state
	if (user_only)
		sstate = find_user_regs(thread);
	else
		sstate = find_kern_regs(thread);

	if (!sstate) {
		*count = 0;
		return KERN_FAILURE;
	}

	if (is_saved_state64(sstate)) {
		struct arm_saved_state64 *state = NULL;
		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
		uint64_t frame[2];

		state = saved_state64(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR64_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel)
			return KERN_FAILURE;

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = state->pc;
		currLR = state->lr;
		currSP = state->sp;

		fp = (uint64_t *)state->fp; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program between filling in the return address and
			 * lowering the stack, we still want a valid backtrace.
			 * FixupStack correctly disregards this value if necessary.
			 */

			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint64_t)fp > kernStackMax) ||
				    ((uint64_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = frame[1];
						nextFramePointer = (uint64_t *)frame[0];
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
					pc = frame[1];
					nextFramePointer = (uint64_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp)
				break;
			else
				fp = nextFramePointer;
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			kr = KERN_SUCCESS;
		}

		// Save link register and SP at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP)
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
	} else {
		struct arm_saved_state32 *state = NULL;
		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

		/* 64-bit kernel stacks, 32-bit user stacks */
		uint64_t frame[2];
		uint32_t frame32[2];

		state = saved_state32(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = ARM_SUPERVISOR_MODE(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel)
			return KERN_FAILURE;

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = (uint64_t)state->pc; /* r15 */
		if (state->cpsr & PSR_TF)
			currPC |= 1ULL; /* encode thumb mode into low bit of PC */

		currLR = (uint64_t)state->lr; /* r14 */
		currSP = (uint64_t)state->sp; /* r13 */

		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program between filling in the return address and
			 * lowering the stack, we still want a valid backtrace.
			 * FixupStack correctly disregards this value if necessary.
			 */

			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint32_t)fp > kernStackMax) ||
				    ((uint32_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = (uint64_t)frame[1];
						nextFramePointer = (uint32_t *)(frame[0]);
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame32,
				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
				    sizeof(frame32));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame32[1];
					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp)
				break;
			else
				fp = nextFramePointer;
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			/* ignore all other failures */
			kr = KERN_SUCCESS;
		}

		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP)
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
	}

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal(thread, callStack, count, user_only, 0);
}
#elif __x86_64__

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
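/*
 * The two constants above bound the non-canonical address hole on x86-64:
 * user pointers must fall at or below 0x00007FFFFFFFFFFF or at or above
 * 0xFFFF800000000000 to be canonical and dereferenceable at all.
 */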

typedef struct _cframe64_t {
	uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
	uint64_t caller;
	uint64_t args[0];
} cframe64_t;

typedef struct _cframe_t {
	uint32_t prev; // this is really a user32-space pointer to the previous frame
	uint32_t caller;
	uint32_t args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

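/*
 * Walk the saved k_rip/k_rbp frame chain in the x86_kernel_state of a
 * blocked kernel thread, staying within the bounds of its kernel stack.
 */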
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	if (!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve the contents of the frame pointer and advance to the
		 * next stack frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord; // promote 32 bit address

		/*
		 * retrieve the contents of the frame pointer and advance to the
		 * next stack frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord; // promote 32 bit address

		if (prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives
		uint64_t caller = currFP + sizeof(uint64_t);

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve the contents of the frame pointer and advance to the
		 * next stack frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	boolean_t kern_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL; // kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if (ml_at_interrupt_context()) {

		if (user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if (is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if (!ml_at_interrupt_context() && kernel_task == task) {

		if (!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if (!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if (is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if (supervisor) {
		// the caller only wants a user callstack.
		if (user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if (!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if (kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

		if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
	} else if (regs64) {
		currPC = regs64->isf.rip;
	} else if (regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if (u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if (u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if (!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if (kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}

		if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if (regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if (regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if (u_regs64 && !kern_only) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if (u_regs32 && !kern_only) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t is_user)
{
	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}
#else /* !__arm__ && !__arm64__ && !__x86_64__ */
#error kperf: unsupported architecture
#endif /* !__arm__ && !__arm64__ && !__x86_64__ */