/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#if 0
#pragma mark **** thread state ****
#endif

__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    if (user_only) {
        /* We can't get user state for kernel threads */
        if (thread->task == kernel_task)
            return KERN_FAILURE;
        /* this properly handles deciding whether the thread is 64 bit or not */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // The i386 machine_thread_get_kern_state() differs from the PPC version, which
        // returns the previous save area - user or kernel - rather than the kernel state,
        // or NULL if no kernel interrupt state is available.

        // The real purpose of this branch is the following:
        // the caller doesn't care whether the thread state is user or kernel, it
        // just wants the thread state, so we need to determine the proper one to
        // return - kernel or user - for the given thread.
        if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // These are the conditions under which we can possibly read kernel
            // state. We still need to determine whether this interrupt happened
            // in kernel or user context.
            if (USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
                current_cpu_datap()->cpu_interrupt_level == 1) {
                // interrupt happened in user land
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // kernel interrupt.
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}
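
/*
 * Illustrative only (not part of the original source): a minimal sketch of how
 * a sampling client might call chudxnu_thread_get_state() above to fetch a
 * thread's full 64-bit register set. The flavor/count constants are the
 * standard Mach ones from <mach/i386/thread_status.h>; the helper name
 * sample_thread_regs64() is hypothetical.
 *
 *   static kern_return_t
 *   sample_thread_regs64(thread_t thread, x86_thread_state64_t *out)
 *   {
 *       mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *       // user_only == FALSE: let the helper decide between kernel and user state
 *       return chudxnu_thread_get_state(thread, x86_THREAD_STATE64,
 *                                       (thread_state_t)out, &count, FALSE);
 *   }
 */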

__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count,
    boolean_t               user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}

#if 0
#pragma mark **** task memory read/write ****
#endif

__private_extern__ kern_return_t
chudxnu_task_read(
    task_t      task,
    void        *kernaddr,
    uint64_t    usraddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if (ml_at_interrupt_context()) {
        return KERN_FAILURE; // Can't look at tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if (current_task() == task) {

        if (copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
    task_t      task,
    uint64_t    useraddr,
    void        *kernaddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if (ml_at_interrupt_context()) {
        return KERN_FAILURE; // can't poke into tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if (current_task() == task) {

        if (copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}

__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void        *srcaddr,
    vm_size_t   size)
{
    return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}
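
/*
 * Illustrative only: the backtrace code below leans on chudxnu_kern_read() as a
 * nofault probe, e.g. to pull a single saved return address off a kernel stack
 * without risking a fault on a bad frame pointer. A minimal sketch (the helper
 * name peek_kern_return_address() is hypothetical):
 *
 *   static uint64_t
 *   peek_kern_return_address(uint64_t fp)
 *   {
 *       uint64_t ret = 0ULL;
 *       // the return address sits one word above the saved frame pointer
 *       if (chudxnu_kern_read(&ret, (vm_offset_t)(fp + sizeof(uint64_t)),
 *                             sizeof(ret)) != KERN_SUCCESS)
 *           return 0ULL;
 *       return ret;
 *   }
 */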

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
    ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
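
/*
 * Note on the user-mode case above: x86-64 only treats addresses at or below
 * 0x00007FFFFFFFFFFF or at or above 0xFFFF800000000000 as canonical, so any
 * value inside that "hole" can never be a valid frame pointer and is rejected
 * before we attempt to read through it.
 */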

typedef struct _cframe64_t {
    uint64_t    prevFP;     // can't use a real pointer here until we're a 64 bit kernel
    uint64_t    caller;
    uint64_t    args[0];
} cframe64_t;


typedef struct _cframe_t {
    uint32_t    prev;       // this is really a user32-space pointer to the previous frame
    uint32_t    caller;
    uint32_t    args[0];
} cframe_t;
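
/*
 * Both overlays mirror the standard frame layout: a frame pointer points at the
 * saved previous frame pointer, with the caller's return address one word above
 * it. The 32-bit walker below reads fp->caller and fp->prev through the
 * cframe_t overlay; the 64-bit walker computes the same offsets directly.
 */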

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

static kern_return_t do_backtrace32(
    task_t task,
    thread_t thread,
    x86_saved_state32_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint32_t tmpWord = 0UL;
    uint64_t currPC = (uint64_t) regs->eip;
    uint64_t currFP = (uint64_t) regs->ebp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if (ct >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 32 bit state.
    while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

        if (!currFP) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if (supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        }

        if (kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        currPC = (uint64_t) tmpWord;    // promote 32 bit address

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        }
        prevFP = (uint64_t) tmpWord;    // promote 32 bit address

        if (prevFP) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

static kern_return_t do_backtrace64(
    task_t task,
    thread_t thread,
    x86_saved_state64_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint64_t currPC = regs->isf.rip;
    uint64_t currFP = regs->rbp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if (*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 64 bit state.
    while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's return address lives on the stack
        uint64_t caller = currFP + sizeof(uint64_t);

        if (!currFP) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if (supervisor) {
            kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
        }

        if (kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
        }

        if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
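
/*
 * In the 64-bit walk above, caller = currFP + sizeof(uint64_t) relies on the
 * frame-pointer-based layout of the SysV AMD64 ABI: the saved previous %rbp
 * lives at (%rbp) and the return address at 8(%rbp), so one nofault read at
 * each offset is enough to advance a frame.
 */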

static kern_return_t do_kernel_backtrace(
    thread_t thread,
    struct x86_kernel_state *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx)
{
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

#if __LP64__
    uint64_t currPC = 0ULL;
    uint64_t currFP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
    if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
#else
    uint32_t currPC = 0U;
    uint32_t currFP = 0U;
    uint32_t prevPC = 0U;
    uint32_t prevFP = 0U;
    if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
    if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
#endif

    if (*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    if (!currPC) {
        return KERN_FAILURE;
    }

    frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

    // build a backtrace of this kernel state
#if __LP64__
    while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's return address lives on the kernel stack
        uint64_t caller = currFP + sizeof(uint64_t);
#else
    while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
        uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if (!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if (ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

        if (kr != KERN_SUCCESS || !currPC) {
            currPC = 0UL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
            prevPC = currPC;
        }
        if (prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
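
/*
 * do_kernel_backtrace() walks a kernel thread whose register context lives in
 * the x86_kernel_state area at the top of its kernel stack (the state saved at
 * context switch); chudxnu_thread_get_callstack64_internal() below locates that
 * area with STACK_IKS() before calling this routine.
 */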

static
kern_return_t chudxnu_thread_get_callstack64_internal(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only,
    boolean_t               kern_only)
{
    kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
    boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
    x86_saved_state_t *tagged_regs = NULL;      // raw saved register state (interrupt or user)
    x86_saved_state64_t *regs64 = NULL;
    x86_saved_state32_t *regs32 = NULL;
    x86_saved_state32_t *u_regs32 = NULL;
    x86_saved_state64_t *u_regs64 = NULL;
    struct x86_kernel_state *kregs = NULL;

    if (ml_at_interrupt_context()) {

        if (user_only) {
            /* can't backtrace user state on interrupt stack. */
            return KERN_FAILURE;
        }

        /* backtracing at interrupt context? */
        if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            /*
             * Locate the registers for the interrupted thread, assuming it is
             * current_thread().
             */
            tagged_regs = current_cpu_datap()->cpu_int_state;

            if (is_saved_state64(tagged_regs)) {
                /* 64 bit registers */
                regs64 = saved_state64(tagged_regs);
                supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
            } else {
                /* 32 bit registers */
                regs32 = saved_state32(tagged_regs);
                supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
            }
        }
    }

    if (!ml_at_interrupt_context() && kernel_task == task) {

        if (!thread->kernel_stack) {
            return KERN_FAILURE;
        }

        // Kernel thread not at interrupt context
        kregs = (struct x86_kernel_state *)NULL;

        // nofault read of the thread->kernel_stack pointer
        if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
            return KERN_FAILURE;
        }

        // Adjust to find the saved kernel state
        kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

        supervisor = TRUE;
    } else if (!tagged_regs) {
        /*
         * not at interrupt context, or tracing a different thread than
         * current_thread() at interrupt context
         */
        tagged_regs = USER_STATE(thread);
        if (is_saved_state64(tagged_regs)) {
            /* 64 bit registers */
            regs64 = saved_state64(tagged_regs);
            supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
        } else {
            /* 32 bit registers */
            regs32 = saved_state32(tagged_regs);
            supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
        }
    }

    *count = 0;

    if (supervisor) {
        // the caller only wants a user callstack.
        if (user_only) {
            // bail - we've only got kernel state
            return KERN_FAILURE;
        }
    } else {
        // regs32(64) is not in supervisor mode.
        u_regs32 = regs32;
        u_regs64 = regs64;
        regs32 = NULL;
        regs64 = NULL;
    }

    if (user_only) {
        /* we only want to backtrace the user mode */
        if (!(u_regs32 || u_regs64)) {
            /* no user state to look at */
            return KERN_FAILURE;
        }
    }

    /*
     * Order of preference for top of stack:
     * 64 bit kernel state (not likely)
     * 32 bit kernel state
     * 64 bit user land state
     * 32 bit user land state
     */

    if (kregs) {
        /*
         * nofault read of the registers from the kernel stack (as they can
         * disappear on the fly).
         */

#if __LP64__
        if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
            return KERN_FAILURE;
        }
#else
        uint32_t tmp;
        if (KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
            return KERN_FAILURE;
        }
        currPC = (uint64_t)tmp;
#endif
    } else if (regs64) {
        currPC = regs64->isf.rip;
    } else if (regs32) {
        currPC = (uint64_t) regs32->eip;
    } else if (u_regs64) {
        currPC = u_regs64->isf.rip;
    } else if (u_regs32) {
        currPC = (uint64_t) u_regs32->eip;
    }

    if (!currPC) {
        /* no top of the stack, bail out */
        return KERN_FAILURE;
    }

    bufferIndex = 0;

    if (bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    /* backtrace kernel */
    if (kregs) {
        addr64_t address = 0ULL;
        size_t size = 0UL;

        // do the backtrace
        kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

        // and do a nofault read of (r|e)sp
#if __LP64__
        uint64_t rsp = 0ULL;
        size = sizeof(uint64_t);

        if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
            address = 0ULL;
        }
#else
        uint32_t rsp = 0ULL, tmp = 0ULL;
        size = sizeof(uint32_t);

        if (KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
            address = 0ULL;
        } else {
            address = (addr64_t)tmp;
        }
#endif

        if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t)rsp;
        }
    } else if (regs64) {
        uint64_t rsp = 0ULL;

        // backtrace the 64bit side.
        kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
                            bufferMaxIndex, TRUE);

        if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if (regs32) {
        uint32_t esp = 0UL;

        // backtrace the 32bit side.
        kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
                            bufferMaxIndex, TRUE);

        if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    } else if (u_regs64 && !kern_only) {
        /* backtrace user land */
        uint64_t rsp = 0ULL;

        kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
                            bufferMaxIndex, FALSE);

        if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if (u_regs32 && !kern_only) {
        uint32_t esp = 0UL;

        kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
                            bufferMaxIndex, FALSE);

        if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
            bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    }

    *count = bufferIndex;
    return kr;
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64_kperf(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               is_user)
{
    return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    return chudxnu_thread_get_callstack64_internal(thread, callstack, count, user_only, 0);
}
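
/*
 * Illustrative only: a minimal sketch of how a client might gather a callstack
 * with the wrapper above (sample_callstack() and MAX_CALLSTACK_FRAMES are
 * hypothetical names):
 *
 *   #define MAX_CALLSTACK_FRAMES 128
 *
 *   static void
 *   sample_callstack(thread_t thread)
 *   {
 *       uint64_t frames[MAX_CALLSTACK_FRAMES];
 *       mach_msg_type_number_t count = MAX_CALLSTACK_FRAMES;
 *
 *       // user_only == FALSE: prefer kernel state when present, else user state
 *       kern_return_t kr = chudxnu_thread_get_callstack64(thread, frames, &count, FALSE);
 *
 *       if (kr == KERN_SUCCESS || kr == KERN_RESOURCE_SHORTAGE) {
 *           // frames[0..count-1] now holds unslid PC values, possibly followed
 *           // by the stack pointer captured at the end of the walk
 *       }
 *   }
 */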