/* osfmk/chud/i386/chud_thread_i386.c (xnu-1699.24.23) */

/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#if 0
#pragma mark **** thread state ****
#endif

__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether the thread is 64 bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() differs from the PPC version,
		// which returns the previous save area - user or kernel - rather than
		// the kernel state, or NULL if no kernel interrupt state is available.

		// the real purpose of this branch is the following:
		// the caller doesn't care whether the thread state is user or kernel;
		// they just want the thread state, so we need to determine the proper
		// one - kernel or user - to return for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// under these conditions we may be able to read the kernel state;
			// we still need to determine whether the interrupt happened in
			// kernel or user context.
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// kernel interrupt.
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}

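/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a sampling client might fetch a thread's 32-bit register state through the
 * wrapper above. The flavor and count constants are the standard Mach ones;
 * the helper name and calling context are hypothetical.
 */
#if 0
static kern_return_t
example_get_eip(thread_t thread, uint32_t *eip_out)
{
	x86_thread_state32_t	state;
	mach_msg_type_number_t	count = x86_THREAD_STATE32_COUNT;
	kern_return_t		kr;

	/* user_only == FALSE: return kernel or user state, whichever applies */
	kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE32,
	                              (thread_state_t)&state, &count, FALSE);
	if (kr == KERN_SUCCESS)
		*eip_out = state.eip;	/* program counter at sample time */
	return kr;
}
#endif
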
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}

#if 0
#pragma mark **** task memory read/write ****
#endif

__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

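/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * reading one word out of a target task with chudxnu_task_read(). The helper
 * name is hypothetical. Two properties matter to the callers below: the call
 * must not be made from interrupt context, and it transparently picks
 * copyin() vs. vm_map_read_user() depending on whether `task` is current.
 */
#if 0
static kern_return_t
example_read_user_word(task_t task, uint64_t uaddr, uint32_t *word_out)
{
	/* returns KERN_FAILURE if called at interrupt context */
	return chudxnu_task_read(task, word_out, uaddr, sizeof(uint32_t));
}
#endif
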
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}

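/*
 * Illustrative only (not part of the original file): both wrappers above are
 * built on ml_nofault_copy(), so touching an unmapped kernel address returns
 * KERN_FAILURE instead of taking a fault. That property is what lets the
 * backtracers below chase possibly-stale frame pointers. A sketch, with a
 * hypothetical helper name:
 */
#if 0
static boolean_t
example_nofault_read64(vm_offset_t addr, uint64_t *out)
{
	/* FALSE instead of a fault if `addr` is unmapped */
	return (chudxnu_kern_read(out, addr, sizeof(uint64_t)) == KERN_SUCCESS);
}
#endif
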
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr)	(supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read inside the non-canonical x86-64 address "hole"
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	 ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

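/*
 * Illustrative only (not part of the original file): on x86-64, addresses
 * between 0x0000800000000000 and 0xFFFF7FFFFFFFFFFF are non-canonical (the
 * "hole"), so they can never be valid stack addresses; the user arm of
 * VALID_STACK_ADDRESS64 encodes exactly that. Spot checks (the kernel min/max
 * arguments are ignored in the user arm, so 0 is passed for both):
 */
#if 0
/* canonical user-space address: accepted */
assert(VALID_STACK_ADDRESS64(FALSE, 0x00007FFF5FBFF000ULL, 0, 0));
/* inside the non-canonical hole: rejected */
assert(!VALID_STACK_ADDRESS64(FALSE, 0x0000900000000000ULL, 0, 0));
/* canonical high (kernel-half) address: accepted by the user arm */
assert(VALID_STACK_ADDRESS64(FALSE, 0xFFFF880000000000ULL, 0, 0));
#endif
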
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;


typedef struct _cframe_t {
	uint32_t	prev;		// this is really a user32-space pointer to the previous frame
	uint32_t	caller;
	uint32_t	args[0];
} cframe_t;

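/*
 * Illustrative only (not part of the original file): these structs mirror the
 * frame a standard prologue (push %ebp; mov %esp, %ebp) lays down - the saved
 * frame pointer is cframe_t.prev, the return address pushed by the call sits
 * just above it as cframe_t.caller, and the caller's arguments follow. One
 * step of the walk the backtracers below perform, with hypothetical names:
 */
#if 0
static uint64_t
example_step_frame(task_t task, uint64_t fp, uint64_t *pc_out)
{
	cframe_t frame = { 0, 0 };

	if (chudxnu_task_read(task, &frame, fp, sizeof(frame)) != KERN_SUCCESS)
		return 0ULL;
	*pc_out = (uint64_t) frame.caller;	/* return address */
	return (uint64_t) frame.prev;		/* frame pointer of the caller */
}
#endif
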
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0U;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;

	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;	// promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;	// promote 32 bit address

		if(prevFP) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;	// stacks grow down, so the caller's frame must be higher
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;

	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives
		// in the target thread's stack frame
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = (uint64_t)currPC;

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives
		// on the kernel stack
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(prevFP));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = (uint64_t)currPC;
			prevPC = currPC;
		}
		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}


__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// the saved state is user mode; shift it to the user-state pointers.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 *	64 bit kernel state (not likely)
	 *	32 bit kernel state
	 *	64 bit user land state
	 *	32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of the word at the saved (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0U, tmp = 0U;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0U;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0U;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}

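/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a sampling client might call chudxnu_thread_get_callstack64(). The buffer
 * size and record_sample() are hypothetical. Note that *count is in/out: it
 * carries the buffer capacity in and the number of frames out, and
 * KERN_RESOURCE_SHORTAGE signals a truncated (but still valid) trace.
 */
#if 0
static void
example_sample_thread(thread_t thread)
{
	uint64_t frames[32];
	mach_msg_type_number_t count = 32;	/* capacity in */
	kern_return_t kr;

	/* user_only == FALSE: kernel frames are acceptable */
	kr = chudxnu_thread_get_callstack64(thread, frames, &count, FALSE);
	if (kr == KERN_SUCCESS || kr == KERN_RESOURCE_SHORTAGE) {
		/* count now holds the number of valid entries in frames[] */
		record_sample(frames, count);	/* hypothetical consumer */
	}
}
#endif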