/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

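/*
 * Strip the kernel ASLR slide from a kernel-space address so the value
 * reported to callers matches the unslid (on-disk) kernel layout.
 * User-space addresses (kaddr == 0) are returned unchanged.
 */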
static uint64_t
chudxnu_vm_unslide( uint64_t ptr, int kaddr )
{
	if( !kaddr )
		return ptr;

	return VM_KERNEL_UNSLIDE(ptr);
}

#if 0
#pragma mark **** thread state ****
#endif

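/*
 * Fetch the machine-dependent register state of a thread.  When user_only
 * is set, only saved user-mode state is returned (and kernel threads fail);
 * otherwise the most relevant state - kernel interrupt state if available,
 * else user state - is selected for the thread.
 */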
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this correctly decides whether the thread is 64-bit or 32-bit */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() differs from the PPC version, which
		// returns the previous save area - user or kernel - rather than the kernel
		// state, or NULL if no kernel interrupt state is available.

		// The real purpose of this branch is the following: the caller doesn't care
		// whether the thread state is user or kernel, it just wants the thread
		// state, so we need to determine the proper one to return - kernel or
		// user - for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// These are the conditions under which we might be able to read the
			// kernel state.  We still need to determine whether this interrupt
			// happened in kernel or user context.
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// kernel interrupt.
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}

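/*
 * Set a thread's machine-dependent register state.  The user_only flag is
 * accepted for interface symmetry but ignored; the request is passed
 * straight through to machine_thread_set_state().
 */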
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}

#if 0
#pragma mark **** task memory read/write ****
#endif

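/*
 * Copy 'size' bytes from a task's user address space (usraddr) into kernel
 * memory (kernaddr).  Uses copyin() when the task is the current task and
 * vm_map_read_user() otherwise.  Cannot be used from interrupt context.
 */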
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

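/*
 * Copy 'size' bytes from kernel memory (kernaddr) out to a task's user
 * address space (useraddr).  Mirrors chudxnu_task_read(): copyout() for the
 * current task, vm_map_write_user() for any other task.
 */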
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

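/*
 * Fault-safe kernel memory copies.  ml_nofault_copy() returns the number of
 * bytes it managed to copy, so anything short of 'size' is reported as
 * KERN_FAILURE.
 */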
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the non-canonical hole between user and kernel space
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;


typedef struct _cframe_t {
	uint32_t	prev;		// this is really a user32-space pointer to the previous frame
	uint32_t	caller;
	uint32_t	args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

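/*
 * With frame pointers in use, each x86 stack frame starts with the caller's
 * saved frame pointer, immediately followed by the return address:
 *
 *	[fp + 0]            previous frame pointer
 *	[fp + sizeof(ptr)]  return address ("caller")
 *
 * The walkers below follow that chain until an address falls outside the
 * valid stack range or a read faults.
 */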
static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;	// promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;	// promote 32 bit address

		if(prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

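/*
 * 64-bit variant of the frame walker above: stack slots are 8 bytes wide and
 * are read directly into currPC/prevFP instead of through a 32-bit temporary.
 */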
static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

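/*
 * Walk the saved kernel register state (struct x86_kernel_state) of a thread
 * using nofault reads only.  Used for kernel threads that are not being
 * sampled at interrupt context; the field names differ between LP64 and
 * ILP32 kernels, hence the #if __LP64__ blocks.
 */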
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}


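/*
 * Capture a callstack for the given thread as an array of 64-bit, unslid
 * addresses.  On return, *count holds the number of entries actually written
 * to 'callstack'.
 *
 * Example (hypothetical caller, for illustration only):
 *
 *	uint64_t frames[32];
 *	mach_msg_type_number_t nframes = 32;
 *	if(chudxnu_thread_get_callstack64(thread, frames, &nframes, FALSE) == KERN_SUCCESS) {
 *		// frames[0 .. nframes-1] now hold unslid PC values (plus a final
 *		// stack-top word when there was room for it)
 *	}
 */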
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 *	64 bit kernel state (not likely)
	 *	32 bit kernel state
	 *	64 bit user land state
	 *	32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0U, tmp = 0U;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0U;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0U;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}