osfmk/chud/i386/chud_thread_i386.c (apple/xnu, xnu-1504.15.3)
/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#if 0
#pragma mark **** thread state ****
#endif

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether the thread is 64 bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// The i386 machine_thread_get_kern_state() differs from the PPC version,
		// which returns the previous save area - user or kernel - rather than the
		// kernel state, or NULL if no kernel interrupt state is available.

		// the real purpose of this branch is the following:
		// the caller doesn't care whether the thread state is user or kernel;
		// it just wants the thread state, so we need to determine the proper one
		// to return, kernel or user, for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context.
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// kernel interrupt.
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}

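/*
 * Illustrative usage sketch (not part of the original file): a CHUD-style
 * client might fetch a thread's user-mode register state roughly like this,
 * assuming it already holds a valid thread reference:
 *
 *	x86_thread_state32_t	state;
 *	mach_msg_type_number_t	count = x86_THREAD_STATE32_COUNT;
 *	kern_return_t		kr;
 *
 *	kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE32,
 *				      (thread_state_t)&state, &count, TRUE);
 *	if (kr == KERN_SUCCESS) {
 *		// state.eip, state.esp, etc. now hold the user-mode registers
 *	}
 */
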
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}

#if 0
#pragma mark **** task memory read/write ****
#endif

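/*
 * Task memory accessors: copy between a task's user address space and a
 * kernel buffer. For the current task this reduces to copyin()/copyout();
 * for any other task it goes through vm_map_read_user()/vm_map_write_user()
 * on that task's map. Both paths may fault, so these routines refuse to run
 * on the interrupt stack and briefly enable interrupts for the pmap layer.
 */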
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

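/*
 * Kernel memory accessors: ml_nofault_copy() only copies while both the
 * source and destination remain mapped and returns the number of bytes it
 * actually moved, so an unmapped address shows up as a short count and is
 * turned into KERN_FAILURE here rather than a fault.
 */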
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

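/*
 * Frame-pointer sanity checks: a supervisor-mode frame must lie within the
 * thread's kernel stack, while a user-mode 64-bit frame is rejected only when
 * it is NULL or falls inside the non-canonical x86-64 "hole" between
 * 0x0000800000000000 and 0xFFFF7FFFFFFFFFFF, which no valid mapping can
 * occupy.
 */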
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;


typedef struct _cframe_t {
	uint32_t	prev;	// this is really a user32-space pointer to the previous frame
	uint32_t	caller;
	uint32_t	args[0];
} cframe_t;

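/*
 * With frame pointers enabled, each stack frame begins with the saved frame
 * pointer followed by the return address, so a frame at address fp looks like:
 *
 *	fp + 0:	previous frame pointer	(cframe_t.prev / cframe64_t.prevFP)
 *	fp + 4:	return address		(cframe_t.caller; fp + 8 for cframe64_t)
 *	higher:	call arguments		(args[])
 *
 * The backtracers below simply follow this chain.
 */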
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

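/*
 * do_backtrace32: walk a 32-bit frame-pointer chain starting from the saved
 * eip/ebp in *regs, appending each return address (zero-extended to 64 bits)
 * to frames[]. Every word is fetched with a nofault kernel read or a task
 * read depending on 'supervisor', so an unreadable frame simply ends the
 * walk. On return, *start_idx has been advanced past the last entry written;
 * KERN_RESOURCE_SHORTAGE means the buffer filled before the walk finished.
 */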
static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;

	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;	// promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;	// promote 32 bit address

		if(prevFP) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

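/*
 * do_backtrace64: the same frame-pointer walk as do_backtrace32, but for a
 * 64-bit register state. The return address is read from currFP + 8 and the
 * next frame pointer from currFP itself, with each candidate frame vetted by
 * VALID_STACK_ADDRESS64 before it is followed.
 */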
static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;

	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

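/*
 * do_kernel_backtrace: backtrace a kernel thread from the x86_kernel_state
 * (k_rip/k_rbp, or k_eip/k_ebp on a 32-bit kernel) saved at the top of its
 * kernel stack (see STACK_IKS in the caller). Everything, including the
 * initial register fetch, goes through chudxnu_kern_read() because the
 * kernel stack can disappear out from under us while we walk it.
 */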
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = (uint64_t)currPC;

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives on the kernel stack
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = (uint64_t)currPC;
			prevPC = currPC;
		}
		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}


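/*
 * chudxnu_thread_get_callstack64: return up to *count frames for 'thread' as
 * 64-bit addresses, innermost frame first. The top-of-stack register state is
 * chosen in this order: the interrupted state on the current CPU (when
 * sampling the current thread from interrupt context), the saved
 * x86_kernel_state of a kernel thread, or the thread's user-mode save area,
 * using the 64- or 32-bit walker as appropriate. If room remains, the word at
 * the top of the stack (*rsp / *esp) is appended as a final entry, and *count
 * is set to the number of entries actually written.
 */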
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0ULL, tmp = 0ULL;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
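/*
 * Illustrative usage sketch (not part of the original file): a sampling
 * client might collect a callstack for a thread roughly like this, with
 * MAX_FRAMES being a hypothetical, caller-chosen buffer size:
 *
 *	#define MAX_FRAMES 32
 *	uint64_t		frames[MAX_FRAMES];
 *	mach_msg_type_number_t	count = MAX_FRAMES;
 *
 *	if (chudxnu_thread_get_callstack64(thread, frames, &count, FALSE)
 *	    == KERN_SUCCESS) {
 *		// frames[0..count-1] now hold PCs, innermost frame first
 *	}
 */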