/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

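/*
 * Strip the KASLR slide from a kernel address so addresses reported to
 * tools are stable across boots; user addresses pass through untouched.
 */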
static uint64_t
chudxnu_vm_unslide( uint64_t ptr, int kaddr )
{
	if( !kaddr )
		return ptr;

	return VM_KERNEL_UNSLIDE(ptr);
}

#if 0
#pragma mark **** thread state ****
#endif

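/*
 * Fetch the machine state of a thread. With user_only set, only user-mode
 * state is returned (and kernel threads fail outright); otherwise the
 * routine picks kernel or user state as appropriate for the thread.
 */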
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether or not the thread is 64 bit */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() differs from the PPC version, which
		// returns the previous save area - user or kernel - rather than the kernel
		// state, or NULL if no kernel interrupt state is available

		// the real purpose of this branch is the following:
		// the caller doesn't care whether the thread state is user or kernel, it
		// just wants the thread state, so we need to determine the proper one
		// to return, kernel or user, for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// kernel interrupt.
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}

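/*
 * Set the machine state of a thread. The user_only flag is accepted for
 * symmetry with chudxnu_thread_get_state() but is unused here.
 */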
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}

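/*
 * A minimal usage sketch (kept under #if 0, not compiled): how a client
 * might fetch a thread's user-mode general-purpose registers. The flavor
 * and state types are the standard ones from <mach/thread_status.h>; the
 * function name is hypothetical.
 */
#if 0
static kern_return_t
example_get_user_regs64(thread_t thread, x86_thread_state64_t *state)
{
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

	/* user_only == TRUE: fails for kernel threads by design */
	return chudxnu_thread_get_state(thread, x86_THREAD_STATE64,
	    (thread_state_t)state, &count, TRUE);
}
#endif
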
#if 0
#pragma mark **** task memory read/write ****
#endif

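/*
 * Copy `size` bytes at user address `usraddr` in `task` into the kernel
 * buffer at `kernaddr`. Not legal from interrupt context; interrupts are
 * briefly forced on because the pmap layer requires them.
 */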
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

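/*
 * Mirror image of chudxnu_task_read(): copy `size` bytes from the kernel
 * buffer at `kernaddr` out to user address `useraddr` in `task`.
 */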
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}

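/*
 * Fault-safe kernel-to-kernel copy: succeeds only if all `size` bytes at
 * `srcaddr` could be copied without taking a fault.
 */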
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}

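/*
 * Fault-safe counterpart for writes into kernel memory.
 */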
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
			KERN_SUCCESS: KERN_FAILURE);
}

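/*
 * Validity checks for candidate frame pointers during a stack walk.
 * Supervisor (kernel) frame pointers must lie within the current kernel
 * stack; 64-bit user frame pointers are only screened against NULL and
 * the non-canonical x86-64 address hole, which is never mapped.
 */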
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read inside the non-canonical address hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))

typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;


typedef struct _cframe_t {
	uint32_t	prev;		// this is really a user32-space pointer to the previous frame
	uint32_t	caller;
	uint32_t	args[0];
} cframe_t;

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

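/*
 * Walk a chain of 32-bit frames starting from `regs`, appending unslid
 * return addresses to `frames` from *start_idx up to max_idx. All stack
 * reads go through the fault-safe copy routines above, so a torn or
 * unmapped stack ends the walk instead of faulting.
 */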
static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0U;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;    // promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;    // promote 32 bit address

		if(prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

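/*
 * Same walk as do_backtrace32(), but over 64-bit frames: each frame is a
 * pair of 8-byte slots, the saved frame pointer followed by the return
 * address.
 */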
static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

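/*
 * Backtrace a kernel thread from the context saved in its struct
 * x86_kernel_state. Even the initial PC/FP fetch is a nofault read,
 * since the kernel stack can be torn down while we are looking at it.
 */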
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in this frame
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}


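/*
 * Capture the callstack of `thread` into `callstack` as unslid 64-bit
 * addresses. On entry *count is the buffer capacity in entries; on exit
 * it is the number of entries written. The last entry appended is the
 * word at the sampled stack pointer, presumably so tools can recover the
 * return address of leaf functions that have not set up a frame yet.
 */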
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0U, tmp = 0U;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0U;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0U;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
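
/*
 * Illustrative sketch only (kept under #if 0, not compiled): a hypothetical
 * sampler driving chudxnu_thread_get_callstack64(). The buffer size and
 * function name are assumptions for illustration.
 */
#if 0
#define EXAMPLE_MAX_FRAMES 128

static void
example_sample_thread(thread_t thread)
{
	uint64_t frames[EXAMPLE_MAX_FRAMES];
	mach_msg_type_number_t count = EXAMPLE_MAX_FRAMES;

	/* user_only == FALSE: accept kernel frames as well as user frames */
	if (chudxnu_thread_get_callstack64(thread, frames, &count, FALSE) == KERN_SUCCESS) {
		/* `count` now holds the number of entries written; the last
		 * entry is the word found at the sampled stack pointer. */
	}
}
#endif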