2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach/mach_types.h>
30 #include <mach/task.h>
31 #include <mach/thread_act.h>
33 #include <kern/kern_types.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/ipc_tt.h>
38 #include <vm/vm_map.h>
41 #include <chud/chud_xnu.h>
42 #include <chud/chud_xnu_private.h>
44 #include <ppc/misc_protos.h>
45 #include <ppc/proc_reg.h>
46 #include <ppc/machine_routines.h>
47 #include <ppc/fpu_protos.h>
49 #pragma mark **** thread state ****
52 kern_return_t
chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
, struct savearea
*sv
)
54 struct ppc_thread_state
*ts
;
55 struct ppc_thread_state64
*xts
;
58 case PPC_THREAD_STATE
:
59 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
61 return KERN_INVALID_ARGUMENT
;
63 ts
= (struct ppc_thread_state
*) tstate
;
65 ts
->r0
= (unsigned int)sv
->save_r0
;
66 ts
->r1
= (unsigned int)sv
->save_r1
;
67 ts
->r2
= (unsigned int)sv
->save_r2
;
68 ts
->r3
= (unsigned int)sv
->save_r3
;
69 ts
->r4
= (unsigned int)sv
->save_r4
;
70 ts
->r5
= (unsigned int)sv
->save_r5
;
71 ts
->r6
= (unsigned int)sv
->save_r6
;
72 ts
->r7
= (unsigned int)sv
->save_r7
;
73 ts
->r8
= (unsigned int)sv
->save_r8
;
74 ts
->r9
= (unsigned int)sv
->save_r9
;
75 ts
->r10
= (unsigned int)sv
->save_r10
;
76 ts
->r11
= (unsigned int)sv
->save_r11
;
77 ts
->r12
= (unsigned int)sv
->save_r12
;
78 ts
->r13
= (unsigned int)sv
->save_r13
;
79 ts
->r14
= (unsigned int)sv
->save_r14
;
80 ts
->r15
= (unsigned int)sv
->save_r15
;
81 ts
->r16
= (unsigned int)sv
->save_r16
;
82 ts
->r17
= (unsigned int)sv
->save_r17
;
83 ts
->r18
= (unsigned int)sv
->save_r18
;
84 ts
->r19
= (unsigned int)sv
->save_r19
;
85 ts
->r20
= (unsigned int)sv
->save_r20
;
86 ts
->r21
= (unsigned int)sv
->save_r21
;
87 ts
->r22
= (unsigned int)sv
->save_r22
;
88 ts
->r23
= (unsigned int)sv
->save_r23
;
89 ts
->r24
= (unsigned int)sv
->save_r24
;
90 ts
->r25
= (unsigned int)sv
->save_r25
;
91 ts
->r26
= (unsigned int)sv
->save_r26
;
92 ts
->r27
= (unsigned int)sv
->save_r27
;
93 ts
->r28
= (unsigned int)sv
->save_r28
;
94 ts
->r29
= (unsigned int)sv
->save_r29
;
95 ts
->r30
= (unsigned int)sv
->save_r30
;
96 ts
->r31
= (unsigned int)sv
->save_r31
;
97 ts
->cr
= (unsigned int)sv
->save_cr
;
98 ts
->xer
= (unsigned int)sv
->save_xer
;
99 ts
->lr
= (unsigned int)sv
->save_lr
;
100 ts
->ctr
= (unsigned int)sv
->save_ctr
;
101 ts
->srr0
= (unsigned int)sv
->save_srr0
;
102 ts
->srr1
= (unsigned int)sv
->save_srr1
;
104 ts
->vrsave
= (unsigned int)sv
->save_vrsave
;
106 bzero((void *)ts
, sizeof(struct ppc_thread_state
));
108 *count
= PPC_THREAD_STATE_COUNT
; /* Pass back the amount we actually copied */
111 case PPC_THREAD_STATE64
:
112 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
113 return KERN_INVALID_ARGUMENT
;
115 xts
= (struct ppc_thread_state64
*) tstate
;
117 xts
->r0
= sv
->save_r0
;
118 xts
->r1
= sv
->save_r1
;
119 xts
->r2
= sv
->save_r2
;
120 xts
->r3
= sv
->save_r3
;
121 xts
->r4
= sv
->save_r4
;
122 xts
->r5
= sv
->save_r5
;
123 xts
->r6
= sv
->save_r6
;
124 xts
->r7
= sv
->save_r7
;
125 xts
->r8
= sv
->save_r8
;
126 xts
->r9
= sv
->save_r9
;
127 xts
->r10
= sv
->save_r10
;
128 xts
->r11
= sv
->save_r11
;
129 xts
->r12
= sv
->save_r12
;
130 xts
->r13
= sv
->save_r13
;
131 xts
->r14
= sv
->save_r14
;
132 xts
->r15
= sv
->save_r15
;
133 xts
->r16
= sv
->save_r16
;
134 xts
->r17
= sv
->save_r17
;
135 xts
->r18
= sv
->save_r18
;
136 xts
->r19
= sv
->save_r19
;
137 xts
->r20
= sv
->save_r20
;
138 xts
->r21
= sv
->save_r21
;
139 xts
->r22
= sv
->save_r22
;
140 xts
->r23
= sv
->save_r23
;
141 xts
->r24
= sv
->save_r24
;
142 xts
->r25
= sv
->save_r25
;
143 xts
->r26
= sv
->save_r26
;
144 xts
->r27
= sv
->save_r27
;
145 xts
->r28
= sv
->save_r28
;
146 xts
->r29
= sv
->save_r29
;
147 xts
->r30
= sv
->save_r30
;
148 xts
->r31
= sv
->save_r31
;
149 xts
->cr
= sv
->save_cr
;
150 xts
->xer
= sv
->save_xer
;
151 xts
->lr
= sv
->save_lr
;
152 xts
->ctr
= sv
->save_ctr
;
153 xts
->srr0
= sv
->save_srr0
;
154 xts
->srr1
= sv
->save_srr1
;
155 xts
->vrsave
= sv
->save_vrsave
;
157 bzero((void *)xts
, sizeof(struct ppc_thread_state64
));
159 *count
= PPC_THREAD_STATE64_COUNT
; /* Pass back the amount we actually copied */
164 return KERN_INVALID_ARGUMENT
;
170 kern_return_t
chudxnu_copy_threadstate_to_savearea(struct savearea
*sv
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
)
172 struct ppc_thread_state
*ts
;
173 struct ppc_thread_state64
*xts
;
176 case PPC_THREAD_STATE
:
177 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
178 return KERN_INVALID_ARGUMENT
;
180 ts
= (struct ppc_thread_state
*) tstate
;
182 sv
->save_r0
= (uint64_t)ts
->r0
;
183 sv
->save_r1
= (uint64_t)ts
->r1
;
184 sv
->save_r2
= (uint64_t)ts
->r2
;
185 sv
->save_r3
= (uint64_t)ts
->r3
;
186 sv
->save_r4
= (uint64_t)ts
->r4
;
187 sv
->save_r5
= (uint64_t)ts
->r5
;
188 sv
->save_r6
= (uint64_t)ts
->r6
;
189 sv
->save_r7
= (uint64_t)ts
->r7
;
190 sv
->save_r8
= (uint64_t)ts
->r8
;
191 sv
->save_r9
= (uint64_t)ts
->r9
;
192 sv
->save_r10
= (uint64_t)ts
->r10
;
193 sv
->save_r11
= (uint64_t)ts
->r11
;
194 sv
->save_r12
= (uint64_t)ts
->r12
;
195 sv
->save_r13
= (uint64_t)ts
->r13
;
196 sv
->save_r14
= (uint64_t)ts
->r14
;
197 sv
->save_r15
= (uint64_t)ts
->r15
;
198 sv
->save_r16
= (uint64_t)ts
->r16
;
199 sv
->save_r17
= (uint64_t)ts
->r17
;
200 sv
->save_r18
= (uint64_t)ts
->r18
;
201 sv
->save_r19
= (uint64_t)ts
->r19
;
202 sv
->save_r20
= (uint64_t)ts
->r20
;
203 sv
->save_r21
= (uint64_t)ts
->r21
;
204 sv
->save_r22
= (uint64_t)ts
->r22
;
205 sv
->save_r23
= (uint64_t)ts
->r23
;
206 sv
->save_r24
= (uint64_t)ts
->r24
;
207 sv
->save_r25
= (uint64_t)ts
->r25
;
208 sv
->save_r26
= (uint64_t)ts
->r26
;
209 sv
->save_r27
= (uint64_t)ts
->r27
;
210 sv
->save_r28
= (uint64_t)ts
->r28
;
211 sv
->save_r29
= (uint64_t)ts
->r29
;
212 sv
->save_r30
= (uint64_t)ts
->r30
;
213 sv
->save_r31
= (uint64_t)ts
->r31
;
214 sv
->save_cr
= ts
->cr
;
215 sv
->save_xer
= (uint64_t)ts
->xer
;
216 sv
->save_lr
= (uint64_t)ts
->lr
;
217 sv
->save_ctr
= (uint64_t)ts
->ctr
;
218 sv
->save_srr0
= (uint64_t)ts
->srr0
;
219 sv
->save_srr1
= (uint64_t)ts
->srr1
;
220 sv
->save_vrsave
= ts
->vrsave
;
224 case PPC_THREAD_STATE64
:
225 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
226 return KERN_INVALID_ARGUMENT
;
228 xts
= (struct ppc_thread_state64
*) tstate
;
230 sv
->save_r0
= xts
->r0
;
231 sv
->save_r1
= xts
->r1
;
232 sv
->save_r2
= xts
->r2
;
233 sv
->save_r3
= xts
->r3
;
234 sv
->save_r4
= xts
->r4
;
235 sv
->save_r5
= xts
->r5
;
236 sv
->save_r6
= xts
->r6
;
237 sv
->save_r7
= xts
->r7
;
238 sv
->save_r8
= xts
->r8
;
239 sv
->save_r9
= xts
->r9
;
240 sv
->save_r10
= xts
->r10
;
241 sv
->save_r11
= xts
->r11
;
242 sv
->save_r12
= xts
->r12
;
243 sv
->save_r13
= xts
->r13
;
244 sv
->save_r14
= xts
->r14
;
245 sv
->save_r15
= xts
->r15
;
246 sv
->save_r16
= xts
->r16
;
247 sv
->save_r17
= xts
->r17
;
248 sv
->save_r18
= xts
->r18
;
249 sv
->save_r19
= xts
->r19
;
250 sv
->save_r20
= xts
->r20
;
251 sv
->save_r21
= xts
->r21
;
252 sv
->save_r22
= xts
->r22
;
253 sv
->save_r23
= xts
->r23
;
254 sv
->save_r24
= xts
->r24
;
255 sv
->save_r25
= xts
->r25
;
256 sv
->save_r26
= xts
->r26
;
257 sv
->save_r27
= xts
->r27
;
258 sv
->save_r28
= xts
->r28
;
259 sv
->save_r29
= xts
->r29
;
260 sv
->save_r30
= xts
->r30
;
261 sv
->save_r31
= xts
->r31
;
262 sv
->save_cr
= xts
->cr
;
263 sv
->save_xer
= xts
->xer
;
264 sv
->save_lr
= xts
->lr
;
265 sv
->save_ctr
= xts
->ctr
;
266 sv
->save_srr0
= xts
->srr0
;
267 sv
->save_srr1
= xts
->srr1
;
268 sv
->save_vrsave
= xts
->vrsave
;
276 kern_return_t
chudxnu_thread_user_state_available(thread_t thread
)
278 if(find_user_regs(thread
)) {
286 kern_return_t
chudxnu_thread_get_state(thread_t thread
,
287 thread_flavor_t flavor
,
288 thread_state_t tstate
,
289 mach_msg_type_number_t
*count
,
292 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_get_state filters out some bits
295 sv
= find_user_regs(thread
);
297 sv
= find_kern_regs(thread
);
299 return chudxnu_copy_savearea_to_threadstate(flavor
, tstate
, count
, sv
);
302 return machine_thread_get_state(thread
, flavor
, tstate
, count
);
304 // doesn't do FP or VMX
305 return machine_thread_get_kern_state(thread
, flavor
, tstate
, count
);
311 kern_return_t
chudxnu_thread_set_state(thread_t thread
,
312 thread_flavor_t flavor
,
313 thread_state_t tstate
,
314 mach_msg_type_number_t count
,
317 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_set_state filters out some bits
320 sv
= find_user_regs(thread
);
322 sv
= find_kern_regs(thread
);
324 return chudxnu_copy_threadstate_to_savearea(sv
, flavor
, tstate
, &count
);
326 return machine_thread_set_state(thread
, flavor
, tstate
, count
); // always user
330 #pragma mark **** task memory read/write ****
333 kern_return_t
chudxnu_task_read(task_t task
, void *kernaddr
, uint64_t usraddr
, vm_size_t size
)
335 kern_return_t ret
= KERN_SUCCESS
;
337 if(ml_at_interrupt_context()) {
338 // can't do this on an interrupt stack
342 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
343 usraddr
&= 0x00000000FFFFFFFFULL
;
346 if(current_task()==task
) {
347 thread_t cur_thr
= current_thread();
348 vm_offset_t recover_handler
= cur_thr
->recover
;
350 if(copyin(usraddr
, kernaddr
, size
)) {
354 cur_thr
->recover
= recover_handler
;
357 vm_map_t map
= get_task_map(task
);
358 ret
= vm_map_read_user(map
, usraddr
, kernaddr
, size
);
365 kern_return_t
chudxnu_task_write(task_t task
, uint64_t useraddr
, void *kernaddr
, vm_size_t size
)
367 kern_return_t ret
= KERN_SUCCESS
;
369 if(ml_at_interrupt_context()) {
370 // can't do this on an interrupt stack
374 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
375 useraddr
&= 0x00000000FFFFFFFFULL
;
378 if(current_task()==task
) {
379 thread_t cur_thr
= current_thread();
380 vm_offset_t recover_handler
= cur_thr
->recover
;
382 if(copyout(kernaddr
, useraddr
, size
)) {
385 cur_thr
->recover
= recover_handler
;
388 vm_map_t map
= get_task_map(task
);
389 ret
= vm_map_write_user(map
, kernaddr
, useraddr
, size
);
396 kern_return_t
chudxnu_kern_read(void *dstaddr
, vm_offset_t srcaddr
, vm_size_t size
)
398 return (ml_nofault_copy(srcaddr
, (vm_offset_t
) dstaddr
, size
) == size
?
399 KERN_SUCCESS
: KERN_FAILURE
);
403 kern_return_t
chudxnu_kern_write(vm_offset_t dstaddr
, void *srcaddr
, vm_size_t size
)
405 return (ml_nofault_copy((vm_offset_t
) srcaddr
, dstaddr
, size
) == size
?
406 KERN_SUCCESS
: KERN_FAILURE
);
// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
// after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0)
// Stack-walk constants for PPC frames.
#define FP_LINK_OFFSET       2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK  0x3 // Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

// A frame pointer is plausible if it is above the first page, 16-byte
// aligned, and (for supervisor-mode walks) within the current thread's
// kernel stack.  NOTE: expands the locals `supervisor`, `kernStackMin`
// and `kernStackMax` at the use site.
#define VALID_STACK_ADDRESS(addr)   (addr>=0x1000ULL && \
                                    (addr&STACK_ALIGNMENT_MASK)==0x0 && \
                                    (supervisor ? \
                                        (addr>=kernStackMin && \
                                        addr<=kernStackMax) : \
                                        TRUE))
442 kern_return_t
chudxnu_thread_get_callstack64( thread_t thread
,
444 mach_msg_type_number_t
*count
,
448 task_t task
= get_threadtask(thread
);
449 uint64_t nextFramePointer
= 0;
450 uint64_t currPC
, currLR
, currR0
;
451 uint64_t framePointer
;
453 uint64_t kernStackMin
= thread
->kernel_stack
;
454 uint64_t kernStackMax
= kernStackMin
+ KERNEL_STACK_SIZE
;
455 uint64_t *buffer
= callStack
;
458 int bufferMaxIndex
= *count
;
459 boolean_t supervisor
;
464 sv
= find_user_regs(thread
);
466 sv
= find_kern_regs(thread
);
474 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
476 is64Bit
= FALSE
; /* XXX assuming task is always 32-bit */
478 is64Bit
= chudxnu_is_64bit_task(task
);
481 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
482 if(bufferMaxIndex
<2) {
484 return KERN_RESOURCE_SHORTAGE
;
487 currPC
= sv
->save_srr0
;
488 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
489 currLR
= sv
->save_lr
;
490 currR0
= sv
->save_r0
;
492 bufferIndex
= 0; // start with a stack of size zero
493 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
495 // Now, fill buffer with stack backtraces.
496 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
498 // Above the stack pointer, the following values are saved:
503 // Here, we'll get the lr from the stack.
507 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
509 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
512 // Note that we read the pc even for the first stack frame (which, in theory,
513 // is always empty because the callee fills it in just before it lowers the
514 // stack. However, if we catch the program in between filling in the return
515 // address and lowering the stack, we want to still have a valid backtrace.
516 // FixupStack correctly disregards this value if necessary.
520 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
522 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
527 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
529 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
533 if(kr
!=KERN_SUCCESS
) {
538 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
541 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
543 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
544 nextFramePointer
= tmpWord
;
548 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
550 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
551 nextFramePointer
= tmpWord
;
554 if(kr
!=KERN_SUCCESS
) {
555 nextFramePointer
= 0;
558 if(nextFramePointer
) {
559 buffer
[bufferIndex
++] = pc
;
563 if(nextFramePointer
<framePointer
) {
566 framePointer
= nextFramePointer
;
570 if(bufferIndex
>=bufferMaxIndex
) {
572 return KERN_RESOURCE_SHORTAGE
;
575 // Save link register and R0 at bottom of stack (used for later fixup).
576 buffer
[bufferIndex
++] = currLR
;
577 buffer
[bufferIndex
++] = currR0
;
579 *count
= bufferIndex
;
583 #pragma mark **** DEPRECATED ****
587 kern_return_t
chudxnu_thread_get_callstack( thread_t thread
,
589 mach_msg_type_number_t
*count
,
593 task_t task
= get_threadtask(thread
);
594 uint64_t nextFramePointer
= 0;
595 uint64_t currPC
, currLR
, currR0
;
596 uint64_t framePointer
;
598 uint64_t kernStackMin
= thread
->kernel_stack
;
599 uint64_t kernStackMax
= kernStackMin
+ KERNEL_STACK_SIZE
;
600 uint32_t *buffer
= callStack
;
603 int bufferMaxIndex
= *count
;
604 boolean_t supervisor
;
609 sv
= find_user_regs(thread
);
611 sv
= find_kern_regs(thread
);
619 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
621 is64Bit
= FALSE
; /* XXX assuming kernel task is always 32-bit */
623 is64Bit
= chudxnu_is_64bit_task(task
);
626 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
627 if(bufferMaxIndex
<2) {
629 return KERN_RESOURCE_SHORTAGE
;
632 currPC
= sv
->save_srr0
;
633 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
634 currLR
= sv
->save_lr
;
635 currR0
= sv
->save_r0
;
637 bufferIndex
= 0; // start with a stack of size zero
638 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
640 // Now, fill buffer with stack backtraces.
641 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
643 // Above the stack pointer, the following values are saved:
648 // Here, we'll get the lr from the stack.
652 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
654 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
657 // Note that we read the pc even for the first stack frame (which, in theory,
658 // is always empty because the callee fills it in just before it lowers the
659 // stack. However, if we catch the program in between filling in the return
660 // address and lowering the stack, we want to still have a valid backtrace.
661 // FixupStack correctly disregards this value if necessary.
665 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
667 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
672 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
674 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
678 if(kr
!=KERN_SUCCESS
) {
683 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
686 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
688 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
689 nextFramePointer
= tmpWord
;
693 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
695 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
696 nextFramePointer
= tmpWord
;
699 if(kr
!=KERN_SUCCESS
) {
700 nextFramePointer
= 0;
703 if(nextFramePointer
) {
704 buffer
[bufferIndex
++] = pc
;
708 if(nextFramePointer
<framePointer
) {
711 framePointer
= nextFramePointer
;
715 if(bufferIndex
>=bufferMaxIndex
) {
717 return KERN_RESOURCE_SHORTAGE
;
720 // Save link register and R0 at bottom of stack (used for later fixup).
721 buffer
[bufferIndex
++] = currLR
;
722 buffer
[bufferIndex
++] = currR0
;
724 *count
= bufferIndex
;