/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>

#include <vm/vm_map.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
#include <ppc/fpu_protos.h>
49 // forward declarations
50 extern kern_return_t
machine_thread_get_kern_state( thread_t thread
,
51 thread_flavor_t flavor
,
52 thread_state_t tstate
,
53 mach_msg_type_number_t
*count
);
56 #pragma mark **** thread state ****
59 kern_return_t
chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
, struct savearea
*sv
)
61 struct ppc_thread_state
*ts
;
62 struct ppc_thread_state64
*xts
;
65 case PPC_THREAD_STATE
:
66 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
68 return KERN_INVALID_ARGUMENT
;
70 ts
= (struct ppc_thread_state
*) tstate
;
72 ts
->r0
= (unsigned int)sv
->save_r0
;
73 ts
->r1
= (unsigned int)sv
->save_r1
;
74 ts
->r2
= (unsigned int)sv
->save_r2
;
75 ts
->r3
= (unsigned int)sv
->save_r3
;
76 ts
->r4
= (unsigned int)sv
->save_r4
;
77 ts
->r5
= (unsigned int)sv
->save_r5
;
78 ts
->r6
= (unsigned int)sv
->save_r6
;
79 ts
->r7
= (unsigned int)sv
->save_r7
;
80 ts
->r8
= (unsigned int)sv
->save_r8
;
81 ts
->r9
= (unsigned int)sv
->save_r9
;
82 ts
->r10
= (unsigned int)sv
->save_r10
;
83 ts
->r11
= (unsigned int)sv
->save_r11
;
84 ts
->r12
= (unsigned int)sv
->save_r12
;
85 ts
->r13
= (unsigned int)sv
->save_r13
;
86 ts
->r14
= (unsigned int)sv
->save_r14
;
87 ts
->r15
= (unsigned int)sv
->save_r15
;
88 ts
->r16
= (unsigned int)sv
->save_r16
;
89 ts
->r17
= (unsigned int)sv
->save_r17
;
90 ts
->r18
= (unsigned int)sv
->save_r18
;
91 ts
->r19
= (unsigned int)sv
->save_r19
;
92 ts
->r20
= (unsigned int)sv
->save_r20
;
93 ts
->r21
= (unsigned int)sv
->save_r21
;
94 ts
->r22
= (unsigned int)sv
->save_r22
;
95 ts
->r23
= (unsigned int)sv
->save_r23
;
96 ts
->r24
= (unsigned int)sv
->save_r24
;
97 ts
->r25
= (unsigned int)sv
->save_r25
;
98 ts
->r26
= (unsigned int)sv
->save_r26
;
99 ts
->r27
= (unsigned int)sv
->save_r27
;
100 ts
->r28
= (unsigned int)sv
->save_r28
;
101 ts
->r29
= (unsigned int)sv
->save_r29
;
102 ts
->r30
= (unsigned int)sv
->save_r30
;
103 ts
->r31
= (unsigned int)sv
->save_r31
;
104 ts
->cr
= (unsigned int)sv
->save_cr
;
105 ts
->xer
= (unsigned int)sv
->save_xer
;
106 ts
->lr
= (unsigned int)sv
->save_lr
;
107 ts
->ctr
= (unsigned int)sv
->save_ctr
;
108 ts
->srr0
= (unsigned int)sv
->save_srr0
;
109 ts
->srr1
= (unsigned int)sv
->save_srr1
;
111 ts
->vrsave
= (unsigned int)sv
->save_vrsave
;
113 bzero((void *)ts
, sizeof(struct ppc_thread_state
));
115 *count
= PPC_THREAD_STATE_COUNT
; /* Pass back the amount we actually copied */
118 case PPC_THREAD_STATE64
:
119 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
120 return KERN_INVALID_ARGUMENT
;
122 xts
= (struct ppc_thread_state64
*) tstate
;
124 xts
->r0
= sv
->save_r0
;
125 xts
->r1
= sv
->save_r1
;
126 xts
->r2
= sv
->save_r2
;
127 xts
->r3
= sv
->save_r3
;
128 xts
->r4
= sv
->save_r4
;
129 xts
->r5
= sv
->save_r5
;
130 xts
->r6
= sv
->save_r6
;
131 xts
->r7
= sv
->save_r7
;
132 xts
->r8
= sv
->save_r8
;
133 xts
->r9
= sv
->save_r9
;
134 xts
->r10
= sv
->save_r10
;
135 xts
->r11
= sv
->save_r11
;
136 xts
->r12
= sv
->save_r12
;
137 xts
->r13
= sv
->save_r13
;
138 xts
->r14
= sv
->save_r14
;
139 xts
->r15
= sv
->save_r15
;
140 xts
->r16
= sv
->save_r16
;
141 xts
->r17
= sv
->save_r17
;
142 xts
->r18
= sv
->save_r18
;
143 xts
->r19
= sv
->save_r19
;
144 xts
->r20
= sv
->save_r20
;
145 xts
->r21
= sv
->save_r21
;
146 xts
->r22
= sv
->save_r22
;
147 xts
->r23
= sv
->save_r23
;
148 xts
->r24
= sv
->save_r24
;
149 xts
->r25
= sv
->save_r25
;
150 xts
->r26
= sv
->save_r26
;
151 xts
->r27
= sv
->save_r27
;
152 xts
->r28
= sv
->save_r28
;
153 xts
->r29
= sv
->save_r29
;
154 xts
->r30
= sv
->save_r30
;
155 xts
->r31
= sv
->save_r31
;
156 xts
->cr
= sv
->save_cr
;
157 xts
->xer
= sv
->save_xer
;
158 xts
->lr
= sv
->save_lr
;
159 xts
->ctr
= sv
->save_ctr
;
160 xts
->srr0
= sv
->save_srr0
;
161 xts
->srr1
= sv
->save_srr1
;
162 xts
->vrsave
= sv
->save_vrsave
;
164 bzero((void *)xts
, sizeof(struct ppc_thread_state64
));
166 *count
= PPC_THREAD_STATE64_COUNT
; /* Pass back the amount we actually copied */
171 return KERN_INVALID_ARGUMENT
;
177 kern_return_t
chudxnu_copy_threadstate_to_savearea(struct savearea
*sv
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
)
179 struct ppc_thread_state
*ts
;
180 struct ppc_thread_state64
*xts
;
183 case PPC_THREAD_STATE
:
184 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
185 return KERN_INVALID_ARGUMENT
;
187 ts
= (struct ppc_thread_state
*) tstate
;
189 sv
->save_r0
= (uint64_t)ts
->r0
;
190 sv
->save_r1
= (uint64_t)ts
->r1
;
191 sv
->save_r2
= (uint64_t)ts
->r2
;
192 sv
->save_r3
= (uint64_t)ts
->r3
;
193 sv
->save_r4
= (uint64_t)ts
->r4
;
194 sv
->save_r5
= (uint64_t)ts
->r5
;
195 sv
->save_r6
= (uint64_t)ts
->r6
;
196 sv
->save_r7
= (uint64_t)ts
->r7
;
197 sv
->save_r8
= (uint64_t)ts
->r8
;
198 sv
->save_r9
= (uint64_t)ts
->r9
;
199 sv
->save_r10
= (uint64_t)ts
->r10
;
200 sv
->save_r11
= (uint64_t)ts
->r11
;
201 sv
->save_r12
= (uint64_t)ts
->r12
;
202 sv
->save_r13
= (uint64_t)ts
->r13
;
203 sv
->save_r14
= (uint64_t)ts
->r14
;
204 sv
->save_r15
= (uint64_t)ts
->r15
;
205 sv
->save_r16
= (uint64_t)ts
->r16
;
206 sv
->save_r17
= (uint64_t)ts
->r17
;
207 sv
->save_r18
= (uint64_t)ts
->r18
;
208 sv
->save_r19
= (uint64_t)ts
->r19
;
209 sv
->save_r20
= (uint64_t)ts
->r20
;
210 sv
->save_r21
= (uint64_t)ts
->r21
;
211 sv
->save_r22
= (uint64_t)ts
->r22
;
212 sv
->save_r23
= (uint64_t)ts
->r23
;
213 sv
->save_r24
= (uint64_t)ts
->r24
;
214 sv
->save_r25
= (uint64_t)ts
->r25
;
215 sv
->save_r26
= (uint64_t)ts
->r26
;
216 sv
->save_r27
= (uint64_t)ts
->r27
;
217 sv
->save_r28
= (uint64_t)ts
->r28
;
218 sv
->save_r29
= (uint64_t)ts
->r29
;
219 sv
->save_r30
= (uint64_t)ts
->r30
;
220 sv
->save_r31
= (uint64_t)ts
->r31
;
221 sv
->save_cr
= ts
->cr
;
222 sv
->save_xer
= (uint64_t)ts
->xer
;
223 sv
->save_lr
= (uint64_t)ts
->lr
;
224 sv
->save_ctr
= (uint64_t)ts
->ctr
;
225 sv
->save_srr0
= (uint64_t)ts
->srr0
;
226 sv
->save_srr1
= (uint64_t)ts
->srr1
;
227 sv
->save_vrsave
= ts
->vrsave
;
231 case PPC_THREAD_STATE64
:
232 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
233 return KERN_INVALID_ARGUMENT
;
235 xts
= (struct ppc_thread_state64
*) tstate
;
237 sv
->save_r0
= xts
->r0
;
238 sv
->save_r1
= xts
->r1
;
239 sv
->save_r2
= xts
->r2
;
240 sv
->save_r3
= xts
->r3
;
241 sv
->save_r4
= xts
->r4
;
242 sv
->save_r5
= xts
->r5
;
243 sv
->save_r6
= xts
->r6
;
244 sv
->save_r7
= xts
->r7
;
245 sv
->save_r8
= xts
->r8
;
246 sv
->save_r9
= xts
->r9
;
247 sv
->save_r10
= xts
->r10
;
248 sv
->save_r11
= xts
->r11
;
249 sv
->save_r12
= xts
->r12
;
250 sv
->save_r13
= xts
->r13
;
251 sv
->save_r14
= xts
->r14
;
252 sv
->save_r15
= xts
->r15
;
253 sv
->save_r16
= xts
->r16
;
254 sv
->save_r17
= xts
->r17
;
255 sv
->save_r18
= xts
->r18
;
256 sv
->save_r19
= xts
->r19
;
257 sv
->save_r20
= xts
->r20
;
258 sv
->save_r21
= xts
->r21
;
259 sv
->save_r22
= xts
->r22
;
260 sv
->save_r23
= xts
->r23
;
261 sv
->save_r24
= xts
->r24
;
262 sv
->save_r25
= xts
->r25
;
263 sv
->save_r26
= xts
->r26
;
264 sv
->save_r27
= xts
->r27
;
265 sv
->save_r28
= xts
->r28
;
266 sv
->save_r29
= xts
->r29
;
267 sv
->save_r30
= xts
->r30
;
268 sv
->save_r31
= xts
->r31
;
269 sv
->save_cr
= xts
->cr
;
270 sv
->save_xer
= xts
->xer
;
271 sv
->save_lr
= xts
->lr
;
272 sv
->save_ctr
= xts
->ctr
;
273 sv
->save_srr0
= xts
->srr0
;
274 sv
->save_srr1
= xts
->srr1
;
275 sv
->save_vrsave
= xts
->vrsave
;
283 kern_return_t
chudxnu_thread_user_state_available(thread_t thread
)
285 if(find_user_regs(thread
)) {
293 kern_return_t
chudxnu_thread_get_state(thread_t thread
,
294 thread_flavor_t flavor
,
295 thread_state_t tstate
,
296 mach_msg_type_number_t
*count
,
299 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_get_state filters out some bits
302 sv
= find_user_regs(thread
);
304 sv
= find_kern_regs(thread
);
306 return chudxnu_copy_savearea_to_threadstate(flavor
, tstate
, count
, sv
);
309 return machine_thread_get_state(thread
, flavor
, tstate
, count
);
311 // doesn't do FP or VMX
312 return machine_thread_get_kern_state(thread
, flavor
, tstate
, count
);
318 kern_return_t
chudxnu_thread_set_state(thread_t thread
,
319 thread_flavor_t flavor
,
320 thread_state_t tstate
,
321 mach_msg_type_number_t count
,
324 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_set_state filters out some bits
327 sv
= find_user_regs(thread
);
329 sv
= find_kern_regs(thread
);
331 return chudxnu_copy_threadstate_to_savearea(sv
, flavor
, tstate
, &count
);
333 return machine_thread_set_state(thread
, flavor
, tstate
, count
); // always user
337 #pragma mark **** task memory read/write ****
340 kern_return_t
chudxnu_task_read(task_t task
, void *kernaddr
, uint64_t usraddr
, vm_size_t size
)
342 kern_return_t ret
= KERN_SUCCESS
;
344 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
345 usraddr
&= 0x00000000FFFFFFFFULL
;
348 if(current_task()==task
) {
349 thread_t cur_thr
= current_thread();
350 vm_offset_t recover_handler
= cur_thr
->recover
;
352 if(ml_at_interrupt_context()) {
353 return KERN_FAILURE
; // can't do copyin on interrupt stack
356 if(copyin(usraddr
, kernaddr
, size
)) {
359 cur_thr
->recover
= recover_handler
;
361 vm_map_t map
= get_task_map(task
);
362 ret
= vm_map_read_user(map
, usraddr
, kernaddr
, size
);
369 kern_return_t
chudxnu_task_write(task_t task
, uint64_t useraddr
, void *kernaddr
, vm_size_t size
)
371 kern_return_t ret
= KERN_SUCCESS
;
373 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
374 useraddr
&= 0x00000000FFFFFFFFULL
;
377 if(current_task()==task
) {
378 thread_t cur_thr
= current_thread();
379 vm_offset_t recover_handler
= cur_thr
->recover
;
381 if(ml_at_interrupt_context()) {
382 return KERN_FAILURE
; // can't do copyout on interrupt stack
385 if(copyout(kernaddr
, useraddr
, size
)) {
388 cur_thr
->recover
= recover_handler
;
390 vm_map_t map
= get_task_map(task
);
391 ret
= vm_map_write_user(map
, kernaddr
, useraddr
, size
);
398 kern_return_t
chudxnu_kern_read(void *dstaddr
, vm_offset_t srcaddr
, vm_size_t size
)
404 pp
= pmap_find_phys(kernel_pmap
, srcaddr
); /* Get the page number */
406 return KERN_FAILURE
; /* Not mapped... */
409 phys_addr
= ((addr64_t
)pp
<< 12) | (srcaddr
& 0x0000000000000FFFULL
); /* Shove in the page offset */
410 if(phys_addr
>= mem_actual
) {
411 return KERN_FAILURE
; /* out of range */
414 if((phys_addr
&0x1) || size
==1) {
415 *((uint8_t *)dstaddr
) = ml_phys_read_byte_64(phys_addr
);
416 ((uint8_t *)dstaddr
)++;
417 srcaddr
+= sizeof(uint8_t);
418 size
-= sizeof(uint8_t);
419 } else if((phys_addr
&0x3) || size
<=2) {
420 *((uint16_t *)dstaddr
) = ml_phys_read_half_64(phys_addr
);
421 ((uint16_t *)dstaddr
)++;
422 srcaddr
+= sizeof(uint16_t);
423 size
-= sizeof(uint16_t);
425 *((uint32_t *)dstaddr
) = ml_phys_read_word_64(phys_addr
);
426 ((uint32_t *)dstaddr
)++;
427 srcaddr
+= sizeof(uint32_t);
428 size
-= sizeof(uint32_t);
435 kern_return_t
chudxnu_kern_write(vm_offset_t dstaddr
, void *srcaddr
, vm_size_t size
)
441 pp
= pmap_find_phys(kernel_pmap
, dstaddr
); /* Get the page number */
443 return KERN_FAILURE
; /* Not mapped... */
446 phys_addr
= ((addr64_t
)pp
<< 12) | (dstaddr
& 0x0000000000000FFFULL
); /* Shove in the page offset */
447 if(phys_addr
>= mem_actual
) {
448 return KERN_FAILURE
; /* out of range */
451 if((phys_addr
&0x1) || size
==1) {
452 ml_phys_write_byte_64(phys_addr
, *((uint8_t *)srcaddr
));
453 ((uint8_t *)srcaddr
)++;
454 dstaddr
+= sizeof(uint8_t);
455 size
-= sizeof(uint8_t);
456 } else if((phys_addr
&0x3) || size
<=2) {
457 ml_phys_write_half_64(phys_addr
, *((uint16_t *)srcaddr
));
458 ((uint16_t *)srcaddr
)++;
459 dstaddr
+= sizeof(uint16_t);
460 size
-= sizeof(uint16_t);
462 ml_phys_write_word_64(phys_addr
, *((uint32_t *)srcaddr
));
463 ((uint32_t *)srcaddr
)++;
464 dstaddr
+= sizeof(uint32_t);
465 size
-= sizeof(uint32_t);
// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
// after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current PC
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0)

#define FP_LINK_OFFSET		2	/* saved LR lives two words above the saved SP */
#define STACK_ALIGNMENT_MASK	0xF	// PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK	0x3	// Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

// Sanity-check a candidate frame pointer: not in the NULL page, 16-byte
// aligned, and (for kernel stacks) within the valid kernel-stack range.
// NOTE: relies on the locals 'supervisor', 'kernStackMin' and 'kernStackMax'
// being in scope at each use site.
#define VALID_STACK_ADDRESS(addr)	(addr>=0x1000ULL &&			\
					 (addr&STACK_ALIGNMENT_MASK)==0x0 &&	\
					 (supervisor ?				\
					  (addr>=kernStackMin &&		\
					   addr<=kernStackMax) :		\
					  TRUE))
505 kern_return_t
chudxnu_thread_get_callstack64( thread_t thread
,
507 mach_msg_type_number_t
*count
,
511 task_t task
= get_threadtask(thread
);
512 uint64_t nextFramePointer
= 0;
513 uint64_t currPC
, currLR
, currR0
;
514 uint64_t framePointer
;
516 uint64_t kernStackMin
= min_valid_stack_address();
517 uint64_t kernStackMax
= max_valid_stack_address();
518 uint64_t *buffer
= callStack
;
521 int bufferMaxIndex
= *count
;
522 boolean_t supervisor
;
527 sv
= find_user_regs(thread
);
529 sv
= find_kern_regs(thread
);
537 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
539 #warning assuming kernel task is always 32-bit
542 is64Bit
= chudxnu_is_64bit_task(task
);
545 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
546 if(bufferMaxIndex
<2) {
548 return KERN_RESOURCE_SHORTAGE
;
551 currPC
= sv
->save_srr0
;
552 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
553 currLR
= sv
->save_lr
;
554 currR0
= sv
->save_r0
;
556 bufferIndex
= 0; // start with a stack of size zero
557 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
559 // Now, fill buffer with stack backtraces.
560 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
562 // Above the stack pointer, the following values are saved:
567 // Here, we'll get the lr from the stack.
571 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
573 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
576 // Note that we read the pc even for the first stack frame (which, in theory,
577 // is always empty because the callee fills it in just before it lowers the
578 // stack. However, if we catch the program in between filling in the return
579 // address and lowering the stack, we want to still have a valid backtrace.
580 // FixupStack correctly disregards this value if necessary.
584 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
586 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
591 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
593 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
597 if(kr
!=KERN_SUCCESS
) {
602 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
605 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
607 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
608 nextFramePointer
= tmpWord
;
612 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
614 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
615 nextFramePointer
= tmpWord
;
618 if(kr
!=KERN_SUCCESS
) {
619 nextFramePointer
= 0;
622 if(nextFramePointer
) {
623 buffer
[bufferIndex
++] = pc
;
627 if(nextFramePointer
<framePointer
) {
630 framePointer
= nextFramePointer
;
634 if(bufferIndex
>=bufferMaxIndex
) {
636 return KERN_RESOURCE_SHORTAGE
;
639 // Save link register and R0 at bottom of stack (used for later fixup).
640 buffer
[bufferIndex
++] = currLR
;
641 buffer
[bufferIndex
++] = currR0
;
643 *count
= bufferIndex
;
648 kern_return_t
chudxnu_thread_get_callstack( thread_t thread
,
650 mach_msg_type_number_t
*count
,
654 task_t task
= get_threadtask(thread
);
655 uint64_t nextFramePointer
= 0;
656 uint64_t currPC
, currLR
, currR0
;
657 uint64_t framePointer
;
659 uint64_t kernStackMin
= min_valid_stack_address();
660 uint64_t kernStackMax
= max_valid_stack_address();
661 uint32_t *buffer
= callStack
;
664 int bufferMaxIndex
= *count
;
665 boolean_t supervisor
;
670 sv
= find_user_regs(thread
);
672 sv
= find_kern_regs(thread
);
680 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
682 #warning assuming kernel task is always 32-bit
685 is64Bit
= chudxnu_is_64bit_task(task
);
688 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
689 if(bufferMaxIndex
<2) {
691 return KERN_RESOURCE_SHORTAGE
;
694 currPC
= sv
->save_srr0
;
695 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
696 currLR
= sv
->save_lr
;
697 currR0
= sv
->save_r0
;
699 bufferIndex
= 0; // start with a stack of size zero
700 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
702 // Now, fill buffer with stack backtraces.
703 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
705 // Above the stack pointer, the following values are saved:
710 // Here, we'll get the lr from the stack.
714 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
716 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
719 // Note that we read the pc even for the first stack frame (which, in theory,
720 // is always empty because the callee fills it in just before it lowers the
721 // stack. However, if we catch the program in between filling in the return
722 // address and lowering the stack, we want to still have a valid backtrace.
723 // FixupStack correctly disregards this value if necessary.
727 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
729 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
734 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
736 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
740 if(kr
!=KERN_SUCCESS
) {
745 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
748 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
750 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
751 nextFramePointer
= tmpWord
;
755 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
757 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
758 nextFramePointer
= tmpWord
;
761 if(kr
!=KERN_SUCCESS
) {
762 nextFramePointer
= 0;
765 if(nextFramePointer
) {
766 buffer
[bufferIndex
++] = pc
;
770 if(nextFramePointer
<framePointer
) {
773 framePointer
= nextFramePointer
;
777 if(bufferIndex
>=bufferMaxIndex
) {
779 return KERN_RESOURCE_SHORTAGE
;
782 // Save link register and R0 at bottom of stack (used for later fixup).
783 buffer
[bufferIndex
++] = currLR
;
784 buffer
[bufferIndex
++] = currR0
;
786 *count
= bufferIndex
;
790 #pragma mark **** DEPRECATED ****
794 kern_return_t
chudxnu_bind_current_thread(int cpu
)
796 return chudxnu_bind_thread(current_thread(), cpu
);
800 kern_return_t
chudxnu_unbind_current_thread(void)
802 return chudxnu_unbind_thread(current_thread());
807 kern_return_t
chudxnu_current_thread_get_callstack( uint32_t *callStack
,
808 mach_msg_type_number_t
*count
,
811 return chudxnu_thread_get_callstack(current_thread(), callStack
, count
, user_only
);
816 thread_t
chudxnu_current_act(void)
818 return chudxnu_current_thread();