/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>

#include <vm/vm_map.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
#include <ppc/fpu_protos.h>
43 // forward declarations
44 extern kern_return_t
machine_thread_get_kern_state( thread_t thread
,
45 thread_flavor_t flavor
,
46 thread_state_t tstate
,
47 mach_msg_type_number_t
*count
);
50 #pragma mark **** thread binding ****
53 kern_return_t
chudxnu_bind_thread(thread_t thread
, int cpu
)
55 if(cpu
>=0 && cpu
<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */
56 thread_bind(thread
, cpu_to_processor(cpu
));
57 if(thread
==current_thread()) {
58 (void)thread_block(THREAD_CONTINUE_NULL
);
67 kern_return_t
chudxnu_unbind_thread(thread_t thread
)
69 thread_bind(thread
, PROCESSOR_NULL
);
73 #pragma mark **** thread state ****
76 kern_return_t
chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
, struct savearea
*sv
)
78 struct ppc_thread_state
*ts
;
79 struct ppc_thread_state64
*xts
;
82 case PPC_THREAD_STATE
:
83 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
85 return KERN_INVALID_ARGUMENT
;
87 ts
= (struct ppc_thread_state
*) tstate
;
89 ts
->r0
= (unsigned int)sv
->save_r0
;
90 ts
->r1
= (unsigned int)sv
->save_r1
;
91 ts
->r2
= (unsigned int)sv
->save_r2
;
92 ts
->r3
= (unsigned int)sv
->save_r3
;
93 ts
->r4
= (unsigned int)sv
->save_r4
;
94 ts
->r5
= (unsigned int)sv
->save_r5
;
95 ts
->r6
= (unsigned int)sv
->save_r6
;
96 ts
->r7
= (unsigned int)sv
->save_r7
;
97 ts
->r8
= (unsigned int)sv
->save_r8
;
98 ts
->r9
= (unsigned int)sv
->save_r9
;
99 ts
->r10
= (unsigned int)sv
->save_r10
;
100 ts
->r11
= (unsigned int)sv
->save_r11
;
101 ts
->r12
= (unsigned int)sv
->save_r12
;
102 ts
->r13
= (unsigned int)sv
->save_r13
;
103 ts
->r14
= (unsigned int)sv
->save_r14
;
104 ts
->r15
= (unsigned int)sv
->save_r15
;
105 ts
->r16
= (unsigned int)sv
->save_r16
;
106 ts
->r17
= (unsigned int)sv
->save_r17
;
107 ts
->r18
= (unsigned int)sv
->save_r18
;
108 ts
->r19
= (unsigned int)sv
->save_r19
;
109 ts
->r20
= (unsigned int)sv
->save_r20
;
110 ts
->r21
= (unsigned int)sv
->save_r21
;
111 ts
->r22
= (unsigned int)sv
->save_r22
;
112 ts
->r23
= (unsigned int)sv
->save_r23
;
113 ts
->r24
= (unsigned int)sv
->save_r24
;
114 ts
->r25
= (unsigned int)sv
->save_r25
;
115 ts
->r26
= (unsigned int)sv
->save_r26
;
116 ts
->r27
= (unsigned int)sv
->save_r27
;
117 ts
->r28
= (unsigned int)sv
->save_r28
;
118 ts
->r29
= (unsigned int)sv
->save_r29
;
119 ts
->r30
= (unsigned int)sv
->save_r30
;
120 ts
->r31
= (unsigned int)sv
->save_r31
;
121 ts
->cr
= (unsigned int)sv
->save_cr
;
122 ts
->xer
= (unsigned int)sv
->save_xer
;
123 ts
->lr
= (unsigned int)sv
->save_lr
;
124 ts
->ctr
= (unsigned int)sv
->save_ctr
;
125 ts
->srr0
= (unsigned int)sv
->save_srr0
;
126 ts
->srr1
= (unsigned int)sv
->save_srr1
;
128 ts
->vrsave
= (unsigned int)sv
->save_vrsave
;
130 bzero((void *)ts
, sizeof(struct ppc_thread_state
));
132 *count
= PPC_THREAD_STATE_COUNT
; /* Pass back the amount we actually copied */
135 case PPC_THREAD_STATE64
:
136 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
137 return KERN_INVALID_ARGUMENT
;
139 xts
= (struct ppc_thread_state64
*) tstate
;
141 xts
->r0
= sv
->save_r0
;
142 xts
->r1
= sv
->save_r1
;
143 xts
->r2
= sv
->save_r2
;
144 xts
->r3
= sv
->save_r3
;
145 xts
->r4
= sv
->save_r4
;
146 xts
->r5
= sv
->save_r5
;
147 xts
->r6
= sv
->save_r6
;
148 xts
->r7
= sv
->save_r7
;
149 xts
->r8
= sv
->save_r8
;
150 xts
->r9
= sv
->save_r9
;
151 xts
->r10
= sv
->save_r10
;
152 xts
->r11
= sv
->save_r11
;
153 xts
->r12
= sv
->save_r12
;
154 xts
->r13
= sv
->save_r13
;
155 xts
->r14
= sv
->save_r14
;
156 xts
->r15
= sv
->save_r15
;
157 xts
->r16
= sv
->save_r16
;
158 xts
->r17
= sv
->save_r17
;
159 xts
->r18
= sv
->save_r18
;
160 xts
->r19
= sv
->save_r19
;
161 xts
->r20
= sv
->save_r20
;
162 xts
->r21
= sv
->save_r21
;
163 xts
->r22
= sv
->save_r22
;
164 xts
->r23
= sv
->save_r23
;
165 xts
->r24
= sv
->save_r24
;
166 xts
->r25
= sv
->save_r25
;
167 xts
->r26
= sv
->save_r26
;
168 xts
->r27
= sv
->save_r27
;
169 xts
->r28
= sv
->save_r28
;
170 xts
->r29
= sv
->save_r29
;
171 xts
->r30
= sv
->save_r30
;
172 xts
->r31
= sv
->save_r31
;
173 xts
->cr
= sv
->save_cr
;
174 xts
->xer
= sv
->save_xer
;
175 xts
->lr
= sv
->save_lr
;
176 xts
->ctr
= sv
->save_ctr
;
177 xts
->srr0
= sv
->save_srr0
;
178 xts
->srr1
= sv
->save_srr1
;
179 xts
->vrsave
= sv
->save_vrsave
;
181 bzero((void *)xts
, sizeof(struct ppc_thread_state64
));
183 *count
= PPC_THREAD_STATE64_COUNT
; /* Pass back the amount we actually copied */
188 return KERN_INVALID_ARGUMENT
;
194 kern_return_t
chudxnu_copy_threadstate_to_savearea(struct savearea
*sv
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
)
196 struct ppc_thread_state
*ts
;
197 struct ppc_thread_state64
*xts
;
200 case PPC_THREAD_STATE
:
201 if(*count
< PPC_THREAD_STATE_COUNT
) { /* Is the count ok? */
202 return KERN_INVALID_ARGUMENT
;
204 ts
= (struct ppc_thread_state
*) tstate
;
206 sv
->save_r0
= (uint64_t)ts
->r0
;
207 sv
->save_r1
= (uint64_t)ts
->r1
;
208 sv
->save_r2
= (uint64_t)ts
->r2
;
209 sv
->save_r3
= (uint64_t)ts
->r3
;
210 sv
->save_r4
= (uint64_t)ts
->r4
;
211 sv
->save_r5
= (uint64_t)ts
->r5
;
212 sv
->save_r6
= (uint64_t)ts
->r6
;
213 sv
->save_r7
= (uint64_t)ts
->r7
;
214 sv
->save_r8
= (uint64_t)ts
->r8
;
215 sv
->save_r9
= (uint64_t)ts
->r9
;
216 sv
->save_r10
= (uint64_t)ts
->r10
;
217 sv
->save_r11
= (uint64_t)ts
->r11
;
218 sv
->save_r12
= (uint64_t)ts
->r12
;
219 sv
->save_r13
= (uint64_t)ts
->r13
;
220 sv
->save_r14
= (uint64_t)ts
->r14
;
221 sv
->save_r15
= (uint64_t)ts
->r15
;
222 sv
->save_r16
= (uint64_t)ts
->r16
;
223 sv
->save_r17
= (uint64_t)ts
->r17
;
224 sv
->save_r18
= (uint64_t)ts
->r18
;
225 sv
->save_r19
= (uint64_t)ts
->r19
;
226 sv
->save_r20
= (uint64_t)ts
->r20
;
227 sv
->save_r21
= (uint64_t)ts
->r21
;
228 sv
->save_r22
= (uint64_t)ts
->r22
;
229 sv
->save_r23
= (uint64_t)ts
->r23
;
230 sv
->save_r24
= (uint64_t)ts
->r24
;
231 sv
->save_r25
= (uint64_t)ts
->r25
;
232 sv
->save_r26
= (uint64_t)ts
->r26
;
233 sv
->save_r27
= (uint64_t)ts
->r27
;
234 sv
->save_r28
= (uint64_t)ts
->r28
;
235 sv
->save_r29
= (uint64_t)ts
->r29
;
236 sv
->save_r30
= (uint64_t)ts
->r30
;
237 sv
->save_r31
= (uint64_t)ts
->r31
;
238 sv
->save_cr
= ts
->cr
;
239 sv
->save_xer
= (uint64_t)ts
->xer
;
240 sv
->save_lr
= (uint64_t)ts
->lr
;
241 sv
->save_ctr
= (uint64_t)ts
->ctr
;
242 sv
->save_srr0
= (uint64_t)ts
->srr0
;
243 sv
->save_srr1
= (uint64_t)ts
->srr1
;
244 sv
->save_vrsave
= ts
->vrsave
;
248 case PPC_THREAD_STATE64
:
249 if(*count
< PPC_THREAD_STATE64_COUNT
) { /* Is the count ok? */
250 return KERN_INVALID_ARGUMENT
;
252 xts
= (struct ppc_thread_state64
*) tstate
;
254 sv
->save_r0
= xts
->r0
;
255 sv
->save_r1
= xts
->r1
;
256 sv
->save_r2
= xts
->r2
;
257 sv
->save_r3
= xts
->r3
;
258 sv
->save_r4
= xts
->r4
;
259 sv
->save_r5
= xts
->r5
;
260 sv
->save_r6
= xts
->r6
;
261 sv
->save_r7
= xts
->r7
;
262 sv
->save_r8
= xts
->r8
;
263 sv
->save_r9
= xts
->r9
;
264 sv
->save_r10
= xts
->r10
;
265 sv
->save_r11
= xts
->r11
;
266 sv
->save_r12
= xts
->r12
;
267 sv
->save_r13
= xts
->r13
;
268 sv
->save_r14
= xts
->r14
;
269 sv
->save_r15
= xts
->r15
;
270 sv
->save_r16
= xts
->r16
;
271 sv
->save_r17
= xts
->r17
;
272 sv
->save_r18
= xts
->r18
;
273 sv
->save_r19
= xts
->r19
;
274 sv
->save_r20
= xts
->r20
;
275 sv
->save_r21
= xts
->r21
;
276 sv
->save_r22
= xts
->r22
;
277 sv
->save_r23
= xts
->r23
;
278 sv
->save_r24
= xts
->r24
;
279 sv
->save_r25
= xts
->r25
;
280 sv
->save_r26
= xts
->r26
;
281 sv
->save_r27
= xts
->r27
;
282 sv
->save_r28
= xts
->r28
;
283 sv
->save_r29
= xts
->r29
;
284 sv
->save_r30
= xts
->r30
;
285 sv
->save_r31
= xts
->r31
;
286 sv
->save_cr
= xts
->cr
;
287 sv
->save_xer
= xts
->xer
;
288 sv
->save_lr
= xts
->lr
;
289 sv
->save_ctr
= xts
->ctr
;
290 sv
->save_srr0
= xts
->srr0
;
291 sv
->save_srr1
= xts
->srr1
;
292 sv
->save_vrsave
= xts
->vrsave
;
300 kern_return_t
chudxnu_thread_user_state_available(thread_t thread
)
302 if(find_user_regs(thread
)) {
310 kern_return_t
chudxnu_thread_get_state(thread_t thread
,
311 thread_flavor_t flavor
,
312 thread_state_t tstate
,
313 mach_msg_type_number_t
*count
,
316 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_get_state filters out some bits
319 sv
= find_user_regs(thread
);
321 sv
= find_kern_regs(thread
);
323 return chudxnu_copy_savearea_to_threadstate(flavor
, tstate
, count
, sv
);
326 return machine_thread_get_state(thread
, flavor
, tstate
, count
);
328 // doesn't do FP or VMX
329 return machine_thread_get_kern_state(thread
, flavor
, tstate
, count
);
335 kern_return_t
chudxnu_thread_set_state(thread_t thread
,
336 thread_flavor_t flavor
,
337 thread_state_t tstate
,
338 mach_msg_type_number_t count
,
341 if(flavor
==PPC_THREAD_STATE
|| flavor
==PPC_THREAD_STATE64
) { // machine_thread_set_state filters out some bits
344 sv
= find_user_regs(thread
);
346 sv
= find_kern_regs(thread
);
348 return chudxnu_copy_threadstate_to_savearea(sv
, flavor
, tstate
, &count
);
350 return machine_thread_set_state(thread
, flavor
, tstate
, count
); // always user
354 #pragma mark **** task memory read/write ****
357 kern_return_t
chudxnu_task_read(task_t task
, void *kernaddr
, uint64_t usraddr
, vm_size_t size
)
359 kern_return_t ret
= KERN_SUCCESS
;
361 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
362 usraddr
&= 0x00000000FFFFFFFFULL
;
365 if(current_task()==task
) {
366 thread_t cur_thr
= current_thread();
367 vm_offset_t recover_handler
= cur_thr
->recover
;
369 if(ml_at_interrupt_context()) {
370 return KERN_FAILURE
; // can't do copyin on interrupt stack
373 if(copyin(usraddr
, kernaddr
, size
)) {
376 cur_thr
->recover
= recover_handler
;
378 vm_map_t map
= get_task_map(task
);
379 ret
= vm_map_read_user(map
, usraddr
, kernaddr
, size
);
386 kern_return_t
chudxnu_task_write(task_t task
, uint64_t useraddr
, void *kernaddr
, vm_size_t size
)
388 kern_return_t ret
= KERN_SUCCESS
;
390 if(!chudxnu_is_64bit_task(task
)) { // clear any cruft out of upper 32-bits for 32-bit tasks
391 useraddr
&= 0x00000000FFFFFFFFULL
;
394 if(current_task()==task
) {
395 thread_t cur_thr
= current_thread();
396 vm_offset_t recover_handler
= cur_thr
->recover
;
398 if(ml_at_interrupt_context()) {
399 return KERN_FAILURE
; // can't do copyout on interrupt stack
402 if(copyout(kernaddr
, useraddr
, size
)) {
405 cur_thr
->recover
= recover_handler
;
407 vm_map_t map
= get_task_map(task
);
408 ret
= vm_map_write_user(map
, kernaddr
, useraddr
, size
);
415 kern_return_t
chudxnu_kern_read(void *dstaddr
, vm_offset_t srcaddr
, vm_size_t size
)
421 pp
= pmap_find_phys(kernel_pmap
, srcaddr
); /* Get the page number */
423 return KERN_FAILURE
; /* Not mapped... */
426 phys_addr
= ((addr64_t
)pp
<< 12) | (srcaddr
& 0x0000000000000FFFULL
); /* Shove in the page offset */
427 if(phys_addr
>= mem_actual
) {
428 return KERN_FAILURE
; /* out of range */
431 if((phys_addr
&0x1) || size
==1) {
432 *((uint8_t *)dstaddr
) = ml_phys_read_byte_64(phys_addr
);
433 ((uint8_t *)dstaddr
)++;
434 srcaddr
+= sizeof(uint8_t);
435 size
-= sizeof(uint8_t);
436 } else if((phys_addr
&0x3) || size
<=2) {
437 *((uint16_t *)dstaddr
) = ml_phys_read_half_64(phys_addr
);
438 ((uint16_t *)dstaddr
)++;
439 srcaddr
+= sizeof(uint16_t);
440 size
-= sizeof(uint16_t);
442 *((uint32_t *)dstaddr
) = ml_phys_read_word_64(phys_addr
);
443 ((uint32_t *)dstaddr
)++;
444 srcaddr
+= sizeof(uint32_t);
445 size
-= sizeof(uint32_t);
452 kern_return_t
chudxnu_kern_write(vm_offset_t dstaddr
, void *srcaddr
, vm_size_t size
)
458 pp
= pmap_find_phys(kernel_pmap
, dstaddr
); /* Get the page number */
460 return KERN_FAILURE
; /* Not mapped... */
463 phys_addr
= ((addr64_t
)pp
<< 12) | (dstaddr
& 0x0000000000000FFFULL
); /* Shove in the page offset */
464 if(phys_addr
>= mem_actual
) {
465 return KERN_FAILURE
; /* out of range */
468 if((phys_addr
&0x1) || size
==1) {
469 ml_phys_write_byte_64(phys_addr
, *((uint8_t *)srcaddr
));
470 ((uint8_t *)srcaddr
)++;
471 dstaddr
+= sizeof(uint8_t);
472 size
-= sizeof(uint8_t);
473 } else if((phys_addr
&0x3) || size
<=2) {
474 ml_phys_write_half_64(phys_addr
, *((uint16_t *)srcaddr
));
475 ((uint16_t *)srcaddr
)++;
476 dstaddr
+= sizeof(uint16_t);
477 size
-= sizeof(uint16_t);
479 ml_phys_write_word_64(phys_addr
, *((uint32_t *)srcaddr
));
480 ((uint32_t *)srcaddr
)++;
481 dstaddr
+= sizeof(uint32_t);
482 size
-= sizeof(uint32_t);
// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
// after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0)

#define FP_LINK_OFFSET 2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK 0x3 // Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

// NOTE: relies on `supervisor`, `kernStackMin` and `kernStackMax` being in scope at expansion site.
#define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
517 kern_return_t
chudxnu_thread_get_callstack64( thread_t thread
,
519 mach_msg_type_number_t
*count
,
523 task_t task
= get_threadtask(thread
);
524 uint64_t nextFramePointer
= 0;
525 uint64_t currPC
, currLR
, currR0
;
526 uint64_t framePointer
;
528 uint64_t kernStackMin
= min_valid_stack_address();
529 uint64_t kernStackMax
= max_valid_stack_address();
530 uint64_t *buffer
= callStack
;
533 int bufferMaxIndex
= *count
;
534 boolean_t supervisor
;
539 sv
= find_user_regs(thread
);
541 sv
= find_kern_regs(thread
);
549 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
551 #warning assuming kernel task is always 32-bit
554 is64Bit
= chudxnu_is_64bit_task(task
);
557 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
558 if(bufferMaxIndex
<2) {
560 return KERN_RESOURCE_SHORTAGE
;
563 currPC
= sv
->save_srr0
;
564 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
565 currLR
= sv
->save_lr
;
566 currR0
= sv
->save_r0
;
568 bufferIndex
= 0; // start with a stack of size zero
569 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
571 // Now, fill buffer with stack backtraces.
572 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
574 // Above the stack pointer, the following values are saved:
579 // Here, we'll get the lr from the stack.
583 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
585 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
588 // Note that we read the pc even for the first stack frame (which, in theory,
589 // is always empty because the callee fills it in just before it lowers the
590 // stack. However, if we catch the program in between filling in the return
591 // address and lowering the stack, we want to still have a valid backtrace.
592 // FixupStack correctly disregards this value if necessary.
596 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
598 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
603 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
605 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
609 if(kr
!=KERN_SUCCESS
) {
614 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
617 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
619 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
620 nextFramePointer
= tmpWord
;
624 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
626 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
627 nextFramePointer
= tmpWord
;
630 if(kr
!=KERN_SUCCESS
) {
631 nextFramePointer
= 0;
634 if(nextFramePointer
) {
635 buffer
[bufferIndex
++] = pc
;
639 if(nextFramePointer
<framePointer
) {
642 framePointer
= nextFramePointer
;
646 if(bufferIndex
>=bufferMaxIndex
) {
648 return KERN_RESOURCE_SHORTAGE
;
651 // Save link register and R0 at bottom of stack (used for later fixup).
652 buffer
[bufferIndex
++] = currLR
;
653 buffer
[bufferIndex
++] = currR0
;
655 *count
= bufferIndex
;
660 kern_return_t
chudxnu_thread_get_callstack( thread_t thread
,
662 mach_msg_type_number_t
*count
,
666 task_t task
= get_threadtask(thread
);
667 uint64_t nextFramePointer
= 0;
668 uint64_t currPC
, currLR
, currR0
;
669 uint64_t framePointer
;
671 uint64_t kernStackMin
= min_valid_stack_address();
672 uint64_t kernStackMax
= max_valid_stack_address();
673 uint32_t *buffer
= callStack
;
676 int bufferMaxIndex
= *count
;
677 boolean_t supervisor
;
682 sv
= find_user_regs(thread
);
684 sv
= find_kern_regs(thread
);
692 supervisor
= SUPERVISOR_MODE(sv
->save_srr1
);
694 #warning assuming kernel task is always 32-bit
697 is64Bit
= chudxnu_is_64bit_task(task
);
700 bufferMaxIndex
= bufferMaxIndex
- 2; // allot space for saving the LR and R0 on the stack at the end.
701 if(bufferMaxIndex
<2) {
703 return KERN_RESOURCE_SHORTAGE
;
706 currPC
= sv
->save_srr0
;
707 framePointer
= sv
->save_r1
; /* r1 is the stack pointer (no FP on PPC) */
708 currLR
= sv
->save_lr
;
709 currR0
= sv
->save_r0
;
711 bufferIndex
= 0; // start with a stack of size zero
712 buffer
[bufferIndex
++] = currPC
; // save PC in position 0.
714 // Now, fill buffer with stack backtraces.
715 while(bufferIndex
<bufferMaxIndex
&& VALID_STACK_ADDRESS(framePointer
)) {
717 // Above the stack pointer, the following values are saved:
722 // Here, we'll get the lr from the stack.
726 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint64_t);
728 fp_link
= framePointer
+ FP_LINK_OFFSET
*sizeof(uint32_t);
731 // Note that we read the pc even for the first stack frame (which, in theory,
732 // is always empty because the callee fills it in just before it lowers the
733 // stack. However, if we catch the program in between filling in the return
734 // address and lowering the stack, we want to still have a valid backtrace.
735 // FixupStack correctly disregards this value if necessary.
739 kr
= chudxnu_kern_read(&pc
, fp_link
, sizeof(uint64_t));
741 kr
= chudxnu_kern_read(&tmpWord
, fp_link
, sizeof(uint32_t));
746 kr
= chudxnu_task_read(task
, &pc
, fp_link
, sizeof(uint64_t));
748 kr
= chudxnu_task_read(task
, &tmpWord
, fp_link
, sizeof(uint32_t));
752 if(kr
!=KERN_SUCCESS
) {
757 // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
760 kr
= chudxnu_kern_read(&nextFramePointer
, framePointer
, sizeof(uint64_t));
762 kr
= chudxnu_kern_read(&tmpWord
, framePointer
, sizeof(uint32_t));
763 nextFramePointer
= tmpWord
;
767 kr
= chudxnu_task_read(task
, &nextFramePointer
, framePointer
, sizeof(uint64_t));
769 kr
= chudxnu_task_read(task
, &tmpWord
, framePointer
, sizeof(uint32_t));
770 nextFramePointer
= tmpWord
;
773 if(kr
!=KERN_SUCCESS
) {
774 nextFramePointer
= 0;
777 if(nextFramePointer
) {
778 buffer
[bufferIndex
++] = pc
;
782 if(nextFramePointer
<framePointer
) {
785 framePointer
= nextFramePointer
;
789 if(bufferIndex
>=bufferMaxIndex
) {
791 return KERN_RESOURCE_SHORTAGE
;
794 // Save link register and R0 at bottom of stack (used for later fixup).
795 buffer
[bufferIndex
++] = currLR
;
796 buffer
[bufferIndex
++] = currR0
;
798 *count
= bufferIndex
;
802 #pragma mark **** task and thread info ****
805 boolean_t
chudxnu_is_64bit_task(task_t task
)
807 return (task_has_64BitAddr(task
));
811 #define THING_THREAD 1
813 // an exact copy of processor_set_things() except no mig conversion at the end!
814 static kern_return_t
chudxnu_private_processor_set_things( processor_set_t pset
,
815 mach_port_t
**thing_list
,
816 mach_msg_type_number_t
*count
,
819 unsigned int actual
; /* this many things */
820 unsigned int maxthings
;
823 vm_size_t size
, size_needed
;
826 if (pset
== PROCESSOR_SET_NULL
)
827 return (KERN_INVALID_ARGUMENT
);
836 return (KERN_FAILURE
);
839 if (type
== THING_TASK
)
840 maxthings
= pset
->task_count
;
842 maxthings
= pset
->thread_count
;
844 /* do we have the memory we need? */
846 size_needed
= maxthings
* sizeof (mach_port_t
);
847 if (size_needed
<= size
)
850 /* unlock the pset and allocate more memory */
856 assert(size_needed
> 0);
861 return (KERN_RESOURCE_SHORTAGE
);
864 /* OK, have memory and the processor_set is locked & active */
871 task_t task
, *tasks
= (task_t
*)addr
;
873 for (task
= (task_t
)queue_first(&pset
->tasks
);
874 !queue_end(&pset
->tasks
, (queue_entry_t
)task
);
875 task
= (task_t
)queue_next(&task
->pset_tasks
)) {
876 task_reference_internal(task
);
877 tasks
[actual
++] = task
;
885 thread_t thread
, *threads
= (thread_t
*)addr
;
887 for (i
= 0, thread
= (thread_t
)queue_first(&pset
->threads
);
888 !queue_end(&pset
->threads
, (queue_entry_t
)thread
);
889 thread
= (thread_t
)queue_next(&thread
->pset_threads
)) {
890 thread_reference_internal(thread
);
891 threads
[actual
++] = thread
;
900 if (actual
< maxthings
)
901 size_needed
= actual
* sizeof (mach_port_t
);
904 /* no things, so return null pointer and deallocate memory */
912 /* if we allocated too much, must copy */
914 if (size_needed
< size
) {
917 newaddr
= kalloc(size_needed
);
923 task_t
*tasks
= (task_t
*)addr
;
925 for (i
= 0; i
< actual
; i
++)
926 task_deallocate(tasks
[i
]);
932 thread_t
*threads
= (thread_t
*)addr
;
934 for (i
= 0; i
< actual
; i
++)
935 thread_deallocate(threads
[i
]);
941 return (KERN_RESOURCE_SHORTAGE
);
944 bcopy((void *) addr
, (void *) newaddr
, size_needed
);
949 *thing_list
= (mach_port_t
*)addr
;
953 return (KERN_SUCCESS
);
956 // an exact copy of task_threads() except no mig conversion at the end!
957 static kern_return_t
chudxnu_private_task_threads(task_t task
,
958 thread_act_array_t
*threads_out
,
959 mach_msg_type_number_t
*count
)
961 mach_msg_type_number_t actual
;
964 vm_size_t size
, size_needed
;
968 if (task
== TASK_NULL
)
969 return (KERN_INVALID_ARGUMENT
);
981 return (KERN_FAILURE
);
984 actual
= task
->thread_count
;
986 /* do we have the memory we need? */
987 size_needed
= actual
* sizeof (mach_port_t
);
988 if (size_needed
<= size
)
991 /* unlock the task and allocate more memory */
997 assert(size_needed
> 0);
1000 addr
= kalloc(size
);
1002 return (KERN_RESOURCE_SHORTAGE
);
1005 /* OK, have memory and the task is locked & active */
1006 threads
= (thread_t
*)addr
;
1010 for (thread
= (thread_t
)queue_first(&task
->threads
); i
< actual
;
1011 ++i
, thread
= (thread_t
)queue_next(&thread
->task_threads
)) {
1012 thread_reference_internal(thread
);
1013 threads
[j
++] = thread
;
1016 assert(queue_end(&task
->threads
, (queue_entry_t
)thread
));
1019 size_needed
= actual
* sizeof (mach_port_t
);
1021 /* can unlock task now that we've got the thread refs */
1025 /* no threads, so return null pointer and deallocate memory */
1034 /* if we allocated too much, must copy */
1036 if (size_needed
< size
) {
1039 newaddr
= kalloc(size_needed
);
1041 for (i
= 0; i
< actual
; ++i
)
1042 thread_deallocate(threads
[i
]);
1044 return (KERN_RESOURCE_SHORTAGE
);
1047 bcopy(addr
, newaddr
, size_needed
);
1049 threads
= (thread_t
*)newaddr
;
1052 *threads_out
= threads
;
1056 return (KERN_SUCCESS
);
1061 kern_return_t
chudxnu_all_tasks(task_array_t
*task_list
,
1062 mach_msg_type_number_t
*count
)
1064 return chudxnu_private_processor_set_things(&default_pset
, (mach_port_t
**)task_list
, count
, THING_TASK
);
1068 kern_return_t
chudxnu_free_task_list(task_array_t
*task_list
,
1069 mach_msg_type_number_t
*count
)
1071 vm_size_t size
= (*count
)*sizeof(mach_port_t
);
1072 void *addr
= *task_list
;
1075 int i
, maxCount
= *count
;
1076 for(i
=0; i
<maxCount
; i
++) {
1077 task_deallocate((*task_list
)[i
]);
1082 return KERN_SUCCESS
;
1084 return KERN_FAILURE
;
1089 kern_return_t
chudxnu_all_threads( thread_array_t
*thread_list
,
1090 mach_msg_type_number_t
*count
)
1092 return chudxnu_private_processor_set_things(&default_pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
);
1096 kern_return_t
chudxnu_task_threads( task_t task
,
1097 thread_array_t
*thread_list
,
1098 mach_msg_type_number_t
*count
)
1100 return chudxnu_private_task_threads(task
, thread_list
, count
);
1104 kern_return_t
chudxnu_free_thread_list(thread_array_t
*thread_list
,
1105 mach_msg_type_number_t
*count
)
1107 vm_size_t size
= (*count
)*sizeof(mach_port_t
);
1108 void *addr
= *thread_list
;
1111 int i
, maxCount
= *count
;
1112 for(i
=0; i
<maxCount
; i
++) {
1113 thread_deallocate((*thread_list
)[i
]);
1116 *thread_list
= NULL
;
1118 return KERN_SUCCESS
;
1120 return KERN_FAILURE
;
1125 task_t
chudxnu_current_task(void)
1127 return current_task();
1131 thread_t
chudxnu_current_thread(void)
1133 return current_thread();
1137 task_t
chudxnu_task_for_thread(thread_t thread
)
1139 return get_threadtask(thread
);
1143 kern_return_t
chudxnu_thread_info(thread_t thread
,
1144 thread_flavor_t flavor
,
1145 thread_info_t thread_info_out
,
1146 mach_msg_type_number_t
*thread_info_count
)
1148 return thread_info(thread
, flavor
, thread_info_out
, thread_info_count
);
1152 kern_return_t
chudxnu_thread_last_context_switch(thread_t thread
, uint64_t *timestamp
)
1154 *timestamp
= thread
->last_switch
;
1155 return KERN_SUCCESS
;
1158 #pragma mark **** DEPRECATED ****
1162 kern_return_t
chudxnu_bind_current_thread(int cpu
)
1164 return chudxnu_bind_thread(current_thread(), cpu
);
1168 kern_return_t
chudxnu_unbind_current_thread(void)
1170 return chudxnu_unbind_thread(current_thread());
1175 kern_return_t
chudxnu_current_thread_get_callstack( uint32_t *callStack
,
1176 mach_msg_type_number_t
*count
,
1177 boolean_t user_only
)
1179 return chudxnu_thread_get_callstack(current_thread(), callStack
, count
, user_only
);
1184 thread_t
chudxnu_current_act(void)
1186 return chudxnu_current_thread();