/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>    /* pmap_find_phys() */

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
#include <ppc/fpu_protos.h>
// forward declarations
extern kern_return_t machine_thread_get_kern_state( thread_t thread,
                                                    thread_flavor_t flavor,
                                                    thread_state_t tstate,
                                                    mach_msg_type_number_t *count);
#pragma mark **** thread binding ****
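/*
 * chudxnu_bind_thread() pins a thread to the processor numbered `cpu`;
 * chudxnu_unbind_thread() removes the binding.  When the target is the
 * current thread, thread_block() forces an immediate reschedule so the
 * thread is already running on the requested CPU when the call returns.
 * Illustrative use only (not from this file):
 *
 *	(void)chudxnu_bind_thread(current_thread(), 0);	// migrate to CPU 0
 *	// ... per-CPU work, e.g. touching that CPU's performance counters ...
 *	(void)chudxnu_unbind_thread(current_thread());
 */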
kern_return_t chudxnu_bind_thread(thread_t thread, int cpu)
{
    if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */
        thread_bind(thread, cpu_to_processor(cpu));
        if(thread==current_thread()) {
            (void)thread_block(THREAD_CONTINUE_NULL);
        }
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}
kern_return_t chudxnu_unbind_thread(thread_t thread)
{
    thread_bind(thread, PROCESSOR_NULL);
    return KERN_SUCCESS;
}
#pragma mark **** thread state ****
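/*
 * The two helpers below convert between a PPC exception savearea and the
 * Mach thread-state flavors PPC_THREAD_STATE (32-bit) and
 * PPC_THREAD_STATE64.  `count` follows the usual Mach in/out convention:
 * on entry it is the caller's buffer size, on exit the amount actually
 * copied.  A NULL savearea yields a zero-filled state.
 */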
kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            ts->r0  = (unsigned int)sv->save_r0;
            ts->r1  = (unsigned int)sv->save_r1;
            ts->r2  = (unsigned int)sv->save_r2;
            ts->r3  = (unsigned int)sv->save_r3;
            ts->r4  = (unsigned int)sv->save_r4;
            ts->r5  = (unsigned int)sv->save_r5;
            ts->r6  = (unsigned int)sv->save_r6;
            ts->r7  = (unsigned int)sv->save_r7;
            ts->r8  = (unsigned int)sv->save_r8;
            ts->r9  = (unsigned int)sv->save_r9;
            ts->r10 = (unsigned int)sv->save_r10;
            ts->r11 = (unsigned int)sv->save_r11;
            ts->r12 = (unsigned int)sv->save_r12;
            ts->r13 = (unsigned int)sv->save_r13;
            ts->r14 = (unsigned int)sv->save_r14;
            ts->r15 = (unsigned int)sv->save_r15;
            ts->r16 = (unsigned int)sv->save_r16;
            ts->r17 = (unsigned int)sv->save_r17;
            ts->r18 = (unsigned int)sv->save_r18;
            ts->r19 = (unsigned int)sv->save_r19;
            ts->r20 = (unsigned int)sv->save_r20;
            ts->r21 = (unsigned int)sv->save_r21;
            ts->r22 = (unsigned int)sv->save_r22;
            ts->r23 = (unsigned int)sv->save_r23;
            ts->r24 = (unsigned int)sv->save_r24;
            ts->r25 = (unsigned int)sv->save_r25;
            ts->r26 = (unsigned int)sv->save_r26;
            ts->r27 = (unsigned int)sv->save_r27;
            ts->r28 = (unsigned int)sv->save_r28;
            ts->r29 = (unsigned int)sv->save_r29;
            ts->r30 = (unsigned int)sv->save_r30;
            ts->r31 = (unsigned int)sv->save_r31;
            ts->cr  = (unsigned int)sv->save_cr;
            ts->xer = (unsigned int)sv->save_xer;
            ts->lr  = (unsigned int)sv->save_lr;
            ts->ctr = (unsigned int)sv->save_ctr;
            ts->srr0 = (unsigned int)sv->save_srr0;
            ts->srr1 = (unsigned int)sv->save_srr1;
            ts->vrsave = (unsigned int)sv->save_vrsave;
        } else {
            bzero((void *)ts, sizeof(struct ppc_thread_state));
        }
        *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            xts->r0  = sv->save_r0;
            xts->r1  = sv->save_r1;
            xts->r2  = sv->save_r2;
            xts->r3  = sv->save_r3;
            xts->r4  = sv->save_r4;
            xts->r5  = sv->save_r5;
            xts->r6  = sv->save_r6;
            xts->r7  = sv->save_r7;
            xts->r8  = sv->save_r8;
            xts->r9  = sv->save_r9;
            xts->r10 = sv->save_r10;
            xts->r11 = sv->save_r11;
            xts->r12 = sv->save_r12;
            xts->r13 = sv->save_r13;
            xts->r14 = sv->save_r14;
            xts->r15 = sv->save_r15;
            xts->r16 = sv->save_r16;
            xts->r17 = sv->save_r17;
            xts->r18 = sv->save_r18;
            xts->r19 = sv->save_r19;
            xts->r20 = sv->save_r20;
            xts->r21 = sv->save_r21;
            xts->r22 = sv->save_r22;
            xts->r23 = sv->save_r23;
            xts->r24 = sv->save_r24;
            xts->r25 = sv->save_r25;
            xts->r26 = sv->save_r26;
            xts->r27 = sv->save_r27;
            xts->r28 = sv->save_r28;
            xts->r29 = sv->save_r29;
            xts->r30 = sv->save_r30;
            xts->r31 = sv->save_r31;
            xts->cr  = sv->save_cr;
            xts->xer = sv->save_xer;
            xts->lr  = sv->save_lr;
            xts->ctr = sv->save_ctr;
            xts->srr0 = sv->save_srr0;
            xts->srr1 = sv->save_srr1;
            xts->vrsave = sv->save_vrsave;
        } else {
            bzero((void *)xts, sizeof(struct ppc_thread_state64));
        }
        *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    default:
        *count = 0;
        return KERN_INVALID_ARGUMENT;
    }
}
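/*
 * The inverse conversion: scatter a caller-supplied thread state back into
 * an exception savearea.  32-bit register values are widened into the
 * 64-bit savearea slots; a too-small count is rejected up front.
 */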
kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            sv->save_r0  = (uint64_t)ts->r0;
            sv->save_r1  = (uint64_t)ts->r1;
            sv->save_r2  = (uint64_t)ts->r2;
            sv->save_r3  = (uint64_t)ts->r3;
            sv->save_r4  = (uint64_t)ts->r4;
            sv->save_r5  = (uint64_t)ts->r5;
            sv->save_r6  = (uint64_t)ts->r6;
            sv->save_r7  = (uint64_t)ts->r7;
            sv->save_r8  = (uint64_t)ts->r8;
            sv->save_r9  = (uint64_t)ts->r9;
            sv->save_r10 = (uint64_t)ts->r10;
            sv->save_r11 = (uint64_t)ts->r11;
            sv->save_r12 = (uint64_t)ts->r12;
            sv->save_r13 = (uint64_t)ts->r13;
            sv->save_r14 = (uint64_t)ts->r14;
            sv->save_r15 = (uint64_t)ts->r15;
            sv->save_r16 = (uint64_t)ts->r16;
            sv->save_r17 = (uint64_t)ts->r17;
            sv->save_r18 = (uint64_t)ts->r18;
            sv->save_r19 = (uint64_t)ts->r19;
            sv->save_r20 = (uint64_t)ts->r20;
            sv->save_r21 = (uint64_t)ts->r21;
            sv->save_r22 = (uint64_t)ts->r22;
            sv->save_r23 = (uint64_t)ts->r23;
            sv->save_r24 = (uint64_t)ts->r24;
            sv->save_r25 = (uint64_t)ts->r25;
            sv->save_r26 = (uint64_t)ts->r26;
            sv->save_r27 = (uint64_t)ts->r27;
            sv->save_r28 = (uint64_t)ts->r28;
            sv->save_r29 = (uint64_t)ts->r29;
            sv->save_r30 = (uint64_t)ts->r30;
            sv->save_r31 = (uint64_t)ts->r31;
            sv->save_cr  = ts->cr;
            sv->save_xer = (uint64_t)ts->xer;
            sv->save_lr  = (uint64_t)ts->lr;
            sv->save_ctr = (uint64_t)ts->ctr;
            sv->save_srr0 = (uint64_t)ts->srr0;
            sv->save_srr1 = (uint64_t)ts->srr1;
            sv->save_vrsave = ts->vrsave;
        }
        break;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            sv->save_r0  = xts->r0;
            sv->save_r1  = xts->r1;
            sv->save_r2  = xts->r2;
            sv->save_r3  = xts->r3;
            sv->save_r4  = xts->r4;
            sv->save_r5  = xts->r5;
            sv->save_r6  = xts->r6;
            sv->save_r7  = xts->r7;
            sv->save_r8  = xts->r8;
            sv->save_r9  = xts->r9;
            sv->save_r10 = xts->r10;
            sv->save_r11 = xts->r11;
            sv->save_r12 = xts->r12;
            sv->save_r13 = xts->r13;
            sv->save_r14 = xts->r14;
            sv->save_r15 = xts->r15;
            sv->save_r16 = xts->r16;
            sv->save_r17 = xts->r17;
            sv->save_r18 = xts->r18;
            sv->save_r19 = xts->r19;
            sv->save_r20 = xts->r20;
            sv->save_r21 = xts->r21;
            sv->save_r22 = xts->r22;
            sv->save_r23 = xts->r23;
            sv->save_r24 = xts->r24;
            sv->save_r25 = xts->r25;
            sv->save_r26 = xts->r26;
            sv->save_r27 = xts->r27;
            sv->save_r28 = xts->r28;
            sv->save_r29 = xts->r29;
            sv->save_r30 = xts->r30;
            sv->save_r31 = xts->r31;
            sv->save_cr  = xts->cr;
            sv->save_xer = xts->xer;
            sv->save_lr  = xts->lr;
            sv->save_ctr = xts->ctr;
            sv->save_srr0 = xts->srr0;
            sv->save_srr1 = xts->srr1;
            sv->save_vrsave = xts->vrsave;
        }
        break;
    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}
kern_return_t chudxnu_thread_user_state_available(thread_t thread)
{
    if(find_user_regs(thread)) {
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
kern_return_t chudxnu_thread_get_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t *count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
    } else {
        if(user_only) {
            return machine_thread_get_state(thread, flavor, tstate, count);
        } else {
            // doesn't do FP or VMX
            return machine_thread_get_kern_state(thread, flavor, tstate, count);
        }
    }
}
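/*
 * Example (illustrative only): fetching the 64-bit user register state of
 * the current thread.  The flavor/count pairing follows the usual Mach
 * thread_get_state() conventions.
 *
 *	struct ppc_thread_state64 state;
 *	mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
 *	kern_return_t kr = chudxnu_thread_get_state(current_thread(),
 *		PPC_THREAD_STATE64, (thread_state_t)&state, &count, TRUE);
 */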
kern_return_t chudxnu_thread_set_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
    } else {
        return machine_thread_set_state(thread, flavor, tstate, count); // always user
    }
}
#pragma mark **** task memory read/write ****
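/*
 * chudxnu_task_read()/chudxnu_task_write() move data between the kernel and
 * a task's address space.  For the current task they take the fast path
 * (copyin()/copyout(), preserving the thread's fault-recovery handler);
 * for any other task they go through vm_map_read_user()/vm_map_write_user()
 * on that task's map.  chudxnu_kern_read()/chudxnu_kern_write() access
 * kernel virtual addresses by translating to physical pages first, so a
 * probe of an unmapped address fails cleanly instead of faulting.
 */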
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        usraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler; // restore the recovery handler copyin may have clobbered
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        useraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler; // restore the recovery handler copyout may have clobbered
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}
kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        // step by the widest naturally-aligned unit that fits
        // (the cast-increments rely on the GCC cast-as-lvalue extension)
        if((phys_addr&0x1) || size==1) {
            *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
            ((uint8_t *)dstaddr)++;
            srcaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<=2) {
            *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
            ((uint16_t *)dstaddr)++;
            srcaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
            ((uint32_t *)dstaddr)++;
            srcaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}
kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr&0x1) || size==1) {
            ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
            ((uint8_t *)srcaddr)++;
            dstaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<=2) {
            ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
            ((uint16_t *)srcaddr)++;
            dstaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
            ((uint32_t *)srcaddr)++;
            dstaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}
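// Because both routines above translate each page with pmap_find_phys() and
// bound the result against mem_actual before touching it with the ml_phys_*
// primitives, a bad kernel address fails with KERN_FAILURE instead of taking
// a fault -- which matters when sampling from interrupt or exception context.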
// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
// after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0)
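//
// Concrete example (illustrative only): for a thread stopped in leaf() with
// call chain main() -> work() -> leaf(), the returned buffer would look like
//   [0] PC inside leaf()
//   [1] saved return address into work()
//   [2] saved return address into main()
//   [3] current LR (the return address if leaf() hasn't saved it yet)
//   [4] current R0 (in case the prologue stashed LR there)
// with *count set to the number of entries actually filled in.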
#define FP_LINK_OFFSET       2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK  0x3 // Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

#define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
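// Caution: VALID_STACK_ADDRESS() deliberately references the local variables
// `supervisor`, `kernStackMin` and `kernStackMax` of whatever function it is
// expanded in, so it is only usable inside the stack walkers below.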
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
                                              uint64_t *callStack,
                                              mach_msg_type_number_t *count,
                                              boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }
    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }
    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;                // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.
    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //      saved LR
        //      saved CR
        //      saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }
        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack. However, if we catch the program in between filling in the return
        // address and lowering the stack, we want to still have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }
        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }
        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }
    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
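// chudxnu_thread_get_callstack() below is the 32-bit flavor of the walker
// above: the logic is identical, but the output buffer holds uint32_t
// entries, so addresses from 64-bit frames are truncated on store.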
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
                                            uint32_t *callStack,
                                            mach_msg_type_number_t *count,
                                            boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }
    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }
    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;                // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.
    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //      saved LR
        //      saved CR
        //      saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }
        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack. However, if we catch the program in between filling in the return
        // address and lowering the stack, we want to still have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }
        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }
        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }
    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
#pragma mark **** task and thread info ****
boolean_t chudxnu_is_64bit_task(task_t task)
{
    return (task_has_64BitAddr(task));
}
#define THING_TASK   0
#define THING_THREAD 1
// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t chudxnu_private_processor_set_things( processor_set_t pset,
                                                           mach_port_t **thing_list,
                                                           mach_msg_type_number_t *count,
                                                           int type)
{
    unsigned int actual;    /* this many things */
    unsigned int maxthings;
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = NULL;

    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);
            return (KERN_FAILURE);
        }
        if (type == THING_TASK)
            maxthings = pset->task_count;
        else
            maxthings = pset->thread_count;

        /* do we have the memory we need? */
        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }
    /* OK, have memory and the processor_set is locked & active */
    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t task, *tasks = (task_t *)addr;

        for (task = (task_t)queue_first(&pset->tasks);
             !queue_end(&pset->tasks, (queue_entry_t)task);
             task = (task_t)queue_next(&task->pset_tasks)) {
            task_reference_internal(task);
            tasks[actual++] = task;
        }
        break;
    }

    case THING_THREAD:
    {
        thread_t thread, *threads = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&pset->threads);
             !queue_end(&pset->threads, (queue_entry_t)thread);
             thread = (thread_t)queue_next(&thread->pset_threads)) {
            thread_reference_internal(thread);
            threads[actual++] = thread;
        }
        break;
    }
    }

    pset_unlock(pset);
    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */
        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                switch (type) {

                case THING_TASK:
                {
                    task_t *tasks = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(tasks[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t *threads = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(threads[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }
        *thing_list = (mach_port_t *)addr;
        *count = actual;
    }

    return (KERN_SUCCESS);
}
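// Every task or thread returned above holds a reference taken with
// task_reference_internal()/thread_reference_internal(); callers are
// expected to release the array through chudxnu_free_task_list() or
// chudxnu_free_thread_list() below, which drop those references.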
// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t chudxnu_private_task_threads(task_t task,
                                                  thread_act_array_t *threads_out,
                                                  mach_msg_type_number_t *count)
{
    mach_msg_type_number_t actual;
    thread_t *threads;
    thread_t thread;
    vm_size_t size, size_needed;
    void *addr;
    unsigned int i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = NULL;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }
        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }
    /* OK, have memory and the task is locked & active */
    threads = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
         ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        threads[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */
        *threads_out = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */
        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(threads[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }
            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            threads = (thread_t *)newaddr;
        }

        *threads_out = threads;
        *count = actual;
    }

    return (KERN_SUCCESS);
}
kern_return_t chudxnu_all_tasks(task_array_t *task_list,
                                mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
}
kern_return_t chudxnu_free_task_list(task_array_t *task_list,
                                     mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *task_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            task_deallocate((*task_list)[i]);
        }
        kfree(addr, size);
        *task_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
kern_return_t chudxnu_all_threads( thread_array_t *thread_list,
                                   mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
}
kern_return_t chudxnu_task_threads( task_t task,
                                    thread_array_t *thread_list,
                                    mach_msg_type_number_t *count)
{
    return chudxnu_private_task_threads(task, thread_list, count);
}
kern_return_t chudxnu_free_thread_list(thread_array_t *thread_list,
                                       mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *thread_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            thread_deallocate((*thread_list)[i]);
        }
        kfree(addr, size);
        *thread_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
task_t chudxnu_current_task(void)
{
    return current_task();
}
thread_t chudxnu_current_thread(void)
{
    return current_thread();
}
task_t chudxnu_task_for_thread(thread_t thread)
{
    return get_threadtask(thread);
}
kern_return_t chudxnu_thread_info(thread_t thread,
                                  thread_flavor_t flavor,
                                  thread_info_t thread_info_out,
                                  mach_msg_type_number_t *thread_info_count)
{
    return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
kern_return_t chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
    *timestamp = thread->last_switch;
    return KERN_SUCCESS;
}
#pragma mark **** DEPRECATED ****
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}
kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
                                                    mach_msg_type_number_t *count,
                                                    boolean_t user_only)
{
    return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
}
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}