/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether the thread is 64 bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() is different from the PPC version,
		// which returns the previous save area - user or kernel - rather than
		// kernel, or NULL if no kernel interrupt state is available.
		//
		// the real purpose of this branch is the following: the caller doesn't
		// care whether the thread state is user or kernel, it just wants the
		// thread state, so we need to determine the proper one to return,
		// kernel or user, for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context.
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// interrupt happened in the kernel
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}
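/*
 * Illustrative usage sketch (not part of the original file): a sampling
 * tool running in the kernel might fetch the 32-bit register state of the
 * current thread roughly like this, using the standard Mach thread-state
 * flavors:
 *
 *	x86_thread_state32_t state;
 *	mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
 *	kern_return_t kr = chudxnu_thread_get_state(current_thread(),
 *			x86_THREAD_STATE32, (thread_state_t)&state,
 *			&count, TRUE);	// TRUE: user state only
 *	if (kr == KERN_SUCCESS) {
 *		// state.eip is the user-mode PC
 *	}
 */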
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}
#pragma mark **** task memory read/write ****

__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(current_task() == task) {
		if(ml_at_interrupt_context()) {
			return KERN_FAILURE;	// can't do copyin on interrupt stack
		}

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(current_task() == task) {
		if(ml_at_interrupt_context()) {
			return KERN_FAILURE;	// can't do copyout on interrupt stack
		}

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	return ret;
}
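/*
 * Illustrative usage sketch (not part of the original file): copying one
 * 64-bit word out of a (possibly remote) task's address space.
 * `target_task' and `user_va' are hypothetical caller-supplied values.
 *
 *	uint64_t word;
 *	if (chudxnu_task_read(target_task, &word, user_va,
 *			sizeof(word)) == KERN_SUCCESS) {
 *		// use word
 *	}
 *
 * Note the two paths above: copyin()/copyout() when the target is the
 * current task (except at interrupt context), and vm_map_read_user()/
 * vm_map_write_user() against the target's vm_map otherwise.
 */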
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	ppnum_t pp;
	addr64_t phys_addr;

	/* Move the data, using the widest access alignment and size allow */
	while (size > 0) {

		/* Get the page number */
		pp = pmap_find_phys(kernel_pmap, srcaddr);
		if (!pp) {
			return KERN_FAILURE;	/* Not mapped... */
		}

		/* Shove in the page offset */
		phys_addr = ((addr64_t)pp << 12) |
			(srcaddr & 0x0000000000000FFFULL);
		if (phys_addr >= mem_actual) {
			return KERN_FAILURE;	/* out of range */
		}

		if ((phys_addr & 0x1) || size == 1) {
			*((uint8_t *)dstaddr) =
				ml_phys_read_byte_64(phys_addr);
			dstaddr = ((uint8_t *)dstaddr) + 1;
			srcaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if ((phys_addr & 0x3) || size < 4) {
			/* size < 4 so a 3-byte tail never takes the word path */
			*((uint16_t *)dstaddr) =
				ml_phys_read_half_64(phys_addr);
			dstaddr = ((uint16_t *)dstaddr) + 1;
			srcaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			*((uint32_t *)dstaddr) =
				ml_phys_read_word_64(phys_addr);
			dstaddr = ((uint32_t *)dstaddr) + 1;
			srcaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	ppnum_t pp;
	addr64_t phys_addr;

	/* Move the data, using the widest access alignment and size allow */
	while (size > 0) {

		/* Get the page number */
		pp = pmap_find_phys(kernel_pmap, dstaddr);
		if (!pp) {
			return KERN_FAILURE;	/* Not mapped... */
		}

		/* Shove in the page offset */
		phys_addr = ((addr64_t)pp << 12) |
			(dstaddr & 0x0000000000000FFFULL);
		if (phys_addr >= mem_actual) {
			return KERN_FAILURE;	/* out of range */
		}

		if ((phys_addr & 0x1) || size == 1) {
			ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
			srcaddr = ((uint8_t *)srcaddr) + 1;
			dstaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if ((phys_addr & 0x3) || size < 4) {
			/* size < 4 so a 3-byte tail never takes the word path */
			ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
			srcaddr = ((uint16_t *)srcaddr) + 1;
			dstaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
			srcaddr = ((uint32_t *)srcaddr) + 1;
			dstaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}
	return KERN_SUCCESS;
}
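/*
 * Illustrative usage sketch (not part of the original file): both routines
 * above translate each kernel virtual address to a physical address with
 * pmap_find_phys() and access physical memory directly, so an unmapped
 * page fails cleanly instead of faulting. `some_kernel_va' is a
 * hypothetical kernel virtual address:
 *
 *	uint32_t word;
 *	if (chudxnu_kern_read(&word, (vm_offset_t)some_kernel_va,
 *			sizeof(word)) != KERN_SUCCESS) {
 *		// address was not mapped (or out of range)
 *	}
 */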
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr)	\
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)

// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr)	\
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) :		\
	 (addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))
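/*
 * Illustrative note (not part of the original file): for user-mode frame
 * pointers, VALID_STACK_ADDRESS64 rejects non-canonical x86-64 addresses,
 * i.e. the "hole" from 0x0000800000000000 through 0xFFFF7FFFFFFFFFFF where
 * bits 47-63 are neither all zeros nor all ones. A user frame pointer such
 * as 0x00007FFF5FBFF8A0 passes the check; 0x0000900000000000 does not.
 */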
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;

typedef struct _cframe_t {
	struct _cframe_t	*prev;	// when we go 64 bits, this needs to be capped at 32 bits
	uint32_t		caller;
	uint32_t		args[0];
} cframe_t;
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	kern_return_t ret = KERN_SUCCESS;
	task_t task = thread->task;
	uint64_t currPC = 0;
	uint64_t currFP = 0;
	uint64_t prevFP = 0;
	uint64_t rsp = 0;
	uint64_t kernStackMin = min_valid_stack_address();
	uint64_t kernStackMax = max_valid_stack_address();
	uint64_t *buffer = callstack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor = FALSE;
	boolean_t is64bit = FALSE;
	void *t_regs;

	if (user_only) {
		/* We can't get user state for kernel threads */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		t_regs = USER_STATE(thread);

		if(is_saved_state64(t_regs)) {
			void *int_state = current_cpu_datap()->cpu_int_state;
			x86_saved_state64_t *s64 = saved_state64(t_regs);

			if(int_state) {	// are we on an interrupt that happened in user land?
				supervisor = !(t_regs == int_state &&
					current_cpu_datap()->cpu_interrupt_level == 1);
			} else if(s64) {
				supervisor = ((s64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				// assume 32 bit kernel
				supervisor = FALSE;
			}
			is64bit = TRUE;
		} else {
			x86_saved_state32_t *regs;

			regs = saved_state32(t_regs);

			// find out if we're in supervisor mode
			supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
			is64bit = FALSE;
		}
	} else {
		t_regs = current_cpu_datap()->cpu_int_state;
		x86_saved_state32_t *regs;

		regs = saved_state32(t_regs);

		// find out if we're in supervisor mode
		supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
		is64bit = FALSE;
	}

	if(is64bit) {
		x86_saved_state64_t *regs = saved_state64(t_regs);

		if(user_only) {
			/* can't get user state for kernel threads */
			if(task == kernel_task) {
				return KERN_FAILURE;
			}
			regs = USER_REGS64(thread);
		}

		currPC = regs->isf.rip;
		currFP = regs->rbp;

		if(!currPC) {
			*count = 0;
			return KERN_FAILURE;
		}

		bufferIndex = 0;

		// allot space for saving %rsp on the
		// bottom of the stack for user callstacks
		bufferMaxIndex = bufferMaxIndex - 1;

		if(bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;	// save RIP on the top of the stack

		// now make a 64bit back trace
		while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax))
		{
			// this is the address where the caller lives in the user thread
			uint64_t caller = currFP + sizeof(uint64_t);

			if(!currFP) {
				currPC = 0;
				break;
			}

			if(bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* read our caller */
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));

			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			/*
			 * retrieve contents of the frame pointer and advance to the next stack
			 * frame if it's valid
			 */
			prevFP = 0;
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));

			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
				buffer[bufferIndex++] = currPC;
			}
			if(prevFP < currFP) {
				break;
			} else {
				currFP = prevFP;
			}
		}

		// append (rsp) on the bottom of the callstack
		kr = chudxnu_task_read(task, &rsp, (addr64_t) regs->isf.rsp, sizeof(uint64_t));
		if(kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = rsp;
		}
	} else {
		/* !thread_is_64bit() */
		/* we grab 32 bit frames and silently promote them to 64 bits */
		uint32_t tmpWord = 0;
		x86_saved_state32_t *regs = NULL;

		if(user_only) {
			/* can't get user state for kernel threads */
			if(task == kernel_task || supervisor) {
				return KERN_FAILURE;
			}
			regs = USER_REGS32(thread);
		} else {
			regs = saved_state32(current_cpu_datap()->cpu_int_state);
		}

		if(regs == NULL) {
			*count = 0;
			return KERN_FAILURE;
		}

		currPC = (uint64_t) regs->eip;
		currFP = (uint64_t) regs->ebp;

		bufferIndex = 0;
		// bufferMaxIndex = bufferMaxIndex - 1; //allot space for saving %rsp on the stack for user callstacks
		if(bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;	// save EIP on the top of the stack

		// now make a 64bit back trace from 32 bit stack frames
		while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax))
		{
			cframe_t *fp = (cframe_t *) (uint32_t) currFP;

			if(bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* read the next frame */
			if(supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			}

			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			currPC = (uint64_t) tmpWord;	// promote 32 bit address

			/*
			 * retrieve contents of the frame pointer and advance to the next stack
			 * frame if it's valid
			 */
			prevFP = 0;
			if(supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
			}
			prevFP = (uint64_t) tmpWord;	// promote 32 bit address

			if(prevFP) {
				buffer[bufferIndex++] = currPC;
			}
			if(prevFP < currFP) {
				break;
			} else {
				currFP = prevFP;
			}
		}

		// append (esp) on the bottom of the callstack
		if(!supervisor) {
			kr = chudxnu_task_read(task, &tmpWord, regs->uesp, sizeof(uint32_t));
			if(kr == KERN_SUCCESS) {
				rsp = (uint64_t) tmpWord;	// promote 32 bit address
				buffer[bufferIndex++] = rsp;
			}
		}
	}

	*count = bufferIndex;
	return ret;
}
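/*
 * Illustrative usage sketch (not part of the original file): a profiler
 * would size the buffer up front and treat KERN_RESOURCE_SHORTAGE as a
 * truncated trace. `MAX_FRAMES' is a hypothetical constant.
 *
 *	uint64_t frames[MAX_FRAMES];
 *	mach_msg_type_number_t nframes = MAX_FRAMES;
 *	kern_return_t kr = chudxnu_thread_get_callstack64(thread, frames,
 *			&nframes, TRUE);	// TRUE: trace the user stack
 *	// on success frames[0] holds the PC; for user callstacks the saved
 *	// stack pointer is appended as the last entry
 */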
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
	thread_t		thread,
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr;
	task_t task = thread->task;
	uint32_t currPC = 0;
	uint32_t currFP = 0;
	uint32_t prevFP = 0;
	uint32_t esp = 0;
	uint32_t kernStackMin = min_valid_stack_address();
	uint32_t kernStackMax = max_valid_stack_address();
	uint32_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	x86_saved_state32_t *regs = NULL;

	if (user_only) {
		/* We can't get user state for kernel threads */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		regs = USER_REGS32(thread);
	} else {
		regs = saved_state32(current_cpu_datap()->cpu_int_state);
	}

	if (regs == NULL) {
		*count = 0;
		return KERN_FAILURE;
	}

	// find out if we're in supervisor mode
	supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

	currPC = regs->eip;
	currFP = regs->ebp;

	bufferIndex = 0;
	if (!supervisor)
		bufferMaxIndex -= 1;	// allot space for saving userland %esp on stack
	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}
	buffer[bufferIndex++] = currPC;	//save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) currFP;

		if (bufferIndex >= bufferMaxIndex) {
			*count = bufferMaxIndex;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read the next frame */
		if (supervisor) {
			kr = chudxnu_kern_read(
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		} else {
			kr = chudxnu_task_read(
				task,
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		}
		if (kr != KERN_SUCCESS)
			break;

		// retrieve the contents of the frame pointer
		// and advance to the prev stack frame if it's valid
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		} else {
			kr = chudxnu_task_read(
				task,
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		}
		if (prevFP) {
			buffer[bufferIndex++] = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	// put the stack pointer on the bottom of the backtrace
	if (!supervisor) {
		kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
		if (kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = esp;
		}
	}

	*count = bufferIndex;
	return KERN_SUCCESS;
}
#pragma mark **** DEPRECATED ****

// DEPRECATED
__private_extern__ kern_return_t
chudxnu_bind_current_thread(int cpu)
{
	return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
__private_extern__ kern_return_t
chudxnu_unbind_current_thread(void)
{
	return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
__private_extern__ kern_return_t
chudxnu_current_thread_get_callstack(
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	return chudxnu_thread_get_callstack(
		current_thread(), callStack, count, user_only);
}

// DEPRECATED
__private_extern__ thread_t
chudxnu_current_act(void)
{
	return chudxnu_current_thread();
}