/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>	/* pmap_find_phys(), kernel_pmap */

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>

#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
	return KERN_SUCCESS;
}

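/*
 * On i386, the user register state is saved into the thread's PCB on every
 * kernel entry, so it is always available and the answer here is a constant;
 * the PPC implementation of this call actually has to probe for a user save
 * area.
 */
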
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether or not the thread is 64 bit */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() differs from the PPC version,
		// which returns the previous save area - user or kernel - rather than
		// the kernel state, or NULL when no kernel interrupt state is available.

		// The real purpose of this branch is the following: the caller doesn't
		// care whether the thread state is user or kernel, it just wants the
		// thread state, so we need to determine the proper one to return,
		// kernel or user, for the given thread.
		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// These are the conditions under which we might be able to read
			// the kernel state; we still need to determine whether the
			// interrupt happened in kernel or user context.
			if (USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			    current_cpu_datap()->cpu_interrupt_level == 1) {
				// the interrupt happened in userland
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// the interrupt happened in the kernel
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}

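/*
 * Illustrative sketch (assumed caller, not part of this file): fetching the
 * 32-bit register state of the current thread. The flavor, count, and struct
 * come from <mach/i386/thread_status.h>.
 *
 *	x86_thread_state32_t state;
 *	mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
 *	if (chudxnu_thread_get_state(current_thread(), x86_THREAD_STATE32,
 *	                             (thread_state_t)&state, &count, FALSE) == KERN_SUCCESS) {
 *		// state.eip/state.ebp hold the PC and frame pointer of whichever
 *		// context (user or kernel) the function decided to return
 *	}
 */
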
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}

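/*
 * user_only is accepted for symmetry with chudxnu_thread_get_state() but is
 * ignored: machine_thread_set_state() only operates on the thread's saved
 * user context, so there is no kernel-state variant to select here.
 */
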
#pragma mark **** task memory read/write ****

__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;

	if (current_task() == task) {
		if (ml_at_interrupt_context()) {
			return KERN_FAILURE;	// can't do copyin on interrupt stack
		}

		if (copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}

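/*
 * Design note: for the current task a plain copyin() is the fast path, but it
 * only works against the current address map and not on the interrupt stack;
 * for any other task we go through vm_map_read_user() on that task's map.
 */
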
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;

	if (current_task() == task) {
		if (ml_at_interrupt_context()) {
			return KERN_FAILURE;	// can't do copyout on interrupt stack
		}

		if (copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	return ret;
}

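/*
 * Illustrative sketch (hypothetical user_fp address, not part of this file):
 * copying one 64-bit stack slot out of a sampled task, as the callstack code
 * below does for each frame.
 *
 *	uint64_t slot;
 *	if (chudxnu_task_read(task, &slot, user_fp + sizeof(uint64_t),
 *	                      sizeof(slot)) != KERN_SUCCESS) {
 *		// unmapped address, or we were called at interrupt context for
 *		// the current task; treat the frame as invalid
 *	}
 */
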
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	addr64_t	phys_addr;
	ppnum_t		pp;

	while (size > 0) {
		/* Get the page number */
		pp = pmap_find_phys(kernel_pmap, srcaddr);
		if (!pp) {
			return KERN_FAILURE;	/* Not mapped... */
		}

		/* Shove in the page offset */
		phys_addr = ((addr64_t)pp << 12) |
			(srcaddr & 0x0000000000000FFFULL);
		if (phys_addr >= mem_actual) {
			return KERN_FAILURE;	/* out of range */
		}

		if ((phys_addr & 0x1) || size == 1) {
			*((uint8_t *)dstaddr) =
				ml_phys_read_byte_64(phys_addr);
			dstaddr = ((uint8_t *)dstaddr) + 1;
			srcaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if ((phys_addr & 0x3) || size < 4) {
			/* size < 4 (rather than size <= 2) keeps a 3-byte remainder
			 * from falling through to a 4-byte read past the buffer */
			*((uint16_t *)dstaddr) =
				ml_phys_read_half_64(phys_addr);
			dstaddr = ((uint16_t *)dstaddr) + 1;
			srcaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			*((uint32_t *)dstaddr) =
				ml_phys_read_word_64(phys_addr);
			dstaddr = ((uint32_t *)dstaddr) + 1;
			srcaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}

	return KERN_SUCCESS;
}

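/*
 * The read goes through physical accessors (ml_phys_read_*_64) after a manual
 * pmap walk, so an unmapped page surfaces as KERN_FAILURE from
 * pmap_find_phys() instead of a fault at interrupt time. The byte/half/word
 * ladder just keeps each physical access naturally aligned: for example, a
 * 6-byte read starting at an odd physical address is issued as byte, half,
 * half, byte.
 */
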
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	addr64_t	phys_addr;
	ppnum_t		pp;

	while (size > 0) {
		/* Get the page number */
		pp = pmap_find_phys(kernel_pmap, dstaddr);
		if (!pp) {
			return KERN_FAILURE;	/* Not mapped... */
		}

		/* Shove in the page offset */
		phys_addr = ((addr64_t)pp << 12) |
			(dstaddr & 0x0000000000000FFFULL);
		if (phys_addr >= mem_actual) {	/* >= to match chudxnu_kern_read() */
			return KERN_FAILURE;	/* out of range */
		}

		if ((phys_addr & 0x1) || size == 1) {
			ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
			srcaddr = ((uint8_t *)srcaddr) + 1;
			dstaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if ((phys_addr & 0x3) || size < 4) {
			ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
			srcaddr = ((uint16_t *)srcaddr) + 1;
			dstaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
			srcaddr = ((uint32_t *)srcaddr) + 1;
			dstaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}

	return KERN_SUCCESS;
}

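/*
 * chudxnu_kern_write() mirrors chudxnu_kern_read() and shares its alignment
 * ladder; the backtrace code below only uses the read side, for walking
 * kernel-mode frames where copyin() is not an option.
 */
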
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)

// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
	(addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))

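/*
 * "The hole" is the non-canonical region of the x86-64 address space:
 * 48-bit hardware addresses are sign-extended, so everything between
 * 0x0000800000000000 and 0xFFFF7FFFFFFFFFFF can never be mapped. A frame
 * pointer landing there is garbage, and the user-mode walk rejects it up
 * front rather than issuing a chudxnu_task_read() that must fail.
 */
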
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64-bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;

typedef struct _cframe_t {
	struct _cframe_t	*prev;	// when we go 64 bits, this needs to be capped at 32 bits
	uint32_t		caller;
	uint32_t		args[0];
} cframe_t;

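/*
 * These mirror the machine frame layout: at each frame, *(fp) is the saved
 * caller frame pointer and *(fp + one pointer) is the return address. That
 * is exactly the offset the 64-bit walker computes by hand below as
 * currFP + sizeof(uint64_t), since a 32-bit kernel cannot dereference a
 * 64-bit user pointer directly.
 */
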
kern_return_t
chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	kern_return_t ret = KERN_SUCCESS;
	task_t task = thread->task;
	uint64_t currPC = 0;
	uint64_t currFP = 0;
	uint64_t prevPC = 0;
	uint64_t prevFP = 0;
	uint64_t rsp = 0;
	uint64_t kernStackMin = min_valid_stack_address();
	uint64_t kernStackMax = max_valid_stack_address();
	uint64_t *buffer = callstack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor = FALSE;
	boolean_t is64bit = FALSE;
	void *t_regs;

	if (user_only) {
		/* We can't get user state for kernel threads */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		t_regs = USER_STATE(thread);

		if (is_saved_state64(t_regs)) {
			void *int_state = current_cpu_datap()->cpu_int_state;
			x86_saved_state64_t *s64 = saved_state64(t_regs);

			if (int_state) {	// are we on an interrupt that happened in userland?
				supervisor = !(t_regs == int_state &&
					       current_cpu_datap()->cpu_interrupt_level == 1);
			} else if (s64) {
				supervisor = ((s64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				// assume a 32-bit kernel
				supervisor = FALSE;
			}
			is64bit = TRUE;
		} else {
			x86_saved_state32_t *regs;

			regs = saved_state32(t_regs);

			// find out if we're in supervisor mode
			supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
			is64bit = FALSE;
		}
	} else {
		t_regs = current_cpu_datap()->cpu_int_state;
		x86_saved_state32_t *regs;

		regs = saved_state32(t_regs);

		// find out if we're in supervisor mode
		supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
		is64bit = FALSE;
	}

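	/*
	 * The low two bits of a code segment selector are its privilege level,
	 * so (cs & SEL_PL) != SEL_PL_U reads "the saved CS is not ring 3",
	 * i.e. the sample interrupted kernel-mode execution.
	 */
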
	if (is64bit) {
		x86_saved_state64_t *regs = saved_state64(t_regs);

		if (user_only) {
			/* can't get user state for kernel threads */
			if (task == kernel_task) {
				return KERN_FAILURE;
			}
			regs = USER_REGS64(thread);
		}

		currPC = regs->isf.rip;
		currFP = regs->rbp;

		bufferIndex = 0;

		// allot space for saving %rsp on the
		// bottom of the stack for user callstacks
		if (!supervisor)
			bufferMaxIndex = bufferMaxIndex - 1;

		if (bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;	// save RIP on the top of the stack

		// now make a 64-bit back trace
		while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
			// this is the address where the caller's return address lives
			// in the user thread
			uint64_t caller = currFP + sizeof(uint64_t);

			if (bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* read our caller */
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
			if (kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			/*
			 * retrieve contents of the frame pointer and advance to the
			 * next stack frame if it's valid
			 */
			prevFP = 0;
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
			if (kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
				buffer[bufferIndex++] = currPC;
				prevPC = currPC;
			}
			if (prevFP < currFP) {
				break;	// stacks grow down; bail on a looping chain
			} else {
				currFP = prevFP;
			}
		}

		// append the value on top of the user stack, *(%rsp), at the
		// bottom of the callstack
		if (!supervisor) {
			kr = chudxnu_task_read(task, &rsp, (addr64_t) regs->isf.rsp,
					       sizeof(uint64_t));
			if (kr == KERN_SUCCESS) {
				buffer[bufferIndex++] = rsp;
			}
		}
	} else {
		/* !thread_is_64bit() */
		/* we grab 32-bit frames and silently promote them to 64 bits */
		uint32_t tmpWord = 0;
		x86_saved_state32_t *regs = NULL;

		if (user_only) {
			/* can't get user state for kernel threads */
			if (task == kernel_task || supervisor) {
				return KERN_FAILURE;
			}
			regs = USER_REGS32(thread);
		} else {
			regs = saved_state32(current_cpu_datap()->cpu_int_state);
		}

		if (regs == NULL) {
			*count = 0;
			return KERN_FAILURE;
		}

		currPC = (uint64_t) regs->eip;
		currFP = (uint64_t) regs->ebp;

		bufferIndex = 0;

		// allot space for saving %esp on the stack for user callstacks
		if (!supervisor)
			bufferMaxIndex = bufferMaxIndex - 1;

		if (bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;	// save EIP on the top of the stack

		// now make a 64-bit back trace from 32-bit stack frames
		while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
			cframe_t *fp = (cframe_t *) (uint32_t) currFP;

			if (bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* read the next frame */
			if (supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			}

			if (kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			currPC = (uint64_t) tmpWord;	// promote 32-bit address

			/*
			 * retrieve contents of the frame pointer and advance to the
			 * next stack frame if it's valid
			 */
			prevFP = 0;
			if (supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
			}
			prevFP = (uint64_t) tmpWord;	// promote 32-bit address

			if (prevFP) {
				buffer[bufferIndex++] = currPC;
				prevPC = currPC;
			}
			if (prevFP < currFP) {
				break;	// stacks grow down; bail on a looping chain
			} else {
				currFP = prevFP;
			}
		}

		// append the value on top of the user stack, *(%esp), at the
		// bottom of the callstack
		if (!supervisor) {
			kr = chudxnu_task_read(task, &tmpWord, regs->uesp, sizeof(uint32_t));
			if (kr == KERN_SUCCESS) {
				rsp = (uint64_t) tmpWord;	// promote 32-bit address
				buffer[bufferIndex++] = rsp;
			}
		}
	}

	*count = bufferIndex;
	return ret;
}

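/*
 * Illustrative sketch (assumed caller, not part of this file): sampling the
 * current thread's user-mode stack into a fixed buffer.
 *
 *	uint64_t frames[32];
 *	mach_msg_type_number_t nframes = 32;
 *	if (chudxnu_thread_get_callstack64(current_thread(), frames,
 *	                                   &nframes, TRUE) == KERN_SUCCESS) {
 *		// frames[0] is the PC, frames[1..] are return addresses, and for
 *		// user captures the last entry is the word on top of the stack,
 *		// *(%rsp), presumably kept so a leaf function's return address
 *		// is not lost when fixing up the trace later.
 *	}
 */
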
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
	thread_t		thread,
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr;
	task_t task = thread->task;
	uint32_t currPC = 0;
	uint32_t currFP = 0;
	uint32_t prevFP = 0;
	uint32_t prevPC = 0;
	uint32_t esp = 0;
	uint32_t kernStackMin = min_valid_stack_address();
	uint32_t kernStackMax = max_valid_stack_address();
	uint32_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	x86_saved_state32_t *regs = NULL;

	if (user_only) {
		/* We can't get user state for kernel threads */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		regs = USER_REGS32(thread);
	} else {
		regs = saved_state32(current_cpu_datap()->cpu_int_state);
	}

	if (regs == NULL) {
		*count = 0;
		return KERN_FAILURE;
	}

	supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

	currPC = regs->eip;
	currFP = regs->ebp;

	bufferIndex = 0;
	if (!supervisor)
		bufferMaxIndex -= 1;	// allot space for saving userland %esp on stack
	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}
	buffer[bufferIndex++] = currPC;	// save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) currFP;

		if (bufferIndex >= bufferMaxIndex) {
			*count = bufferMaxIndex;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read the caller's return address out of this frame */
		if (supervisor) {
			kr = chudxnu_kern_read(
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		} else {
			kr = chudxnu_task_read(
				task,
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		}
		if (kr != KERN_SUCCESS)
			break;

		// retrieve the contents of the frame pointer
		// and advance to the prev stack frame if it's valid
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		} else {
			kr = chudxnu_task_read(
				task,
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		}
		if (prevFP) {
			buffer[bufferIndex++] = currPC;
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;	// stacks grow down; bail on a looping chain
		} else {
			currFP = prevFP;
		}
	}

	// put the stack pointer on the bottom of the backtrace
	if (!supervisor) {
		kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
		if (kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = esp;
		}
	}

	*count = bufferIndex;
	return KERN_SUCCESS;
}

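/*
 * This 32-bit variant returns truncated 32-bit addresses; the 64-bit version
 * above performs the same frame walk but promotes every entry to 64 bits, so
 * new callers should prefer chudxnu_thread_get_callstack64().
 */
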
#pragma mark **** DEPRECATED ****

// DEPRECATED
kern_return_t
chudxnu_bind_current_thread(int cpu)
{
	return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
kern_return_t
chudxnu_unbind_current_thread(void)
{
	return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
kern_return_t
chudxnu_current_thread_get_callstack(
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	return chudxnu_thread_get_callstack(
		current_thread(), callStack, count, user_only);
}

// DEPRECATED
thread_t
chudxnu_current_act(void)
{
	return chudxnu_current_thread();
}