/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    if(user_only) {
        /* We can't get user state for kernel threads */
        if(thread->task == kernel_task)
            return KERN_FAILURE;
        /* this properly handles deciding whether the thread is 64-bit or not */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // i386 machine_thread_get_kern_state() is different from the PPC version,
        // which returns the previous save area - user or kernel - rather than
        // kernel, or NULL if no kernel interrupt state is available.

        // the real purpose of this branch is the following:
        // the caller doesn't care whether the thread state is user or kernel;
        // they just want the thread state, so we need to determine the proper
        // one to return, kernel or user, for the given thread.
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // the above are conditions where we possibly can read the kernel
            // state. we still need to determine if this interrupt happened in
            // kernel or user context.
            if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
               current_cpu_datap()->cpu_interrupt_level == 1) {
                // interrupt happened in user land
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // kernel interrupt.
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}
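
/*
 * Example usage (a sketch, not part of this file; assumes the caller already
 * holds a valid thread reference):
 *
 *	x86_thread_state_t state;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *	kern_return_t kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE,
 *		(thread_state_t)&state, &count, TRUE);	// TRUE: user state only
 */
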
__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count,
    boolean_t               user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}

#pragma mark **** task memory read/write ****

__private_extern__ kern_return_t
chudxnu_task_read(
    task_t      task,
    void        *kernaddr,
    uint64_t    usraddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // Can't look at tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {
        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
    task_t      task,
    uint64_t    useraddr,
    void        *kernaddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // can't poke into tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {
        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}
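
/*
 * Example usage (a sketch; "remote_task" and "uaddr" are hypothetical names,
 * not part of this file):
 *
 *	uint32_t word = 0;
 *	if(chudxnu_task_read(remote_task, &word, uaddr, sizeof(word)) ==
 *	   KERN_SUCCESS) {
 *		// word now holds 4 bytes copied from the task's user address space
 *	}
 */
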
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void        *srcaddr,
    vm_size_t   size)
{
    return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}
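
/*
 * Both helpers lean on ml_nofault_copy(), which stops at the first unmapped
 * page and returns the number of bytes actually copied; a short copy is
 * reported here as KERN_FAILURE rather than a partial result. The backtrace
 * code below depends on this to probe possibly-stale stack addresses safely.
 */
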
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
    ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
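
/*
 * Illustration (values are hypothetical, not from this file): x86-64 only
 * maps canonical addresses, where bits 63:47 are all equal, so user frame
 * pointers must avoid the hole between 0x0000800000000000 and
 * 0xFFFF7FFFFFFFFFFF. For example:
 *
 *	VALID_STACK_ADDRESS64(FALSE, 0x00007FFF5FBFF8A0ULL, 0, 0)	// TRUE
 *	VALID_STACK_ADDRESS64(FALSE, 0x0000900000000000ULL, 0, 0)	// FALSE: hole
 */
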
typedef struct _cframe64_t {
    uint64_t    prevFP;     // can't use a real pointer here until we're a 64 bit kernel
    uint64_t    caller;
    uint64_t    args[0];
} cframe64_t;

typedef struct _cframe_t {
    uint32_t    prev;       // this is really a user32-space pointer to the previous frame
    uint32_t    caller;
    uint32_t    args[0];
} cframe_t;
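
/*
 * Layout assumed by the walkers below (the standard i386/x86-64
 * frame-pointer chain; a sketch, with the stack growing down):
 *
 *	higher addresses
 *	   ...args...
 *	   return address   <- fp + sizeof(pointer)  ("caller")
 *	   saved frame ptr  <- fp                    ("prev"/"prevFP")
 *	lower addresses
 *
 * Frames compiled with -fomit-frame-pointer break this chain, so a
 * backtrace may be truncated.
 */
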
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

static kern_return_t do_backtrace32(
    task_t task,
    thread_t thread,
    x86_saved_state32_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint32_t tmpWord = 0U;
    uint64_t currPC = (uint64_t) regs->eip;
    uint64_t currFP = (uint64_t) regs->ebp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = currPC;

    // build a backtrace of this 32 bit state.
    while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        currPC = (uint64_t) tmpWord;    // promote 32 bit address

        /*
         * retrieve contents of the frame pointer and advance to the next
         * stack frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        }
        prevFP = (uint64_t) tmpWord;    // promote 32 bit address

        if(prevFP) {
            frames[ct++] = currPC;
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
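
/*
 * Note on loop termination: a well-formed frame chain has strictly
 * increasing frame addresses, so prevFP < currFP (or a frame outside the
 * valid range) means the chain is corrupt or finished; this also guards
 * against cycles in a damaged stack.
 */
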
static kern_return_t do_backtrace64(
    task_t task,
    thread_t thread,
    x86_saved_state64_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint64_t currPC = regs->isf.rip;
    uint64_t currFP = regs->rbp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = currPC;

    // build a backtrace of this 64 bit state.
    while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's return address lives in the
        // current frame
        uint64_t caller = currFP + sizeof(uint64_t);

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next
         * stack frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
        }

        if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
            frames[ct++] = currPC;
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

static kern_return_t do_kernel_backtrace(
    thread_t thread,
    struct x86_kernel_state *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx)
{
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

#if __LP64__
    uint64_t currPC = 0ULL;
    uint64_t currFP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
    if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
#else
    uint32_t currPC = 0U;
    uint32_t currFP = 0U;
    uint32_t prevPC = 0U;
    uint32_t prevFP = 0U;
    if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
    if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
#endif

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    if(!currPC) {
        return KERN_FAILURE;
    }

    frames[ct++] = (uint64_t)currPC;

    // build a backtrace of this kernel state
#if __LP64__
    while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's return address lives on the
        // kernel stack
        uint64_t caller = currFP + sizeof(uint64_t);
#else
    while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
        uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if(!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

        if(kr != KERN_SUCCESS || !currPC) {
            currPC = 0UL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next
         * stack frame if it's valid
         */
        prevFP = 0;
        kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = (uint64_t)currPC;
            prevPC = currPC;
        }
        if(prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
    boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
    x86_saved_state_t *tagged_regs = NULL;      // kernel register state
    x86_saved_state64_t *regs64 = NULL;
    x86_saved_state32_t *regs32 = NULL;
    x86_saved_state32_t *u_regs32 = NULL;
    x86_saved_state64_t *u_regs64 = NULL;
    struct x86_kernel_state *kregs = NULL;

    if(ml_at_interrupt_context()) {

        if(user_only) {
            /* can't backtrace user state on interrupt stack. */
            return KERN_FAILURE;
        }

        /* backtracing at interrupt context? */
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            /*
             * Locate the registers for the interrupted thread, assuming it is
             * current_thread().
             */
            tagged_regs = current_cpu_datap()->cpu_int_state;

            if(is_saved_state64(tagged_regs)) {
                /* 64 bit registers */
                regs64 = saved_state64(tagged_regs);
                supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
            } else {
                /* 32 bit registers */
                regs32 = saved_state32(tagged_regs);
                supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
            }
        }
    }

    if(!ml_at_interrupt_context() && kernel_task == task) {

        if(!thread->kernel_stack) {
            return KERN_FAILURE;
        }

        // Kernel thread not at interrupt context
        kregs = (struct x86_kernel_state *)NULL;

        // nofault read of the thread->kernel_stack pointer
        if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
            return KERN_FAILURE;
        }

        // Adjust to find the saved kernel state
        kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

        supervisor = TRUE;
    } else if(!tagged_regs) {
        /*
         * not at interrupt context, or tracing a different thread than
         * current_thread() at interrupt context
         */
        tagged_regs = USER_STATE(thread);
        if(is_saved_state64(tagged_regs)) {
            /* 64 bit registers */
            regs64 = saved_state64(tagged_regs);
            supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
        } else {
            /* 32 bit registers */
            regs32 = saved_state32(tagged_regs);
            supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
        }
    }

    *count = 0;

    if(supervisor) {
        // the caller only wants a user callstack.
        if(user_only) {
            // bail - we've only got kernel state
            return KERN_FAILURE;
        }
    } else {
        // regs32(64) is not in supervisor mode.
        u_regs32 = regs32;
        u_regs64 = regs64;
        regs32 = NULL;
        regs64 = NULL;
    }

    if(user_only) {
        /* we only want to backtrace the user mode */
        if(!(u_regs32 || u_regs64)) {
            /* no user state to look at */
            return KERN_FAILURE;
        }
    }

    /*
     * Order of preference for top of stack:
     * 64 bit kernel state (not likely)
     * 32 bit kernel state
     * 64 bit user land state
     * 32 bit user land state
     */

    if(kregs) {
        /*
         * nofault read of the registers from the kernel stack (as they can
         * disappear on the fly).
         */
#if __LP64__
        if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
            return KERN_FAILURE;
        }
#else
        uint32_t tmp;
        if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
            return KERN_FAILURE;
        }
        currPC = (uint64_t)tmp;
#endif
    } else if(regs64) {
        currPC = regs64->isf.rip;
    } else if(regs32) {
        currPC = (uint64_t) regs32->eip;
    } else if(u_regs64) {
        currPC = u_regs64->isf.rip;
    } else if(u_regs32) {
        currPC = (uint64_t) u_regs32->eip;
    }

    if(!currPC) {
        /* no top of the stack, bail out */
        return KERN_FAILURE;
    }

    /* backtrace, if we have room for one */
    if(bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    /* backtrace kernel */
    if(kregs) {
        addr64_t address = 0ULL;
        size_t size = 0UL;

        // do the backtrace
        kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

        // and do a nofault read of (r|e)sp
#if __LP64__
        uint64_t rsp = 0ULL;
        size = sizeof(uint64_t);

        if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
            address = 0ULL;
        }
#else
        uint32_t rsp = 0U, tmp = 0U;
        size = sizeof(uint32_t);

        if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
            address = 0ULL;
        } else {
            address = (addr64_t)tmp;
        }
#endif

        if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t)rsp;
        }
    } else if(regs64) {
        uint64_t rsp = 0ULL;

        // backtrace the 64bit side.
        kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if(regs32) {
        uint32_t esp = 0U;

        // backtrace the 32bit side.
        kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }

    } else if(u_regs64) {
        /* backtrace user land */
        uint64_t rsp = 0ULL;

        kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if(u_regs32) {
        uint32_t esp = 0U;

        kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    }

    *count = bufferIndex;
    return kr;
}
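
/*
 * Example usage (a sketch of a hypothetical in-kernel sampler, not part of
 * this file):
 *
 *	uint64_t frames[64];
 *	mach_msg_type_number_t nframes = 64;
 *	kern_return_t kr = chudxnu_thread_get_callstack64(thread, frames,
 *		&nframes, FALSE);	// FALSE: include kernel frames
 *	if(kr == KERN_SUCCESS) {
 *		// frames[0] is the topmost PC; deeper callers follow, and the
 *		// final entry may be a stack-pointer value appended above.
 *	}
 */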