/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>

#pragma mark **** thread state ****
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether the thread is 64-bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() is different from the PPC version,
		// which returns the previous save area - user or kernel - rather than
		// kernel state, or NULL if no kernel interrupt state is available

		// the real purpose of this branch is the following:
		// the caller doesn't care whether the thread state is user or kernel,
		// they just want the thread state, so we need to determine the proper
		// one to return, kernel or user, for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// interrupt happened in the kernel
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}
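/*
 * Usage sketch (illustrative, not called anywhere in this file): a sampling
 * client holding a thread_t reference could fetch 64-bit user state with the
 * standard flavor constants from <mach/i386/thread_status.h>:
 *
 *	x86_thread_state64_t state;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE64,
 *			(thread_state_t)&state, &count, TRUE);	// TRUE: user state only
 */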
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}
#pragma mark **** task memory read/write ****
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {
		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if(current_task()==task) {
		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}
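/*
 * Note on the two paths above: when the target is the current task the copy
 * can go straight through copyin()/copyout(); for a foreign task the code
 * looks up that task's vm_map and lets vm_map_read_user()/vm_map_write_user()
 * perform the cross-map copy instead.
 */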
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}
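/*
 * The "== size" check works because ml_nofault_copy() returns the number of
 * bytes it actually copied, stopping at the first unmapped page rather than
 * taking a fault; a short copy is reported as KERN_FAILURE.
 */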
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
		KERN_SUCCESS : KERN_FAILURE);
}
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
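/*
 * The user-mode arm of VALID_STACK_ADDRESS64 is the "hole" check referenced
 * above: with 48-bit virtual addressing, x86-64 only honors canonical
 * addresses below 0x0000800000000000 or at/above 0xFFFF800000000000, so any
 * frame pointer landing between those bounds can never be a mapped user
 * address and the walk is cut off before a read is attempted.
 */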
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;

typedef struct _cframe_t {
	uint32_t	prev;		// this is really a user32-space pointer to the previous frame
	uint32_t	caller;
	uint32_t	args[0];
} cframe_t;
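/*
 * Both layouts mirror the conventional x86 prologue (push %ebp/%rbp, then
 * mov %esp/%rsp into %ebp/%rbp), so a frame pointer fp addresses:
 *
 *	fp + 0				saved frame pointer of the caller (prev/prevFP)
 *	fp + 4 (fp + 8 on 64-bit)	return address (caller)
 *	above that			any stack-passed arguments (args)
 */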
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;	// promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;	// promote 32 bit address

		if(prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
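/*
 * Worked example of one loop iteration above, assuming a hypothetical 32-bit
 * user frame at currFP = 0xbffff8c8: the return address is read from
 * 0xbffff8cc (&fp->caller) and the caller's frame pointer from 0xbffff8c8
 * itself (&fp->prev). A chain that stops growing toward higher addresses
 * (prevFP < currFP), including a NULL prevFP, terminates the walk.
 */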
static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
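/*
 * This 64-bit walk differs from do_backtrace32() in two ways: the return
 * address is read directly from currFP + 8 instead of through a cframe_t,
 * and candidate frame pointers are screened with VALID_STACK_ADDRESS64 so
 * user-mode chains pointing into the non-canonical hole are rejected before
 * any read is issued.
 */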
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives on
		// this kernel stack
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
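/*
 * Unlike the two walks above, this variant never touches user memory: the
 * starting PC and frame pointer come from the struct x86_kernel_state that
 * the context switch code leaves at the top of the thread's kernel stack
 * (located via STACK_IKS() by the caller), and every subsequent read goes
 * through chudxnu_kern_read().
 */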
kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only,
	boolean_t		kern_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if(user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */
#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp = 0U;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0ULL, tmp = 0ULL;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64 && !kern_only) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(u_regs32 && !kern_only) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		is_user)
{
	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}
kern_return_t
chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, user_only, 0);
}