/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
static uint64_t
chudxnu_vm_unslide( uint64_t ptr, int kaddr )
{
    if( !kaddr )
        return ptr;

    return VM_KERNEL_UNSLIDE(ptr);
}
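/*
 * Note: VM_KERNEL_UNSLIDE() subtracts the kernel's KASLR slide so the
 * returned PC matches the symbol addresses in the on-disk kernel image.
 * User addresses are never slid, which is why non-kernel pointers
 * ("kaddr" false) pass through untouched.
 */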
#pragma mark **** thread state ****
__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    if (user_only) {
        /* We can't get user state for kernel threads */
        if (thread->task == kernel_task)
            return KERN_FAILURE;
        /* this properly handles deciding whether or not the thread is 64 bit */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // i386 machine_thread_get_kern_state() is different from the PPC version,
        // which returns the previous save area - user or kernel - rather than the
        // kernel state, or NULL if no kernel interrupt state is available.

        // the real purpose of this branch is the following:
        // the caller doesn't care whether the thread state is user or kernel, it
        // just wants the thread state, so we need to determine the proper one
        // to return, kernel or user, for the given thread.
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // the above are conditions where we possibly can read the kernel
            // state. we still need to determine if this interrupt happened in
            // kernel or user context
            if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
               current_cpu_datap()->cpu_interrupt_level == 1) {
                // interrupt happened in user land
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // interrupt happened in the kernel
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}
__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count,
    boolean_t               user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}
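/*
 * Note: unlike the get-state path above, set-state makes no user/kernel
 * distinction; the user_only flag is accepted only for symmetry with
 * chudxnu_thread_get_state() and is deliberately ignored.
 */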
#pragma mark **** task memory read/write ****
__private_extern__ kern_return_t
chudxnu_task_read(
    task_t      task,
    void        *kernaddr,
    uint64_t    usraddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // Can't look at tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {
        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}
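/*
 * Illustrative use (the address "usraddr" below is hypothetical): sampling
 * a word out of another task's address space goes through that task's
 * vm_map, while the current task gets the cheaper copyin() path.
 *
 *	uint32_t word;
 *	if(chudxnu_task_read(task, &word, usraddr, sizeof(word)) != KERN_SUCCESS) {
 *		// unmapped or invalid user address -- drop the sample
 *	}
 */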
__private_extern__ kern_return_t
chudxnu_task_write(
    task_t      task,
    uint64_t    useraddr,
    void        *kernaddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // can't poke into tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {
        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}
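/*
 * Note: ml_nofault_copy() copies only through currently-valid mappings and
 * returns the number of bytes it actually moved, so a partial copy shows up
 * here as KERN_FAILURE. Because it never takes a page fault, these two
 * helpers are safe to call from interrupt context.
 */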
__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void        *srcaddr,
    vm_size_t   size)
{
    return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)

// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
     ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
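/*
 * The "hole" is the non-canonical region of the x86-64 address space: only
 * the low 48 bits of an address are significant, so valid pointers lie in
 * 0x0000000000000000-0x00007FFFFFFFFFFF (user) or 0xFFFF800000000000 and up
 * (kernel). Anything in between can never be a real mapping, so the
 * user-mode branch rejects it outright instead of wasting a copy attempt.
 */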
typedef struct _cframe64_t {
    uint64_t    prevFP;     // can't use a real pointer here until we're a 64 bit kernel
    uint64_t    caller;
} cframe64_t;

typedef struct _cframe_t {
    uint32_t    prev;       // this is really a user32-space pointer to the previous frame
    uint32_t    caller;
} cframe_t;
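/*
 * Both structs describe the standard x86 frame linkage laid down by the
 * usual function prologue (sketch, 32-bit shown):
 *
 *	push	%ebp		; becomes fp->prev
 *	mov	%esp, %ebp	; the "call" already pushed fp->caller
 *
 * i.e. *fp is the previous frame pointer and the word just above it is the
 * caller's return address, which is exactly what the walkers below read.
 */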
extern void *find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
static kern_return_t
do_backtrace32(
    task_t                  task,
    thread_t                thread,
    x86_saved_state32_t     *regs,
    uint64_t                *frames,
    mach_msg_type_number_t  *start_idx,
    mach_msg_type_number_t  max_idx,
    boolean_t               supervisor)
{
    uint32_t tmpWord = 0UL;
    uint64_t currPC = (uint64_t) regs->eip;
    uint64_t currFP = (uint64_t) regs->ebp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 32 bit state.
    while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        currPC = (uint64_t) tmpWord;    // promote 32 bit address

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        }
        prevFP = (uint64_t) tmpWord;    // promote 32 bit address

        if(prevFP) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
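/*
 * Termination rationale: x86 stacks grow downward, so each older frame must
 * sit at a strictly higher address than the one below it. A saved frame
 * pointer that is NULL, points lower than the current frame, or leaves the
 * valid range marks the end of the chain (or corruption), and the walk
 * stops rather than looping forever on a bad chain.
 */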
static kern_return_t
do_backtrace64(
    task_t                  task,
    thread_t                thread,
    x86_saved_state64_t     *regs,
    uint64_t                *frames,
    mach_msg_type_number_t  *start_idx,
    mach_msg_type_number_t  max_idx,
    boolean_t               supervisor)
{
    uint64_t currPC = regs->isf.rip;
    uint64_t currFP = regs->rbp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

    // build a backtrace of this 64 bit state.
    while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's PC lives in the user thread
        uint64_t caller = currFP + sizeof(uint64_t);

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
        } else {
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
        }

        if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
            frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
static kern_return_t
do_kernel_backtrace(
    thread_t                thread,
    struct x86_kernel_state *regs,
    uint64_t                *frames,
    mach_msg_type_number_t  *start_idx,
    mach_msg_type_number_t  max_idx)
{
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

#if __LP64__
    uint64_t currPC = 0ULL;
    uint64_t currFP = 0ULL;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
    if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
        return KERN_FAILURE;
    }
#else
    uint32_t currPC = 0U;
    uint32_t currFP = 0U;
    uint32_t prevPC = 0U;
    uint32_t prevFP = 0U;
    if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
    if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
        return KERN_FAILURE;
    }
#endif

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    if(!currPC) {
        return KERN_FAILURE;
    }

    frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

    // build a backtrace of this kernel state
#if __LP64__
    while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
        // this is the address where the caller's PC lives on the kernel stack
        uint64_t caller = currFP + sizeof(uint64_t);
#else
    while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
        uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

        if(!currFP || !currPC) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

        if(kr != KERN_SUCCESS || !currPC) {
            currPC = 0UL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next stack
         * frame if it's valid
         */
        prevFP = 0;
        kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
        if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
        if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
            frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
            prevPC = currPC;
        }
        if(prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}
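/*
 * Note: "regs" points at the x86_kernel_state saved at the top of the
 * thread's kernel stack (see the STACK_IKS() adjustment below), which can
 * be reused the moment the thread runs again -- hence every dereference
 * above goes through the nofault chudxnu_kern_read(). This walker also
 * bails on prevFP <= currFP (not just <), so a frame pointing at itself
 * cannot spin the sampler.
 */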
__private_extern__ kern_return_t
chudxnu_thread_get_callstack64(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0ULL;
    boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
    x86_saved_state_t *tagged_regs = NULL;      // kernel register state
    x86_saved_state64_t *regs64 = NULL;
    x86_saved_state32_t *regs32 = NULL;
    x86_saved_state32_t *u_regs32 = NULL;
    x86_saved_state64_t *u_regs64 = NULL;
    struct x86_kernel_state *kregs = NULL;

    if(ml_at_interrupt_context()) {

        if(user_only) {
            /* can't backtrace user state on interrupt stack. */
            return KERN_FAILURE;
        }

        /* backtracing at interrupt context? */
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            /*
             * Locate the registers for the interrupted thread, assuming it is
             * current_thread().
             */
            tagged_regs = current_cpu_datap()->cpu_int_state;

            if(is_saved_state64(tagged_regs)) {
                /* 64 bit registers */
                regs64 = saved_state64(tagged_regs);
                supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
            } else {
                /* 32 bit registers */
                regs32 = saved_state32(tagged_regs);
                supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
            }
        }
    }

    if(!ml_at_interrupt_context() && kernel_task == task) {

        if(!thread->kernel_stack) {
            return KERN_FAILURE;
        }

        // Kernel thread not at interrupt context
        kregs = (struct x86_kernel_state *)NULL;

        // nofault read of the thread->kernel_stack pointer
        if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
            return KERN_FAILURE;
        }

        // Adjust to find the saved kernel state
        kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

        supervisor = TRUE;
    } else if(!tagged_regs) {
        /*
         * not at interrupt context, or tracing a different thread than
         * current_thread() at interrupt context
         */
        tagged_regs = USER_STATE(thread);
        if(is_saved_state64(tagged_regs)) {
            /* 64 bit registers */
            regs64 = saved_state64(tagged_regs);
            supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
        } else {
            /* 32 bit registers */
            regs32 = saved_state32(tagged_regs);
            supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
        }
    }

    *count = 0;

    if(supervisor) {
        // the caller only wants a user callstack.
        if(user_only) {
            // bail - we've only got kernel state
            return KERN_FAILURE;
        }
    } else {
        // regs32(64) is not in supervisor mode.
        u_regs32 = regs32;
        u_regs64 = regs64;
        regs32 = NULL;
        regs64 = NULL;
    }

    if(user_only) {
        /* we only want to backtrace the user mode */
        if(!(u_regs32 || u_regs64)) {
            /* no user state to look at */
            return KERN_FAILURE;
        }
    }

    /*
     * Order of preference for top of stack:
     * 64 bit kernel state (not likely)
     * 32 bit kernel state
     * 64 bit user land state
     * 32 bit user land state
     */

    if(kregs) {
        /*
         * nofault read of the registers from the kernel stack (as they can
         * disappear on the fly).
         */
#if __LP64__
        if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
            return KERN_FAILURE;
        }
#else
        uint32_t tmp;
        if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
            return KERN_FAILURE;
        }
        currPC = (uint64_t)tmp;
#endif
    } else if(regs64) {
        currPC = regs64->isf.rip;
    } else if(regs32) {
        currPC = (uint64_t) regs32->eip;
    } else if(u_regs64) {
        currPC = u_regs64->isf.rip;
    } else if(u_regs32) {
        currPC = (uint64_t) u_regs32->eip;
    }

    if(!currPC) {
        /* no top of the stack, bail out */
        return KERN_FAILURE;
    }

    /* backtrace! */
    if(bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    /* backtrace kernel */
    if(kregs) {
        addr64_t address = 0ULL;
        size_t size = 0UL;

        // do the backtrace
        kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

        // and do a nofault read of (r|e)sp
#if __LP64__
        uint64_t rsp = 0ULL;
        size = sizeof(uint64_t);

        if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
            address = 0ULL;
        }
#else
        uint32_t rsp = 0U, tmp = 0U;
        size = sizeof(uint32_t);

        if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
            address = 0ULL;
        } else {
            address = (addr64_t)tmp;
        }
#endif

        if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t)rsp;
        }
    } else if(regs64) {
        uint64_t rsp = 0ULL;

        // backtrace the 64bit side.
        kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
                            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }
    } else if(regs32) {
        uint32_t esp = 0UL;

        // backtrace the 32bit side.
        kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
                            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    } else if(u_regs64) {
        /* backtrace user land */
        uint64_t rsp = 0ULL;

        kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
                            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }
    } else if(u_regs32) {
        uint32_t esp = 0UL;

        kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
                            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    }

    *count = bufferIndex;
    return kr;
}
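/*
 * Usage sketch (illustrative only; the 128-entry buffer is an arbitrary
 * choice):
 *
 *	uint64_t frames[128];
 *	mach_msg_type_number_t nframes = 128;
 *
 *	if(KERN_SUCCESS == chudxnu_thread_get_callstack64(thread, frames,
 *	                                                  &nframes, FALSE)) {
 *		// frames[0..nframes-1] holds unslid PCs for the sampled
 *		// thread, with the sampled stack-pointer value appended
 *		// when buffer room and register state allow.
 *	}
 */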