/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;

		/* this properly handles deciding whether or not the thread is 64 bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() is different from the PPC version,
		// which returns the previous save area - user or kernel - rather than
		// kernel, or NULL if no kernel interrupt state is available.

		// the real purpose of this branch is the following:
		// the caller doesn't care whether the thread state is user or kernel,
		// they just want the thread state, so we need to determine the proper
		// one to return, kernel or user, for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// interrupt happened in the kernel
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}
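/*
 * Usage sketch (illustrative, not part of the original file): fetching the
 * 32-bit register state of a user thread. The variable names are assumptions;
 * x86_THREAD_STATE32 and x86_THREAD_STATE32_COUNT come from
 * <mach/thread_status.h>.
 *
 *	x86_thread_state32_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
 *	kern_return_t kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE32,
 *		(thread_state_t) &ts, &count, TRUE);
 */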
#pragma mark **** task memory read/write ****

// signature reconstructed from the body below; parameter types are assumptions
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // Can't look at tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);
	if(current_task()==task) {
		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}
// signature reconstructed from the body below; parameter types are assumptions
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if(ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);
	if(current_task()==task) {
		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
			KERN_SUCCESS : KERN_FAILURE);
}
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
			KERN_SUCCESS : KERN_FAILURE);
}
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr)	\
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)

// don't try to read in the non-canonical address hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
	 (addr != 0ULL && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))
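/*
 * Added note: x86-64 hardware only maps "canonical" addresses, where bits
 * 63:47 all equal bit 47. For example, VALID_STACK_ADDRESS64(FALSE,
 * 0x0000800000000000ULL, min, max) evaluates to FALSE because that address
 * lies in the non-canonical hole between 0x00007FFFFFFFFFFF and
 * 0xFFFF800000000000; attempting to read it would fault pointlessly.
 */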
// remaining fields assumed to mirror the 32-bit cframe_t layout below
typedef struct _cframe64_t {
	uint64_t	prevFP;		// can't use a real pointer here until we're a 64 bit kernel
	uint64_t	caller;
	uint64_t	args[0];
} cframe64_t;

typedef struct _cframe_t {
	struct _cframe_t	*prev;	// when we go 64 bits, this needs to be capped at 32 bits
	uint32_t		caller;
	uint32_t		args[0];
} cframe_t;
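/*
 * Added note: with frame pointers enabled, an i386 frame at %ebp holds the
 * caller's frame pointer at *(%ebp) and the return address at *(%ebp + 4),
 * matching the prev/caller pair above. The x86-64 layout is identical with
 * 8-byte slots, which is why do_backtrace64() below reads the return address
 * at currFP + sizeof(uint64_t).
 */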
extern void *find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
static kern_return_t
do_backtrace32(
	task_t			task,
	thread_t		thread,
	x86_saved_state32_t	*regs,
	uint64_t		*frames,
	mach_msg_type_number_t	*start_idx,
	mach_msg_type_number_t	max_idx,
	boolean_t		supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;
	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;
	// build a backtrace of this 32 bit state.
	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uint32_t) currFP;

		if(ct >= max_idx) {
			// out of frame slots
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}
		/* read our caller */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord;	// promote 32 bit address
		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord;	// promote 32 bit address
		if(prevFP) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
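/*
 * Added note: the walk terminates when the saved frame pointer fails to move
 * strictly up the stack (prevFP < currFP). Stacks grow downward on x86, so
 * every older frame must sit at a higher address; a non-increasing frame
 * pointer indicates a corrupt or cyclic frame chain.
 */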
static kern_return_t
do_backtrace64(
	task_t			task,
	thread_t		thread,
	x86_saved_state64_t	*regs,
	uint64_t		*frames,
	mach_msg_type_number_t	*start_idx,
	mach_msg_type_number_t	max_idx,
	boolean_t		supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + KERNEL_STACK_SIZE;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;
	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	frames[ct++] = currPC;
	// build a backtrace of this 64 bit state.
	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where the caller's return address lives in the frame
		uint64_t caller = currFP + sizeof(uint64_t);
		if(ct >= max_idx) {
			// out of frame slots
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}
		/* read our caller */
		if(supervisor) {
			// kernel frames mirror the supervisor path in do_backtrace32 above
			kr = chudxnu_kern_read(&currPC, (vm_offset_t) caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}
		/*
		 * retrieve contents of the frame pointer and advance to the next stack
		 * frame if it's valid
		 */
		if(supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t) currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}
		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = currPC;
			prevPC = currPC;
		}
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
kern_return_t
chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;
			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;
	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}
	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */
	if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}
	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferMaxIndex -= 1; // allot space for saving the stack pointer at the end

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}
	/* backtrace kernel */
	if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);
		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (addr64_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(regs32) {
		uint32_t esp = 0UL;
		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);
		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (addr64_t) regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);
		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(u_regs32) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);
		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		   bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
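/*
 * Usage sketch (illustrative, not part of the original file): a sampling
 * client might collect a callstack like this; the buffer size is an
 * arbitrary assumption.
 *
 *	uint64_t frames[32];
 *	mach_msg_type_number_t nframes = 32;
 *	if(chudxnu_thread_get_callstack64(thread, frames, &nframes, FALSE) ==
 *	   KERN_SUCCESS) {
 *		// frames[0] is the topmost PC; the stack pointer is appended last
 *	}
 */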
#pragma mark **** DEPRECATED ****

__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
	thread_t		thread,
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr;
	task_t task = thread->task;
	uint32_t currPC = 0;
	uint32_t currFP = 0;
	uint32_t prevFP = 0;
	uint32_t prevPC = 0;
	uint32_t esp = 0;
	uint32_t kernStackMin = thread->kernel_stack;
	uint32_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
	uint32_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	x86_saved_state32_t *regs = NULL;
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		regs = USER_REGS32(thread);
	} else {
		regs = saved_state32(current_cpu_datap()->cpu_int_state);
	}

	if (regs == NULL) {
		*count = 0;
		return KERN_FAILURE;
	}
	supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

	currPC = regs->eip;
	currFP = regs->ebp;
	bufferMaxIndex -= 1; // allot space for saving userland %esp on stack
	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}
	buffer[bufferIndex++] = currPC; //save PC in position 0.
	// Now, fill buffer with stack backtraces.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) currFP;
		if (bufferIndex >= bufferMaxIndex) {
			*count = bufferMaxIndex;
			return KERN_RESOURCE_SHORTAGE;
		}
		if (supervisor) {
			kr = chudxnu_kern_read(
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		} else {
			kr = chudxnu_task_read(
				task,
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		}
		if (kr != KERN_SUCCESS)
			break;
		// retrieve the contents of the frame pointer
		// and advance to the prev stack frame if it's valid
		if (supervisor) {
			kr = chudxnu_kern_read(
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		} else {
			kr = chudxnu_task_read(
				task,
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		}
		if (prevFP) {
			buffer[bufferIndex++] = currPC;
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}
	// put the stack pointer on the bottom of the backtrace
	if(!supervisor) {
		kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
		if(kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = esp;
		}
	}

	*count = bufferIndex;
	return KERN_SUCCESS;
}