/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>

#pragma mark **** thread state ****

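/*
 * On i386, user register state can always be retrieved for a thread
 * (the function below succeeds unconditionally); the query exists for
 * parity with architectures where that is not the case.
 */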
__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
    return KERN_SUCCESS;
}

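/*
 * Example usage of chudxnu_thread_get_state() below (a sketch; hypothetical
 * caller, not code in this file):
 *
 *  x86_thread_state32_t ts;
 *  mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
 *  kern_return_t kr = chudxnu_thread_get_state(thread, x86_THREAD_STATE32,
 *      (thread_state_t)&ts, &count, TRUE);    // user state only
 */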
__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    if (user_only) {
        /* We can't get user state for kernel threads */
        if (thread->task == kernel_task)
            return KERN_FAILURE;
        /* this properly handles deciding whether the thread is 64 bit or not */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // i386 machine_thread_get_kern_state() differs from the PPC version:
        // PPC returns the previous save area - user or kernel - whereas i386
        // returns the kernel state, or NULL if no kernel interrupt state is
        // available.

        // the real purpose of this branch is the following: the caller
        // doesn't care whether the thread state is user or kernel, they just
        // want the thread state, so we need to determine the proper one to
        // return, kernel or user, for the given thread.
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // under these conditions we may be able to read the kernel
            // state. we still need to determine whether this interrupt
            // happened in kernel or user context.
            if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
               current_cpu_datap()->cpu_interrupt_level == 1) {
                // interrupt happened in user land
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // kernel interrupt.
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}

__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count,
    boolean_t               user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}

#pragma mark **** task memory read/write ****

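/*
 * The pair below copies memory to and from an arbitrary task. For the
 * current task, plain copyin()/copyout() suffice because its address space
 * is the live one; for any other task the copy goes through that task's
 * vm_map via vm_map_read_user()/vm_map_write_user(). Both paths can fault,
 * which is why they are refused at interrupt context.
 */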
__private_extern__ kern_return_t
chudxnu_task_read(
    task_t      task,
    void        *kernaddr,
    uint64_t    usraddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // Can't look at tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
    task_t      task,
    uint64_t    useraddr,
    void        *kernaddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;
    boolean_t old_level;

    if(ml_at_interrupt_context()) {
        return KERN_FAILURE; // can't poke into tasks on interrupt stack
    }

    /*
     * pmap layer requires interrupts to be on
     */
    old_level = ml_set_interrupts_enabled(TRUE);

    if(current_task()==task) {

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    ml_set_interrupts_enabled(old_level);

    return ret;
}

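/*
 * chudxnu_kern_read()/chudxnu_kern_write() below rely on ml_nofault_copy(),
 * which copies in page-bounded chunks and returns the number of bytes
 * actually copied, stopping short if it reaches an unmapped page; anything
 * less than a full copy is reported here as KERN_FAILURE.
 */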
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    return (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}

__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void        *srcaddr,
    vm_size_t   size)
{
    return (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) == size ?
            KERN_SUCCESS : KERN_FAILURE);
}

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
    (addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))
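/*
 * The 64-bit user check above rejects the x86-64 non-canonical address
 * "hole": valid virtual addresses must have bits 48-63 equal to bit 47,
 * i.e. lie at or below 0x00007FFFFFFFFFFF or at or above
 * 0xFFFF800000000000, so a frame pointer inside the hole can never be
 * dereferenced successfully.
 */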
typedef struct _cframe64_t {
    uint64_t    prevFP; // can't use a real pointer here until we're a 64 bit kernel
    uint64_t    caller;
    uint64_t    args[0];
} cframe64_t;


typedef struct _cframe_t {
    struct _cframe_t *prev; // when we go 64 bits, this needs to be capped at 32 bits
    uint32_t    caller;
    uint32_t    args[0];
} cframe_t;

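/*
 * cframe_t matches the conventional IA-32 frame layout: %ebp points at the
 * saved caller %ebp (prev), with the return address (caller) immediately
 * above it and call arguments above that:
 *
 *      :    ...     :
 *      |   args     |
 *      +------------+
 *      | ret addr   |  <- fp->caller
 *      +------------+
 *      | saved %ebp |  <- fp, fp->prev
 *      +------------+
 */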
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

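/*
 * do_backtrace32() walks a chain of 32-bit frame pointers starting from the
 * given saved state, appending each return address to frames[] zero-extended
 * to 64 bits. *start_idx is the starting write index on entry and the total
 * number of entries written on return. The prevFP < currFP test stops the
 * walk when the chain fails to move toward higher addresses, which also
 * breaks cycles in a corrupt stack.
 */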
static kern_return_t do_backtrace32(
    task_t task,
    thread_t thread,
    x86_saved_state32_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint32_t tmpWord = 0UL;
    uint64_t currPC = (uint64_t) regs->eip;
    uint64_t currFP = (uint64_t) regs->ebp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = thread->kernel_stack;
    uint64_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(ct >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = currPC;

    // build a backtrace of this 32 bit state.
    while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) (uint32_t) currFP;

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        currPC = (uint64_t) tmpWord;    // promote 32 bit address

        /*
         * retrieve contents of the frame pointer and advance to the next
         * stack frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        } else {
            kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
        }
        prevFP = (uint64_t) tmpWord;    // promote 32 bit address

        if(prevFP) {
            frames[ct++] = currPC;
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

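/*
 * do_backtrace64() is the 64-bit counterpart of do_backtrace32(). Supervisor
 * (kernel) frames are never read here (both read paths substitute
 * KERN_FAILURE), presumably because this 32-bit kernel has no 64-bit kernel
 * stacks to walk; only 64-bit user stacks are supported.
 */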
static kern_return_t do_backtrace64(
    task_t task,
    thread_t thread,
    x86_saved_state64_t *regs,
    uint64_t *frames,
    mach_msg_type_number_t *start_idx,
    mach_msg_type_number_t max_idx,
    boolean_t supervisor)
{
    uint64_t currPC = regs->isf.rip;
    uint64_t currFP = regs->rbp;
    uint64_t prevPC = 0ULL;
    uint64_t prevFP = 0ULL;
    uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
    uint64_t kernStackMax = (uint64_t)kernStackMin + KERNEL_STACK_SIZE;
    mach_msg_type_number_t ct = *start_idx;
    kern_return_t kr = KERN_FAILURE;

    if(*start_idx >= max_idx)
        return KERN_RESOURCE_SHORTAGE;  // no frames traced

    frames[ct++] = currPC;

    // build a backtrace of this 64 bit state.
    while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
        // the address of the return-address slot (fp->caller) in the user
        // thread's stack
        uint64_t caller = currFP + sizeof(uint64_t);

        if(!currFP) {
            currPC = 0;
            break;
        }

        if(ct >= max_idx) {
            *start_idx = ct;
            return KERN_RESOURCE_SHORTAGE;
        }

        /* read our caller */
        if(supervisor) {
            // supervisor reads are unsupported for 64-bit state; fail the walk
            kr = KERN_FAILURE;
        } else {
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
        }

        if(kr != KERN_SUCCESS) {
            currPC = 0ULL;
            break;
        }

        /*
         * retrieve contents of the frame pointer and advance to the next
         * stack frame if it's valid
         */
        prevFP = 0;
        if(supervisor) {
            kr = KERN_FAILURE;
        } else {
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
        }

        if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
            frames[ct++] = currPC;
            prevPC = currPC;
        }
        if(prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    *start_idx = ct;
    return KERN_SUCCESS;
}

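/*
 * Example usage of chudxnu_thread_get_callstack64() below (a sketch;
 * hypothetical sampling client, not code in this file):
 *
 *  uint64_t frames[64];
 *  mach_msg_type_number_t nframes = 64;
 *  if(chudxnu_thread_get_callstack64(thread, frames, &nframes, FALSE)
 *     == KERN_SUCCESS) {
 *      // frames[0] is the PC, followed by return addresses; the final
 *      // entry is the stack pointer when it could be read.
 *  }
 */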
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr = KERN_FAILURE;
    task_t task = thread->task;
    uint64_t currPC = 0;
    boolean_t supervisor = FALSE;
    mach_msg_type_number_t bufferIndex = 0;
    mach_msg_type_number_t bufferMaxIndex = *count;
    x86_saved_state_t *tagged_regs = NULL;  // kernel register state
    x86_saved_state64_t *regs64 = NULL;
    x86_saved_state32_t *regs32 = NULL;
    x86_saved_state32_t *u_regs32 = NULL;
    x86_saved_state64_t *u_regs64 = NULL;

    if(ml_at_interrupt_context()) {

        if(user_only) {
            /* can't backtrace user state on interrupt stack. */
            return KERN_FAILURE;
        }

        /* backtracing at interrupt context? */
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            /*
             * Locate the registers for the interrupted thread, assuming it is
             * current_thread().
             */
            tagged_regs = current_cpu_datap()->cpu_int_state;

            if(is_saved_state64(tagged_regs)) {
                /* 64 bit registers */
                regs64 = saved_state64(tagged_regs);
                supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
            } else {
                /* 32 bit registers */
                regs32 = saved_state32(tagged_regs);
                supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
            }
        }
    }

    if(!tagged_regs) {
        /*
         * not at interrupt context, or tracing a different thread than
         * current_thread() at interrupt context
         */
        tagged_regs = USER_STATE(thread);
        if(is_saved_state64(tagged_regs)) {
            /* 64 bit registers */
            regs64 = saved_state64(tagged_regs);
            supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
        } else {
            /* 32 bit registers */
            regs32 = saved_state32(tagged_regs);
            supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
        }
    }

    *count = 0;

    if(supervisor) {
        // the caller only wants a user callstack.
        if(user_only) {
            // bail - we've only got kernel state
            return KERN_FAILURE;
        }
    } else {
        // regs32/regs64 are not in supervisor mode.
        u_regs32 = regs32;
        u_regs64 = regs64;
        regs32 = NULL;
        regs64 = NULL;
    }

    if (user_only) {
        /* we only want to backtrace the user mode */
        if(!(u_regs32 || u_regs64)) {
            /* no user state to look at */
            return KERN_FAILURE;
        }
    }

    /*
     * Order of preference for top of stack:
     * 64 bit kernel state (not likely)
     * 32 bit kernel state
     * 64 bit user land state
     * 32 bit user land state
     */

    if(regs64) {
        currPC = regs64->isf.rip;
    } else if(regs32) {
        currPC = (uint64_t) regs32->eip;
    } else if(u_regs64) {
        currPC = u_regs64->isf.rip;
    } else if(u_regs32) {
        currPC = (uint64_t) u_regs32->eip;
    }

    if(!currPC) {
        /* no top of the stack, bail out */
        return KERN_FAILURE;
    }

    bufferIndex = 0;

    if(bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    /* backtrace kernel */
    if(regs64) {
        uint64_t rsp = 0ULL;

        // backtrace the 64bit side.
        kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (addr64_t) regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if(regs32) {
        uint32_t esp = 0UL;

        // backtrace the 32bit side.
        kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
            bufferMaxIndex, TRUE);

        if(KERN_SUCCESS == chudxnu_kern_read(&esp, (addr64_t) regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    } else if(u_regs64) {
        /* backtrace user land */
        uint64_t rsp = 0ULL;

        kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = rsp;
        }

    } else if(u_regs32) {
        uint32_t esp = 0UL;

        kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
            bufferMaxIndex, FALSE);

        if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
           bufferIndex < bufferMaxIndex) {
            callstack[bufferIndex++] = (uint64_t) esp;
        }
    }

    *count = bufferIndex;
    return kr;
}

#pragma mark **** DEPRECATED ****

// DEPRECATED
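// Prefer chudxnu_thread_get_callstack64() above: this variant only handles
// 32-bit register state and reports 32-bit addresses, and appears to be
// retained for older CHUD clients.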
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
    thread_t                thread,
    uint32_t                *callStack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr;
    task_t task = thread->task;
    uint32_t currPC;
    uint32_t currFP;
    uint32_t prevFP = 0;
    uint32_t prevPC = 0;
    uint32_t esp = 0;
    uint32_t kernStackMin = thread->kernel_stack;
    uint32_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
    uint32_t *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    x86_saved_state32_t *regs = NULL;

    if (user_only) {
        /* We can't get user state for kernel threads */
        if (task == kernel_task) {
            return KERN_FAILURE;
        }
        regs = USER_REGS32(thread);
    } else {
        regs = saved_state32(current_cpu_datap()->cpu_int_state);
    }

    if (regs == NULL) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

    currPC = regs->eip;
    currFP = regs->ebp;

    bufferIndex = 0;
    if(!supervisor)
        bufferMaxIndex -= 1;    // allot space for saving userland %esp on stack
    if (bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) currFP;

        if (bufferIndex >= bufferMaxIndex) {
            *count = bufferMaxIndex;
            return KERN_RESOURCE_SHORTAGE;
        }

        if (supervisor) {
            kr = chudxnu_kern_read(
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        } else {
            kr = chudxnu_task_read(
                task,
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        }
        if (kr != KERN_SUCCESS)
            break;

        // retrieve the contents of the frame pointer
        // and advance to the prev stack frame if it's valid
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        } else {
            kr = chudxnu_task_read(
                task,
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        }
        if (prevFP) {
            buffer[bufferIndex++] = currPC;
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    // put the stack pointer on the bottom of the backtrace
    if(!supervisor) {
        kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
        if(kr == KERN_SUCCESS) {
            buffer[bufferIndex++] = esp;
        }
    }

    *count = bufferIndex;
    return KERN_SUCCESS;
}