/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>

#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    if (user_only) {
        /* We can't get user state for kernel threads */
        if (thread->task == kernel_task)
            return KERN_FAILURE;
        /* machine_thread_get_state() handles deciding whether the thread is 64-bit */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // i386 machine_thread_get_kern_state() differs from the PPC version, which
        // returns the previous save area - user or kernel - rather than the kernel
        // state, or NULL if no kernel interrupt state is available.

        // The purpose of this branch: the caller doesn't care whether the thread
        // state is user or kernel, it just wants the thread state, so we need to
        // determine the proper one - kernel or user - to return for the given
        // thread.
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // Under the above conditions we may be able to read the kernel
            // state. We still need to determine whether this interrupt
            // happened in kernel or user context.
            if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
               current_cpu_datap()->cpu_interrupt_level == 1) {
                // interrupt happened in user land
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // kernel interrupt.
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}
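
/*
 * Illustrative sketch (not part of the original file): how a profiling client
 * might call chudxnu_thread_get_state(). The x86_THREAD_STATE32 flavor is the
 * standard Mach flavor; sample_user_regs() is a hypothetical helper named
 * for this example only.
 */
#if 0
static kern_return_t
sample_user_regs(thread_t thread)
{
    x86_thread_state32_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;

    /* user_only == TRUE: fails for kernel threads, otherwise returns user state */
    return chudxnu_thread_get_state(thread, x86_THREAD_STATE32,
                                    (thread_state_t)&state, &count, TRUE);
}
#endif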

__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count,
    boolean_t               user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}

#pragma mark **** task memory read/write ****

__private_extern__ kern_return_t
chudxnu_task_read(
    task_t      task,
    void        *kernaddr,
    uint64_t    usraddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(current_task() == task) {
        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}

__private_extern__ kern_return_t
chudxnu_task_write(
    task_t      task,
    uint64_t    useraddr,
    void        *kernaddr,
    vm_size_t   size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(current_task() == task) {
        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}
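
/*
 * Illustrative sketch (not part of the original file): copying a word out of
 * and back into a target task's address space with the two helpers above.
 * peek_poke_user_word() and its user_va parameter are assumptions made for
 * this example only.
 */
#if 0
static kern_return_t
peek_poke_user_word(task_t task, uint64_t user_va)
{
    uint32_t word = 0;
    kern_return_t kr;

    kr = chudxnu_task_read(task, &word, user_va, sizeof(word));
    if(kr != KERN_SUCCESS)
        return kr;

    word |= 0x1; /* arbitrary transformation for the example */
    return chudxnu_task_write(task, user_va, &word, sizeof(word));
}
#endif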

__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        /* Get the page number */
        pp = pmap_find_phys(kernel_pmap, srcaddr);
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        /* Shove in the page offset */
        phys_addr = ((addr64_t)pp << 12) |
            (srcaddr & 0x0000000000000FFFULL);
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        /*
         * Copy in the largest naturally aligned unit that fits in the
         * remaining byte count: a byte if the address is odd or only one
         * byte remains, a halfword if the address is not word aligned or
         * fewer than four bytes remain, otherwise a word.
         */
        if((phys_addr & 0x1) || size == 1) {
            *((uint8_t *)dstaddr) =
                ml_phys_read_byte_64(phys_addr);
            dstaddr = ((uint8_t *)dstaddr) + 1;
            srcaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr & 0x3) || size < 4) {
            *((uint16_t *)dstaddr) =
                ml_phys_read_half_64(phys_addr);
            dstaddr = ((uint16_t *)dstaddr) + 1;
            srcaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            *((uint32_t *)dstaddr) =
                ml_phys_read_word_64(phys_addr);
            dstaddr = ((uint32_t *)dstaddr) + 1;
            srcaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}
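
/*
 * Worked example of the copy-splitting above (illustrative): a 7-byte read
 * from a physical address ending in ...1 proceeds as
 *   byte at ...1  (6 bytes left),
 *   half at ...2  (4 bytes left),
 *   word at ...4  (0 bytes left),
 * so every ml_phys_read_*_64() access is naturally aligned.
 */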

__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void        *srcaddr,
    vm_size_t   size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        /* Get the page number */
        pp = pmap_find_phys(kernel_pmap, dstaddr);
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        /* Shove in the page offset */
        phys_addr = ((addr64_t)pp << 12) |
            (dstaddr & 0x0000000000000FFFULL);
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        /* Same unit-splitting rule as chudxnu_kern_read() above. */
        if((phys_addr & 0x1) || size == 1) {
            ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
            srcaddr = ((uint8_t *)srcaddr) + 1;
            dstaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr & 0x3) || size < 4) {
            ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
            srcaddr = ((uint16_t *)srcaddr) + 1;
            dstaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
            srcaddr = ((uint32_t *)srcaddr) + 1;
            dstaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }

    return KERN_SUCCESS;
}
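
/*
 * Illustrative sketch (not part of the original file): a round trip through
 * the physical-copy helpers above. patch_kernel_word() and its parameters are
 * assumptions for the example only; real callers must know the address is
 * safe to modify.
 */
#if 0
static kern_return_t
patch_kernel_word(vm_offset_t kern_va, uint32_t new_value, uint32_t *old_value)
{
    kern_return_t kr;

    kr = chudxnu_kern_read(old_value, kern_va, sizeof(*old_value));
    if(kr != KERN_SUCCESS)
        return kr; /* address not mapped or out of range */

    return chudxnu_kern_write(kern_va, &new_value, sizeof(new_value));
}
#endif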

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)

// don't try to read in the 64-bit virtual address hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
    (addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))
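
/*
 * On x86-64 only "canonical" virtual addresses are valid: bits 63:47 must all
 * match bit 47, so user addresses run up to 0x00007FFFFFFFFFFF and kernel
 * addresses start at 0xFFFF800000000000. The macro above rejects frame
 * pointers that fall in the non-canonical hole between those two ranges.
 */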

typedef struct _cframe64_t {
    uint64_t    prevFP;     // can't use a real pointer here until we're a 64 bit kernel
    uint64_t    caller;
    uint64_t    args[0];
} cframe64_t;


typedef struct _cframe_t {
    struct _cframe_t    *prev;  // when we go 64 bits, this needs to be capped at 32 bits
    uint32_t            caller;
    uint32_t            args[0];
} cframe_t;

__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
    thread_t                thread,
    uint64_t                *callstack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr = KERN_FAILURE;
    kern_return_t ret = KERN_SUCCESS;
    task_t task = thread->task;
    uint64_t currPC = 0;
    uint64_t prevPC = 0;
    uint64_t currFP = 0;
    uint64_t prevFP = 0;
    uint64_t rsp = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callstack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor = FALSE;
    boolean_t is64bit = FALSE;
    void *t_regs;

    if (user_only) {
        /* We can't get user state for kernel threads */
        if (task == kernel_task) {
            return KERN_FAILURE;
        }
        t_regs = USER_STATE(thread);

        if(is_saved_state64(t_regs)) {
            void *int_state = current_cpu_datap()->cpu_int_state;
            x86_saved_state64_t *s64 = saved_state64(t_regs);

            if(int_state) { // on an interrupt: did it happen in user land?
                supervisor = !(t_regs == int_state && current_cpu_datap()->cpu_interrupt_level == 1);
            } else {
                if(s64) {
                    supervisor = ((s64->isf.cs & SEL_PL) != SEL_PL_U);
                } else {
                    // no saved state; assume a 32-bit kernel context
                    supervisor = FALSE;
                }
            }
            is64bit = TRUE;
        } else {
            x86_saved_state32_t *regs;

            regs = saved_state32(t_regs);

            // find out if we're in supervisor mode
            supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
            is64bit = FALSE;
        }
    } else {
        t_regs = current_cpu_datap()->cpu_int_state;

        // without a saved interrupt state there is nothing to sample
        if(t_regs == NULL) {
            *count = 0;
            return KERN_FAILURE;
        }

        x86_saved_state32_t *regs;

        regs = saved_state32(t_regs);

        // find out if we're in supervisor mode
        supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
        is64bit = FALSE;
    }

    if(is64bit) {
        x86_saved_state64_t *regs = saved_state64(t_regs);

        if(user_only) {
            /* can't get user state for kernel threads */
            if(task == kernel_task) {
                return KERN_FAILURE;
            }
            regs = USER_REGS64(thread);
        }

        currPC = regs->isf.rip;
        currFP = regs->rbp;

        if(!currPC) {
            *count = 0;
            return KERN_FAILURE;
        }

        bufferIndex = 0;

        // reserve space for saving %rsp on the
        // bottom of the stack for user callstacks
        if(!supervisor)
            bufferMaxIndex = bufferMaxIndex - 1;

        if(bufferMaxIndex < 1) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }
        buffer[bufferIndex++] = currPC; // save RIP on the top of the stack

        // now make a 64-bit back trace
        while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax))
        {
            // this is the address where the caller's return address lives in the thread's frame
            uint64_t caller = currFP + sizeof(uint64_t);
            if(!currFP) {
                currPC = 0;
                break;
            }

            if(bufferIndex >= bufferMaxIndex) {
                *count = bufferMaxIndex;
                return KERN_RESOURCE_SHORTAGE;
            }

            /* read our caller */
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            /*
             * retrieve contents of the frame pointer and advance to the next stack
             * frame if it's valid
             */
            prevFP = 0;
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
                buffer[bufferIndex++] = currPC;
                prevPC = currPC;
            }
            // frame pointers must strictly increase up the stack;
            // a self-referencing or descending frame ends the walk
            if(prevFP <= currFP) {
                break;
            } else {
                currFP = prevFP;
            }
        }

        // append the value at (%rsp) on the bottom of user callstacks,
        // matching the slot reserved above
        if(!supervisor) {
            kr = chudxnu_task_read(task, &rsp, (addr64_t) regs->isf.rsp, sizeof(uint64_t));
            if(kr == KERN_SUCCESS) {
                buffer[bufferIndex++] = rsp;
            }
        }
    } else {
        /* !thread_is_64bit() */
        /* we grab 32 bit frames and silently promote them to 64 bits */
        uint32_t tmpWord = 0;
        x86_saved_state32_t *regs = NULL;

        if(user_only) {
            /* can't get user state for kernel threads */
            if(task == kernel_task || supervisor) {
                return KERN_FAILURE;
            }
            regs = USER_REGS32(thread);
        } else {
            regs = saved_state32(current_cpu_datap()->cpu_int_state);
        }

        if(regs == NULL) {
            *count = 0;
            return KERN_FAILURE;
        }

        currPC = (uint64_t) regs->eip;
        currFP = (uint64_t) regs->ebp;

        bufferIndex = 0;
        // reserve space for saving %esp on the
        // bottom of the stack for user callstacks
        if(!supervisor)
            bufferMaxIndex = bufferMaxIndex - 1;
        if(bufferMaxIndex < 1) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }
        buffer[bufferIndex++] = currPC; // save EIP on the top of the stack

        // now make a 64-bit back trace from 32-bit stack frames
        while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax))
        {
            cframe_t *fp = (cframe_t *) (uint32_t) currFP;

            if(bufferIndex >= bufferMaxIndex) {
                *count = bufferMaxIndex;
                return KERN_RESOURCE_SHORTAGE;
            }

            /* read the next frame */
            if(supervisor) {
                kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
            }

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            currPC = (uint64_t) tmpWord; // promote 32 bit address

            /*
             * retrieve contents of the frame pointer and advance to the next stack
             * frame if it's valid
             */
            prevFP = 0;
            if(supervisor) {
                kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
            }
            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }
            prevFP = (uint64_t) tmpWord; // promote 32 bit address

            if(prevFP) {
                buffer[bufferIndex++] = currPC;
                prevPC = currPC;
            }
            // stop on a self-referencing or descending frame pointer
            if(prevFP <= currFP) {
                break;
            } else {
                currFP = prevFP;
            }
        }

        // append (esp) on the bottom of the callstack
        if(!supervisor) {
            kr = chudxnu_task_read(task, &tmpWord, regs->uesp, sizeof(uint32_t));
            if(kr == KERN_SUCCESS) {
                rsp = (uint64_t) tmpWord; // promote 32 bit address
                buffer[bufferIndex++] = rsp;
            }
        }
    }

    *count = bufferIndex;
    return ret;
}
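
/*
 * Illustrative sketch (not part of the original file): gathering a user-mode
 * callstack for a thread. MAX_FRAMES and log_user_callstack() are assumptions
 * for the example; callers supply the buffer and pass its capacity in count,
 * which is rewritten with the number of entries actually filled.
 */
#if 0
#define MAX_FRAMES 32

static void
log_user_callstack(thread_t thread)
{
    uint64_t frames[MAX_FRAMES];
    mach_msg_type_number_t count = MAX_FRAMES;

    if(chudxnu_thread_get_callstack64(thread, frames, &count, TRUE) == KERN_SUCCESS) {
        mach_msg_type_number_t i;
        for(i = 0; i < count; i++) {
            kprintf("frame[%u] = 0x%llx\n", i, frames[i]);
        }
    }
}
#endif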

__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
    thread_t                thread,
    uint32_t                *callStack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    kern_return_t kr;
    task_t task = thread->task;
    uint32_t currPC;
    uint32_t currFP;
    uint32_t prevFP = 0;
    uint32_t prevPC = 0;
    uint32_t esp = 0;
    uint32_t kernStackMin = min_valid_stack_address();
    uint32_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    x86_saved_state32_t *regs = NULL;

    if (user_only) {
        /* We can't get user state for kernel threads */
        if (task == kernel_task) {
            return KERN_FAILURE;
        }
        regs = USER_REGS32(thread);
    } else {
        regs = saved_state32(current_cpu_datap()->cpu_int_state);
    }

    if (regs == NULL) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

    currPC = regs->eip;
    currFP = regs->ebp;

    bufferIndex = 0;
    if(!supervisor)
        bufferMaxIndex -= 1; // allot space for saving userland %esp on stack
    if (bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) currFP;

        if (bufferIndex >= bufferMaxIndex) {
            *count = bufferMaxIndex;
            return KERN_RESOURCE_SHORTAGE;
        }

        if (supervisor) {
            kr = chudxnu_kern_read(
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        } else {
            kr = chudxnu_task_read(
                task,
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        }
        if (kr != KERN_SUCCESS)
            break;

        // retrieve the contents of the frame pointer
        // and advance to the prev stack frame if it's valid
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        } else {
            kr = chudxnu_task_read(
                task,
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        }
        if (kr != KERN_SUCCESS)
            break;

        if (prevFP) {
            buffer[bufferIndex++] = currPC;
            prevPC = currPC;
        }
        // stop on a self-referencing or descending frame pointer
        if (prevFP <= currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    // put the stack pointer on the bottom of the backtrace
    if(!supervisor) {
        kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
        if(kr == KERN_SUCCESS) {
            buffer[bufferIndex++] = esp;
        }
    }

    *count = bufferIndex;
    return KERN_SUCCESS;
}


#pragma mark **** DEPRECATED ****

// DEPRECATED
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(
    uint32_t                *callStack,
    mach_msg_type_number_t  *count,
    boolean_t               user_only)
{
    return chudxnu_thread_get_callstack(
        current_thread(), callStack, count, user_only);
}

// DEPRECATED
__private_extern__
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}