osfmk/chud/i386/chud_thread_i386.c (xnu-792.22.5)
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>

#pragma mark **** thread state ****

__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
    return KERN_SUCCESS;
}

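/*
 * Return the machine state for a thread.  If user_only is set, only
 * user-mode state is returned (and never for kernel threads).  Otherwise,
 * when sampling the current thread from an interrupt, kernel or user state
 * is chosen based on where the interrupt arrived from.
 */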
__private_extern__ kern_return_t
chudxnu_thread_get_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    if (user_only) {
        /* We can't get user state for kernel threads */
        if (thread->task == kernel_task)
            return KERN_FAILURE;
        /* this properly handles deciding whether the thread is 64-bit or not */
        return machine_thread_get_state(thread, flavor, tstate, count);
    } else {
        // i386 machine_thread_get_kern_state() differs from the PPC version:
        // it returns the kernel interrupt state, or NULL if no kernel
        // interrupt state is available, rather than the previous save area
        // (user or kernel) as PPC does.

        // The caller doesn't care whether the state is user or kernel; it
        // just wants the thread state, so we have to decide which of the
        // two is the proper one to return for this thread.
        if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
            // Under these conditions we may be able to read the kernel
            // state; we still need to determine whether the interrupt
            // happened in kernel or user context.
            if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
               current_cpu_datap()->cpu_interrupt_level == 1) {
                // the interrupt arrived from user space
                return machine_thread_get_state(thread, flavor, tstate, count);
            } else {
                // kernel interrupt
                return machine_thread_get_kern_state(thread, flavor, tstate, count);
            }
        } else {
            // get the user-mode thread state
            return machine_thread_get_state(thread, flavor, tstate, count);
        }
    }
}

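/*
 * Set a thread's machine state.  user_only is accepted for interface
 * symmetry with chudxnu_thread_get_state() but is otherwise ignored.
 */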
__private_extern__ kern_return_t
chudxnu_thread_set_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count,
    boolean_t user_only)
{
#pragma unused (user_only)
    return machine_thread_set_state(thread, flavor, tstate, count);
}

#pragma mark **** task memory read/write ****

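/*
 * Copy memory from a task's address space into a kernel buffer.  For the
 * current task this is a plain copyin(), which is illegal on the interrupt
 * stack; for any other task the copy goes through that task's vm_map.
 */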
__private_extern__ kern_return_t
chudxnu_task_read(
    task_t task,
    void *kernaddr,
    uint64_t usraddr,
    vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(current_task() == task) {
        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}

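/*
 * Mirror of chudxnu_task_read(): copy a kernel buffer out to a task's
 * address space, by copyout() or through the task's vm_map.
 */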
__private_extern__ kern_return_t
chudxnu_task_write(
    task_t task,
    uint64_t useraddr,
    void *kernaddr,
    vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(current_task() == task) {
        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}

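/*
 * Read kernel virtual memory by translating each address through the kernel
 * pmap and issuing physical reads, so unmapped addresses fail cleanly
 * instead of faulting.  Accesses are split into byte/half/word reads based
 * on physical alignment and the bytes remaining.
 */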
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        /* Get the page number */
        pp = pmap_find_phys(kernel_pmap, srcaddr);
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        /* Shove in the page offset */
        phys_addr = ((addr64_t)pp << 12) |
            (srcaddr & 0x0000000000000FFFULL);
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr & 0x1) || size == 1) {
            *((uint8_t *)dstaddr) =
                ml_phys_read_byte_64(phys_addr);
            dstaddr = ((uint8_t *)dstaddr) + 1;
            srcaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr & 0x3) || size < 4) { // a word read would overrun when fewer than 4 bytes remain
            *((uint16_t *)dstaddr) =
                ml_phys_read_half_64(phys_addr);
            dstaddr = ((uint16_t *)dstaddr) + 1;
            srcaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            *((uint32_t *)dstaddr) =
                ml_phys_read_word_64(phys_addr);
            dstaddr = ((uint32_t *)dstaddr) + 1;
            srcaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}

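/*
 * Mirror of chudxnu_kern_read(): write to kernel virtual memory through
 * physical byte/half/word accesses.
 */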
__private_extern__ kern_return_t
chudxnu_kern_write(
    vm_offset_t dstaddr,
    void *srcaddr,
    vm_size_t size)
{
    while(size > 0) {
        ppnum_t pp;
        addr64_t phys_addr;

        /* Get the page number */
        pp = pmap_find_phys(kernel_pmap, dstaddr);
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        /* Shove in the page offset */
        phys_addr = ((addr64_t)pp << 12) |
            (dstaddr & 0x0000000000000FFFULL);
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr & 0x1) || size == 1) {
            ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
            srcaddr = ((uint8_t *)srcaddr) + 1;
            dstaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr & 0x3) || size < 4) { // a word write would overrun when fewer than 4 bytes remain
            ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
            srcaddr = ((uint16_t *)srcaddr) + 1;
            dstaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
            srcaddr = ((uint32_t *)srcaddr) + 1;
            dstaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }

    return KERN_SUCCESS;
}

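/*
 * Kernel frame pointers must lie within the kernel stack range.  For user
 * frames, the 32-bit macro accepts any address, while the 64-bit macro
 * rejects zero and anything inside the non-canonical x86-64 address hole
 * (above 0x00007FFFFFFFFFFF and below 0xFFFF800000000000).
 */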
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
    (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
    (addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))

typedef struct _cframe64_t {
    uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
    uint64_t caller;
    uint64_t args[0];
} cframe64_t;


typedef struct _cframe_t {
    struct _cframe_t *prev; // when we go 64 bits, this needs to be capped at 32 bits
    uint32_t caller;
    uint32_t args[0];
} cframe_t;

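/*
 * Walk a thread's frame-pointer chain and fill callstack[] with up to
 * *count 64-bit return addresses, beginning with the current PC.  Handles
 * both 64-bit and 32-bit frames, promoting 32-bit addresses to 64 bits.
 * For user callstacks the user stack pointer is appended as the final
 * entry.  On return, *count holds the number of entries written.
 */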
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
    thread_t thread,
    uint64_t *callstack,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    kern_return_t kr = KERN_FAILURE;
    kern_return_t ret = KERN_SUCCESS;
    task_t task = thread->task;
    uint64_t currPC = 0;
    uint64_t prevPC = 0;
    uint64_t currFP = 0;
    uint64_t prevFP = 0;
    uint64_t rsp = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callstack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor = FALSE;
    boolean_t is64bit = FALSE;
    void * t_regs;

    if (user_only) {
        /* We can't get user state for kernel threads */
        if (task == kernel_task) {
            return KERN_FAILURE;
        }
        t_regs = USER_STATE(thread);

        if(is_saved_state64(t_regs)) {
            void *int_state = current_cpu_datap()->cpu_int_state;
            x86_saved_state64_t *s64 = saved_state64(t_regs);

            if(int_state) { // are we on an interrupt that happened in user space?
                supervisor = !(t_regs == int_state && current_cpu_datap()->cpu_interrupt_level == 1);
            } else {
                if(s64) {
                    supervisor = ((s64->isf.cs & SEL_PL) != SEL_PL_U);
                } else {
                    // assume 32-bit kernel
                    supervisor = FALSE;
                }
            }
            is64bit = TRUE;
        } else {
            x86_saved_state32_t *regs;

            regs = saved_state32(t_regs);

            // find out if we're in supervisor mode
            supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
            is64bit = FALSE;
        }
    } else {
        x86_saved_state32_t *regs;

        t_regs = current_cpu_datap()->cpu_int_state;
        if(t_regs == NULL) {
            *count = 0;
            return KERN_FAILURE;
        }
        regs = saved_state32(t_regs);

        // find out if we're in supervisor mode
        supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
        is64bit = FALSE;
    }

    if(is64bit) {
        x86_saved_state64_t *regs = saved_state64(t_regs);

        if(user_only) {
            /* can't get user state for kernel threads */
            if(task == kernel_task) {
                return KERN_FAILURE;
            }
            regs = USER_REGS64(thread);
        }

        currPC = regs->isf.rip;
        currFP = regs->rbp;

        if(!currPC)
        {
            *count = 0;
            return KERN_FAILURE;
        }

        bufferIndex = 0;

        // allot space for saving %rsp on the
        // bottom of the stack for user callstacks
        if(!supervisor)
            bufferMaxIndex = bufferMaxIndex - 1;

        if(bufferMaxIndex < 1) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }
        buffer[bufferIndex++] = currPC; // save RIP on the top of the stack

        // now make a 64-bit back trace
        while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax))
        {
            // the return address lives one word above the frame pointer
            uint64_t caller = currFP + sizeof(uint64_t);
            if(!currFP) {
                currPC = 0;
                break;
            }

            if(bufferIndex >= bufferMaxIndex) {
                *count = bufferMaxIndex;
                return KERN_RESOURCE_SHORTAGE;
            }

            /* read our caller */
            kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            /*
             * retrieve the contents of the frame pointer and advance to the
             * next stack frame if it's valid
             */
            prevFP = 0;
            kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
                buffer[bufferIndex++] = currPC;
                prevPC = currPC;
            }
            if(prevFP < currFP) {
                break;
            } else {
                currFP = prevFP;
            }
        }

        // append (rsp) on the bottom of user callstacks
        if(!supervisor) {
            kr = chudxnu_task_read(task, &rsp, (addr64_t) regs->isf.rsp, sizeof(uint64_t));
            if(kr == KERN_SUCCESS) {
                buffer[bufferIndex++] = rsp;
            }
        }
    } else {
        /* !thread_is_64bit() */
        /* we grab 32-bit frames and silently promote them to 64 bits */
        uint32_t tmpWord = 0;
        x86_saved_state32_t *regs = NULL;

        if(user_only) {
            /* can't get user state for kernel threads */
            if(task == kernel_task || supervisor) {
                return KERN_FAILURE;
            }
            regs = USER_REGS32(thread);
        } else {
            regs = saved_state32(current_cpu_datap()->cpu_int_state);
        }

        if(regs == NULL) {
            *count = 0;
            return KERN_FAILURE;
        }

        currPC = (uint64_t) regs->eip;
        currFP = (uint64_t) regs->ebp;

        bufferIndex = 0;
        // allot space for saving %esp on the stack for user callstacks
        if(!supervisor)
            bufferMaxIndex = bufferMaxIndex - 1;
        if(bufferMaxIndex < 1) {
            *count = 0;
            return KERN_RESOURCE_SHORTAGE;
        }
        buffer[bufferIndex++] = currPC; // save EIP on the top of the stack

        // now make a 64-bit back trace from 32-bit stack frames
        while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax))
        {
            cframe_t *fp = (cframe_t *) (uint32_t) currFP;

            if(bufferIndex >= bufferMaxIndex) {
                *count = bufferMaxIndex;
                return KERN_RESOURCE_SHORTAGE;
            }

            /* read the next frame */
            if(supervisor) {
                kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
            }

            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }

            currPC = (uint64_t) tmpWord; // promote 32-bit address

            /*
             * retrieve the contents of the frame pointer and advance to the
             * next stack frame if it's valid
             */
            prevFP = 0;
            if(supervisor) {
                kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
            }
            if(kr != KERN_SUCCESS) {
                currPC = 0;
                break;
            }
            prevFP = (uint64_t) tmpWord; // promote 32-bit address

            if(prevFP) {
                buffer[bufferIndex++] = currPC;
                prevPC = currPC;
            }
            if(prevFP < currFP) {
                break;
            } else {
                currFP = prevFP;
            }
        }

        // append (esp) on the bottom of the callstack
        if(!supervisor) {
            kr = chudxnu_task_read(task, &tmpWord, regs->uesp, sizeof(uint32_t));
            if(kr == KERN_SUCCESS) {
                rsp = (uint64_t) tmpWord; // promote 32-bit address
                buffer[bufferIndex++] = rsp;
            }
        }
    }

    *count = bufferIndex;
    return ret;
}

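/*
 * 32-bit variant of the callstack walker above: the same frame-pointer
 * walk, but entries are returned as 32-bit addresses.
 */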
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
    thread_t thread,
    uint32_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    kern_return_t kr;
    task_t task = thread->task;
    uint32_t currPC;
    uint32_t currFP;
    uint32_t prevFP = 0;
    uint32_t prevPC = 0;
    uint32_t esp = 0;
    uint32_t kernStackMin = min_valid_stack_address();
    uint32_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    x86_saved_state32_t *regs = NULL;

    if (user_only) {
        /* We can't get user state for kernel threads */
        if (task == kernel_task) {
            return KERN_FAILURE;
        }
        regs = USER_REGS32(thread);
    } else {
        regs = saved_state32(current_cpu_datap()->cpu_int_state);
    }

    if (regs == NULL) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

    currPC = regs->eip;
    currFP = regs->ebp;

    bufferIndex = 0;
    if (!supervisor)
        bufferMaxIndex -= 1; // allot space for saving userland %esp on stack
    if (bufferMaxIndex < 1) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }
    buffer[bufferIndex++] = currPC; // save PC in position 0

    // Now, fill buffer with stack backtraces.
    while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
        cframe_t *fp = (cframe_t *) currFP;

        if (bufferIndex >= bufferMaxIndex) {
            *count = bufferMaxIndex;
            return KERN_RESOURCE_SHORTAGE;
        }

        if (supervisor) {
            kr = chudxnu_kern_read(
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        } else {
            kr = chudxnu_task_read(
                task,
                &currPC,
                (vm_offset_t) &fp->caller,
                sizeof(currPC));
        }
        if (kr != KERN_SUCCESS)
            break;

        // retrieve the contents of the frame pointer
        // and advance to the prev stack frame if it's valid
        prevFP = 0;
        if (supervisor) {
            kr = chudxnu_kern_read(
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        } else {
            kr = chudxnu_task_read(
                task,
                &prevFP,
                (vm_offset_t) &fp->prev,
                sizeof(prevFP));
        }
        if (prevFP) {
            buffer[bufferIndex++] = currPC;
            prevPC = currPC;
        }
        if (prevFP < currFP) {
            break;
        } else {
            currFP = prevFP;
        }
    }

    // put the stack pointer on the bottom of the backtrace
    if(!supervisor) {
        kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
        if(kr == KERN_SUCCESS) {
            buffer[bufferIndex++] = esp;
        }
    }

    *count = bufferIndex;
    return KERN_SUCCESS;
}
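
/*
 * Illustrative use from a sampling client (hypothetical variable names,
 * not part of this file):
 *
 *    uint64_t frames[32];
 *    mach_msg_type_number_t nframes = 32;
 *
 *    if(chudxnu_thread_get_callstack64(current_thread(), frames,
 *            &nframes, TRUE) == KERN_SUCCESS) {
 *        // frames[0] is the PC; frames[1..] are return addresses, with
 *        // the user stack pointer appended as the last entry.
 *    }
 */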


#pragma mark **** DEPRECATED ****

// DEPRECATED
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(
    uint32_t *callStack,
    mach_msg_type_number_t *count,
    boolean_t user_only)
{
    return chudxnu_thread_get_callstack(
        current_thread(), callStack, count, user_only);
}

// DEPRECATED
__private_extern__
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}