]> git.saurik.com Git - apple/xnu.git/blob - osfmk/chud/ppc/chud_thread_ppc.c
xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / chud / ppc / chud_thread_ppc.c
1 /*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <mach/mach_types.h>
32 #include <mach/task.h>
33 #include <mach/thread_act.h>
34
35 #include <kern/kern_types.h>
36 #include <kern/processor.h>
37 #include <kern/thread.h>
38 #include <kern/ipc_tt.h>
39
40 #include <vm/vm_map.h>
41 #include <vm/pmap.h>
42
43 #include <chud/chud_xnu.h>
44 #include <chud/chud_xnu_private.h>
45
46 #include <ppc/misc_protos.h>
47 #include <ppc/proc_reg.h>
48 #include <ppc/machine_routines.h>
49 #include <ppc/fpu_protos.h>
50
51 // forward declarations
52 extern kern_return_t machine_thread_get_kern_state( thread_t thread,
53 thread_flavor_t flavor,
54 thread_state_t tstate,
55 mach_msg_type_number_t *count);
56
57
58 #pragma mark **** thread state ****
59
60 __private_extern__
61 kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
62 {
63 struct ppc_thread_state *ts;
64 struct ppc_thread_state64 *xts;
65
66 switch(flavor) {
67 case PPC_THREAD_STATE:
68 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
69 *count = 0;
70 return KERN_INVALID_ARGUMENT;
71 }
72 ts = (struct ppc_thread_state *) tstate;
73 if(sv) {
74 ts->r0 = (unsigned int)sv->save_r0;
75 ts->r1 = (unsigned int)sv->save_r1;
76 ts->r2 = (unsigned int)sv->save_r2;
77 ts->r3 = (unsigned int)sv->save_r3;
78 ts->r4 = (unsigned int)sv->save_r4;
79 ts->r5 = (unsigned int)sv->save_r5;
80 ts->r6 = (unsigned int)sv->save_r6;
81 ts->r7 = (unsigned int)sv->save_r7;
82 ts->r8 = (unsigned int)sv->save_r8;
83 ts->r9 = (unsigned int)sv->save_r9;
84 ts->r10 = (unsigned int)sv->save_r10;
85 ts->r11 = (unsigned int)sv->save_r11;
86 ts->r12 = (unsigned int)sv->save_r12;
87 ts->r13 = (unsigned int)sv->save_r13;
88 ts->r14 = (unsigned int)sv->save_r14;
89 ts->r15 = (unsigned int)sv->save_r15;
90 ts->r16 = (unsigned int)sv->save_r16;
91 ts->r17 = (unsigned int)sv->save_r17;
92 ts->r18 = (unsigned int)sv->save_r18;
93 ts->r19 = (unsigned int)sv->save_r19;
94 ts->r20 = (unsigned int)sv->save_r20;
95 ts->r21 = (unsigned int)sv->save_r21;
96 ts->r22 = (unsigned int)sv->save_r22;
97 ts->r23 = (unsigned int)sv->save_r23;
98 ts->r24 = (unsigned int)sv->save_r24;
99 ts->r25 = (unsigned int)sv->save_r25;
100 ts->r26 = (unsigned int)sv->save_r26;
101 ts->r27 = (unsigned int)sv->save_r27;
102 ts->r28 = (unsigned int)sv->save_r28;
103 ts->r29 = (unsigned int)sv->save_r29;
104 ts->r30 = (unsigned int)sv->save_r30;
105 ts->r31 = (unsigned int)sv->save_r31;
106 ts->cr = (unsigned int)sv->save_cr;
107 ts->xer = (unsigned int)sv->save_xer;
108 ts->lr = (unsigned int)sv->save_lr;
109 ts->ctr = (unsigned int)sv->save_ctr;
110 ts->srr0 = (unsigned int)sv->save_srr0;
111 ts->srr1 = (unsigned int)sv->save_srr1;
112 ts->mq = 0;
113 ts->vrsave = (unsigned int)sv->save_vrsave;
114 } else {
115 bzero((void *)ts, sizeof(struct ppc_thread_state));
116 }
117 *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
118 return KERN_SUCCESS;
119 break;
120 case PPC_THREAD_STATE64:
121 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
122 return KERN_INVALID_ARGUMENT;
123 }
124 xts = (struct ppc_thread_state64 *) tstate;
125 if(sv) {
126 xts->r0 = sv->save_r0;
127 xts->r1 = sv->save_r1;
128 xts->r2 = sv->save_r2;
129 xts->r3 = sv->save_r3;
130 xts->r4 = sv->save_r4;
131 xts->r5 = sv->save_r5;
132 xts->r6 = sv->save_r6;
133 xts->r7 = sv->save_r7;
134 xts->r8 = sv->save_r8;
135 xts->r9 = sv->save_r9;
136 xts->r10 = sv->save_r10;
137 xts->r11 = sv->save_r11;
138 xts->r12 = sv->save_r12;
139 xts->r13 = sv->save_r13;
140 xts->r14 = sv->save_r14;
141 xts->r15 = sv->save_r15;
142 xts->r16 = sv->save_r16;
143 xts->r17 = sv->save_r17;
144 xts->r18 = sv->save_r18;
145 xts->r19 = sv->save_r19;
146 xts->r20 = sv->save_r20;
147 xts->r21 = sv->save_r21;
148 xts->r22 = sv->save_r22;
149 xts->r23 = sv->save_r23;
150 xts->r24 = sv->save_r24;
151 xts->r25 = sv->save_r25;
152 xts->r26 = sv->save_r26;
153 xts->r27 = sv->save_r27;
154 xts->r28 = sv->save_r28;
155 xts->r29 = sv->save_r29;
156 xts->r30 = sv->save_r30;
157 xts->r31 = sv->save_r31;
158 xts->cr = sv->save_cr;
159 xts->xer = sv->save_xer;
160 xts->lr = sv->save_lr;
161 xts->ctr = sv->save_ctr;
162 xts->srr0 = sv->save_srr0;
163 xts->srr1 = sv->save_srr1;
164 xts->vrsave = sv->save_vrsave;
165 } else {
166 bzero((void *)xts, sizeof(struct ppc_thread_state64));
167 }
168 *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
169 return KERN_SUCCESS;
170 break;
171 default:
172 *count = 0;
173 return KERN_INVALID_ARGUMENT;
174 break;
175 }
176 }
177
178 __private_extern__
179 kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
180 {
181 struct ppc_thread_state *ts;
182 struct ppc_thread_state64 *xts;
183
184 switch(flavor) {
185 case PPC_THREAD_STATE:
186 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
187 return KERN_INVALID_ARGUMENT;
188 }
189 ts = (struct ppc_thread_state *) tstate;
190 if(sv) {
191 sv->save_r0 = (uint64_t)ts->r0;
192 sv->save_r1 = (uint64_t)ts->r1;
193 sv->save_r2 = (uint64_t)ts->r2;
194 sv->save_r3 = (uint64_t)ts->r3;
195 sv->save_r4 = (uint64_t)ts->r4;
196 sv->save_r5 = (uint64_t)ts->r5;
197 sv->save_r6 = (uint64_t)ts->r6;
198 sv->save_r7 = (uint64_t)ts->r7;
199 sv->save_r8 = (uint64_t)ts->r8;
200 sv->save_r9 = (uint64_t)ts->r9;
201 sv->save_r10 = (uint64_t)ts->r10;
202 sv->save_r11 = (uint64_t)ts->r11;
203 sv->save_r12 = (uint64_t)ts->r12;
204 sv->save_r13 = (uint64_t)ts->r13;
205 sv->save_r14 = (uint64_t)ts->r14;
206 sv->save_r15 = (uint64_t)ts->r15;
207 sv->save_r16 = (uint64_t)ts->r16;
208 sv->save_r17 = (uint64_t)ts->r17;
209 sv->save_r18 = (uint64_t)ts->r18;
210 sv->save_r19 = (uint64_t)ts->r19;
211 sv->save_r20 = (uint64_t)ts->r20;
212 sv->save_r21 = (uint64_t)ts->r21;
213 sv->save_r22 = (uint64_t)ts->r22;
214 sv->save_r23 = (uint64_t)ts->r23;
215 sv->save_r24 = (uint64_t)ts->r24;
216 sv->save_r25 = (uint64_t)ts->r25;
217 sv->save_r26 = (uint64_t)ts->r26;
218 sv->save_r27 = (uint64_t)ts->r27;
219 sv->save_r28 = (uint64_t)ts->r28;
220 sv->save_r29 = (uint64_t)ts->r29;
221 sv->save_r30 = (uint64_t)ts->r30;
222 sv->save_r31 = (uint64_t)ts->r31;
223 sv->save_cr = ts->cr;
224 sv->save_xer = (uint64_t)ts->xer;
225 sv->save_lr = (uint64_t)ts->lr;
226 sv->save_ctr = (uint64_t)ts->ctr;
227 sv->save_srr0 = (uint64_t)ts->srr0;
228 sv->save_srr1 = (uint64_t)ts->srr1;
229 sv->save_vrsave = ts->vrsave;
230 return KERN_SUCCESS;
231 }
232 break;
233 case PPC_THREAD_STATE64:
234 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
235 return KERN_INVALID_ARGUMENT;
236 }
237 xts = (struct ppc_thread_state64 *) tstate;
238 if(sv) {
239 sv->save_r0 = xts->r0;
240 sv->save_r1 = xts->r1;
241 sv->save_r2 = xts->r2;
242 sv->save_r3 = xts->r3;
243 sv->save_r4 = xts->r4;
244 sv->save_r5 = xts->r5;
245 sv->save_r6 = xts->r6;
246 sv->save_r7 = xts->r7;
247 sv->save_r8 = xts->r8;
248 sv->save_r9 = xts->r9;
249 sv->save_r10 = xts->r10;
250 sv->save_r11 = xts->r11;
251 sv->save_r12 = xts->r12;
252 sv->save_r13 = xts->r13;
253 sv->save_r14 = xts->r14;
254 sv->save_r15 = xts->r15;
255 sv->save_r16 = xts->r16;
256 sv->save_r17 = xts->r17;
257 sv->save_r18 = xts->r18;
258 sv->save_r19 = xts->r19;
259 sv->save_r20 = xts->r20;
260 sv->save_r21 = xts->r21;
261 sv->save_r22 = xts->r22;
262 sv->save_r23 = xts->r23;
263 sv->save_r24 = xts->r24;
264 sv->save_r25 = xts->r25;
265 sv->save_r26 = xts->r26;
266 sv->save_r27 = xts->r27;
267 sv->save_r28 = xts->r28;
268 sv->save_r29 = xts->r29;
269 sv->save_r30 = xts->r30;
270 sv->save_r31 = xts->r31;
271 sv->save_cr = xts->cr;
272 sv->save_xer = xts->xer;
273 sv->save_lr = xts->lr;
274 sv->save_ctr = xts->ctr;
275 sv->save_srr0 = xts->srr0;
276 sv->save_srr1 = xts->srr1;
277 sv->save_vrsave = xts->vrsave;
278 return KERN_SUCCESS;
279 }
280 }
281 return KERN_FAILURE;
282 }
283
284 __private_extern__
285 kern_return_t chudxnu_thread_user_state_available(thread_t thread)
286 {
287 if(find_user_regs(thread)) {
288 return KERN_SUCCESS;
289 } else {
290 return KERN_FAILURE;
291 }
292 }
293
294 __private_extern__
295 kern_return_t chudxnu_thread_get_state(thread_t thread,
296 thread_flavor_t flavor,
297 thread_state_t tstate,
298 mach_msg_type_number_t *count,
299 boolean_t user_only)
300 {
301 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
302 struct savearea *sv;
303 if(user_only) {
304 sv = find_user_regs(thread);
305 } else {
306 sv = find_kern_regs(thread);
307 }
308 return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
309 } else {
310 if(user_only) {
311 return machine_thread_get_state(thread, flavor, tstate, count);
312 } else {
313 // doesn't do FP or VMX
314 return machine_thread_get_kern_state(thread, flavor, tstate, count);
315 }
316 }
317 }
318
319 __private_extern__
320 kern_return_t chudxnu_thread_set_state(thread_t thread,
321 thread_flavor_t flavor,
322 thread_state_t tstate,
323 mach_msg_type_number_t count,
324 boolean_t user_only)
325 {
326 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
327 struct savearea *sv;
328 if(user_only) {
329 sv = find_user_regs(thread);
330 } else {
331 sv = find_kern_regs(thread);
332 }
333 return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
334 } else {
335 return machine_thread_set_state(thread, flavor, tstate, count); // always user
336 }
337 }
338
339 #pragma mark **** task memory read/write ****
340
341 __private_extern__
342 kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
343 {
344 kern_return_t ret = KERN_SUCCESS;
345
346 if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
347 usraddr &= 0x00000000FFFFFFFFULL;
348 }
349
350 if(current_task()==task) {
351 thread_t cur_thr = current_thread();
352 vm_offset_t recover_handler = cur_thr->recover;
353
354 if(ml_at_interrupt_context()) {
355 return KERN_FAILURE; // can't do copyin on interrupt stack
356 }
357
358 if(copyin(usraddr, kernaddr, size)) {
359 ret = KERN_FAILURE;
360 }
361 cur_thr->recover = recover_handler;
362 } else {
363 vm_map_t map = get_task_map(task);
364 ret = vm_map_read_user(map, usraddr, kernaddr, size);
365 }
366
367 return ret;
368 }
369
370 __private_extern__
371 kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
372 {
373 kern_return_t ret = KERN_SUCCESS;
374
375 if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
376 useraddr &= 0x00000000FFFFFFFFULL;
377 }
378
379 if(current_task()==task) {
380 thread_t cur_thr = current_thread();
381 vm_offset_t recover_handler = cur_thr->recover;
382
383 if(ml_at_interrupt_context()) {
384 return KERN_FAILURE; // can't do copyout on interrupt stack
385 }
386
387 if(copyout(kernaddr, useraddr, size)) {
388 ret = KERN_FAILURE;
389 }
390 cur_thr->recover = recover_handler;
391 } else {
392 vm_map_t map = get_task_map(task);
393 ret = vm_map_write_user(map, kernaddr, useraddr, size);
394 }
395
396 return ret;
397 }
398
399 __private_extern__
400 kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
401 {
402 while(size>0) {
403 ppnum_t pp;
404 addr64_t phys_addr;
405
406 pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
407 if(!pp) {
408 return KERN_FAILURE; /* Not mapped... */
409 }
410
411 phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
412 if(phys_addr >= mem_actual) {
413 return KERN_FAILURE; /* out of range */
414 }
415
416 if((phys_addr&0x1) || size==1) {
417 *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
418 ((uint8_t *)dstaddr)++;
419 srcaddr += sizeof(uint8_t);
420 size -= sizeof(uint8_t);
421 } else if((phys_addr&0x3) || size<=2) {
422 *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
423 ((uint16_t *)dstaddr)++;
424 srcaddr += sizeof(uint16_t);
425 size -= sizeof(uint16_t);
426 } else {
427 *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
428 ((uint32_t *)dstaddr)++;
429 srcaddr += sizeof(uint32_t);
430 size -= sizeof(uint32_t);
431 }
432 }
433 return KERN_SUCCESS;
434 }
435
436 __private_extern__
437 kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
438 {
439 while(size>0) {
440 ppnum_t pp;
441 addr64_t phys_addr;
442
443 pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
444 if(!pp) {
445 return KERN_FAILURE; /* Not mapped... */
446 }
447
448 phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
449 if(phys_addr >= mem_actual) {
450 return KERN_FAILURE; /* out of range */
451 }
452
453 if((phys_addr&0x1) || size==1) {
454 ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
455 ((uint8_t *)srcaddr)++;
456 dstaddr += sizeof(uint8_t);
457 size -= sizeof(uint8_t);
458 } else if((phys_addr&0x3) || size<=2) {
459 ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
460 ((uint16_t *)srcaddr)++;
461 dstaddr += sizeof(uint16_t);
462 size -= sizeof(uint16_t);
463 } else {
464 ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
465 ((uint32_t *)srcaddr)++;
466 dstaddr += sizeof(uint32_t);
467 size -= sizeof(uint32_t);
468 }
469 }
470
471 return KERN_SUCCESS;
472 }
473
474 // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
475 // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
476 // after sampling has finished.
477 //
478 // For an N-entry callstack:
479 //
480 // [0] current pc
481 // [1..N-3] stack frames (including current one)
482 // [N-2] current LR (return value if we're in a leaf function)
483 // [N-1] current r0 (in case we've saved LR in r0)
484 //
485
486 #define FP_LINK_OFFSET 2
487 #define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
488 #define INST_ALIGNMENT_MASK 0x3 // Instructions are always 4-bytes wide
489
490 #ifndef USER_MODE
491 #define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
492 #endif
493
494 #ifndef SUPERVISOR_MODE
495 #define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
496 #endif
497
498 #define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && \
499 (addr&STACK_ALIGNMENT_MASK)==0x0 && \
500 (supervisor ? \
501 (addr>=kernStackMin && \
502 addr<=kernStackMax) : \
503 TRUE))
504
505
__private_extern__
/*
 * Gather a raw 64-bit callstack for 'thread' into 'callStack'.
 * Result layout (see the block comment above FP_LINK_OFFSET):
 *   [0]       current PC (srr0)
 *   [1..N-3]  return addresses read from the frame chain
 *   [N-2]     current LR
 *   [N-1]     current r0 (in case LR was staged there)
 *
 * On entry *count is the capacity of 'callStack' in entries; on success
 * it is set to the number of entries written.  Returns KERN_FAILURE when
 * no savearea is available and KERN_RESOURCE_SHORTAGE when the buffer is
 * too small; *count is zeroed on both errors.
 */
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
						uint64_t *callStack,
						mach_msg_type_number_t *count,
						boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;		// NOTE(review): assigned below but never read
	uint64_t kernStackMin = min_valid_stack_address();	// referenced inside VALID_STACK_ADDRESS()
	uint64_t kernStackMax = max_valid_stack_address();	// referenced inside VALID_STACK_ADDRESS()
	uint64_t *buffer = callStack;
	uint32_t tmpWord;		// staging area for 32-bit reads
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;		// also referenced inside VALID_STACK_ADDRESS()
	boolean_t is64Bit;
	struct savearea *sv;

	// Select which register snapshot to walk.
	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	// Supervisor vs. user decides which address-space reader is used below.
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		//   saved LR
		//   saved CR
		//   saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		// Frame slots are 8 bytes wide in a 64-bit frame, 4 in a 32-bit one.
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;
		}

		// Only record the PC when the back chain continues.
		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		// Frames grow toward higher addresses; a backward link ends the walk.
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	// A completely filled frame area means the stack was deeper than the
	// caller's buffer allows; report shortage rather than a truncated trace.
	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
648
__private_extern__
/*
 * 32-bit variant of chudxnu_thread_get_callstack64: identical walk, but
 * each entry is stored into a uint32_t buffer, so addresses from 64-bit
 * frames are implicitly truncated to their low 32 bits on assignment.
 * Result layout:
 *   [0]       current PC (srr0)
 *   [1..N-3]  return addresses read from the frame chain
 *   [N-2]     current LR
 *   [N-1]     current r0 (in case LR was staged there)
 *
 * On entry *count is the capacity of 'callStack' in entries; on success
 * it is set to the number of entries written.  Returns KERN_FAILURE when
 * no savearea is available and KERN_RESOURCE_SHORTAGE when the buffer is
 * too small; *count is zeroed on both errors.
 */
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
						uint32_t *callStack,
						mach_msg_type_number_t *count,
						boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;		// NOTE(review): assigned below but never read
	uint64_t kernStackMin = min_valid_stack_address();	// referenced inside VALID_STACK_ADDRESS()
	uint64_t kernStackMax = max_valid_stack_address();	// referenced inside VALID_STACK_ADDRESS()
	uint32_t *buffer = callStack;
	uint32_t tmpWord;		// staging area for 32-bit reads
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;		// also referenced inside VALID_STACK_ADDRESS()
	boolean_t is64Bit;
	struct savearea *sv;

	// Select which register snapshot to walk.
	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	// Supervisor vs. user decides which address-space reader is used below.
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		//   saved LR
		//   saved CR
		//   saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		// Frame slots are 8 bytes wide in a 64-bit frame, 4 in a 32-bit one.
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;
		}

		// Only record the PC when the back chain continues.
		// (Truncates to 32 bits here when the frame is 64-bit.)
		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		// Frames grow toward higher addresses; a backward link ends the walk.
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	// A completely filled frame area means the stack was deeper than the
	// caller's buffer allows; report shortage rather than a truncated trace.
	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
791
792 #pragma mark **** DEPRECATED ****
793
794 // DEPRECATED
795 __private_extern__
796 kern_return_t chudxnu_bind_current_thread(int cpu)
797 {
798 return chudxnu_bind_thread(current_thread(), cpu);
799 }
800
// DEPRECATED
/*
 * Legacy shim: unbind the calling thread via chudxnu_unbind_thread().
 * NOTE(review): unlike the sibling deprecated wrappers, this definition
 * is not marked __private_extern__ — confirm whether that is intentional.
 */
kern_return_t chudxnu_unbind_current_thread(void)
{
	return chudxnu_unbind_thread(current_thread());
}
806
807 // DEPRECATED
808 __private_extern__
809 kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
810 mach_msg_type_number_t *count,
811 boolean_t user_only)
812 {
813 return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
814 }
815
816 // DEPRECATED
817 __private_extern__
818 thread_t chudxnu_current_act(void)
819 {
820 return chudxnu_current_thread();
821 }