]> git.saurik.com Git - apple/xnu.git/blob - osfmk/chud/ppc/chud_thread_ppc.c
b822fbeae2c6c28350d25656ca8cfbf0fb59ca66
[apple/xnu.git] / osfmk / chud / ppc / chud_thread_ppc.c
1 /*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/task.h>
31 #include <mach/thread_act.h>
32
33 #include <kern/kern_types.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/ipc_tt.h>
37
38 #include <vm/vm_map.h>
39 #include <vm/pmap.h>
40
41 #include <chud/chud_xnu.h>
42 #include <chud/chud_xnu_private.h>
43
44 #include <ppc/misc_protos.h>
45 #include <ppc/proc_reg.h>
46 #include <ppc/machine_routines.h>
47 #include <ppc/fpu_protos.h>
48
49 // forward declarations
50 extern kern_return_t machine_thread_get_kern_state( thread_t thread,
51 thread_flavor_t flavor,
52 thread_state_t tstate,
53 mach_msg_type_number_t *count);
54
55
56 #pragma mark **** thread state ****
57
58 __private_extern__
59 kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
60 {
61 struct ppc_thread_state *ts;
62 struct ppc_thread_state64 *xts;
63
64 switch(flavor) {
65 case PPC_THREAD_STATE:
66 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
67 *count = 0;
68 return KERN_INVALID_ARGUMENT;
69 }
70 ts = (struct ppc_thread_state *) tstate;
71 if(sv) {
72 ts->r0 = (unsigned int)sv->save_r0;
73 ts->r1 = (unsigned int)sv->save_r1;
74 ts->r2 = (unsigned int)sv->save_r2;
75 ts->r3 = (unsigned int)sv->save_r3;
76 ts->r4 = (unsigned int)sv->save_r4;
77 ts->r5 = (unsigned int)sv->save_r5;
78 ts->r6 = (unsigned int)sv->save_r6;
79 ts->r7 = (unsigned int)sv->save_r7;
80 ts->r8 = (unsigned int)sv->save_r8;
81 ts->r9 = (unsigned int)sv->save_r9;
82 ts->r10 = (unsigned int)sv->save_r10;
83 ts->r11 = (unsigned int)sv->save_r11;
84 ts->r12 = (unsigned int)sv->save_r12;
85 ts->r13 = (unsigned int)sv->save_r13;
86 ts->r14 = (unsigned int)sv->save_r14;
87 ts->r15 = (unsigned int)sv->save_r15;
88 ts->r16 = (unsigned int)sv->save_r16;
89 ts->r17 = (unsigned int)sv->save_r17;
90 ts->r18 = (unsigned int)sv->save_r18;
91 ts->r19 = (unsigned int)sv->save_r19;
92 ts->r20 = (unsigned int)sv->save_r20;
93 ts->r21 = (unsigned int)sv->save_r21;
94 ts->r22 = (unsigned int)sv->save_r22;
95 ts->r23 = (unsigned int)sv->save_r23;
96 ts->r24 = (unsigned int)sv->save_r24;
97 ts->r25 = (unsigned int)sv->save_r25;
98 ts->r26 = (unsigned int)sv->save_r26;
99 ts->r27 = (unsigned int)sv->save_r27;
100 ts->r28 = (unsigned int)sv->save_r28;
101 ts->r29 = (unsigned int)sv->save_r29;
102 ts->r30 = (unsigned int)sv->save_r30;
103 ts->r31 = (unsigned int)sv->save_r31;
104 ts->cr = (unsigned int)sv->save_cr;
105 ts->xer = (unsigned int)sv->save_xer;
106 ts->lr = (unsigned int)sv->save_lr;
107 ts->ctr = (unsigned int)sv->save_ctr;
108 ts->srr0 = (unsigned int)sv->save_srr0;
109 ts->srr1 = (unsigned int)sv->save_srr1;
110 ts->mq = 0;
111 ts->vrsave = (unsigned int)sv->save_vrsave;
112 } else {
113 bzero((void *)ts, sizeof(struct ppc_thread_state));
114 }
115 *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
116 return KERN_SUCCESS;
117 break;
118 case PPC_THREAD_STATE64:
119 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
120 return KERN_INVALID_ARGUMENT;
121 }
122 xts = (struct ppc_thread_state64 *) tstate;
123 if(sv) {
124 xts->r0 = sv->save_r0;
125 xts->r1 = sv->save_r1;
126 xts->r2 = sv->save_r2;
127 xts->r3 = sv->save_r3;
128 xts->r4 = sv->save_r4;
129 xts->r5 = sv->save_r5;
130 xts->r6 = sv->save_r6;
131 xts->r7 = sv->save_r7;
132 xts->r8 = sv->save_r8;
133 xts->r9 = sv->save_r9;
134 xts->r10 = sv->save_r10;
135 xts->r11 = sv->save_r11;
136 xts->r12 = sv->save_r12;
137 xts->r13 = sv->save_r13;
138 xts->r14 = sv->save_r14;
139 xts->r15 = sv->save_r15;
140 xts->r16 = sv->save_r16;
141 xts->r17 = sv->save_r17;
142 xts->r18 = sv->save_r18;
143 xts->r19 = sv->save_r19;
144 xts->r20 = sv->save_r20;
145 xts->r21 = sv->save_r21;
146 xts->r22 = sv->save_r22;
147 xts->r23 = sv->save_r23;
148 xts->r24 = sv->save_r24;
149 xts->r25 = sv->save_r25;
150 xts->r26 = sv->save_r26;
151 xts->r27 = sv->save_r27;
152 xts->r28 = sv->save_r28;
153 xts->r29 = sv->save_r29;
154 xts->r30 = sv->save_r30;
155 xts->r31 = sv->save_r31;
156 xts->cr = sv->save_cr;
157 xts->xer = sv->save_xer;
158 xts->lr = sv->save_lr;
159 xts->ctr = sv->save_ctr;
160 xts->srr0 = sv->save_srr0;
161 xts->srr1 = sv->save_srr1;
162 xts->vrsave = sv->save_vrsave;
163 } else {
164 bzero((void *)xts, sizeof(struct ppc_thread_state64));
165 }
166 *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
167 return KERN_SUCCESS;
168 break;
169 default:
170 *count = 0;
171 return KERN_INVALID_ARGUMENT;
172 break;
173 }
174 }
175
176 __private_extern__
177 kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
178 {
179 struct ppc_thread_state *ts;
180 struct ppc_thread_state64 *xts;
181
182 switch(flavor) {
183 case PPC_THREAD_STATE:
184 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
185 return KERN_INVALID_ARGUMENT;
186 }
187 ts = (struct ppc_thread_state *) tstate;
188 if(sv) {
189 sv->save_r0 = (uint64_t)ts->r0;
190 sv->save_r1 = (uint64_t)ts->r1;
191 sv->save_r2 = (uint64_t)ts->r2;
192 sv->save_r3 = (uint64_t)ts->r3;
193 sv->save_r4 = (uint64_t)ts->r4;
194 sv->save_r5 = (uint64_t)ts->r5;
195 sv->save_r6 = (uint64_t)ts->r6;
196 sv->save_r7 = (uint64_t)ts->r7;
197 sv->save_r8 = (uint64_t)ts->r8;
198 sv->save_r9 = (uint64_t)ts->r9;
199 sv->save_r10 = (uint64_t)ts->r10;
200 sv->save_r11 = (uint64_t)ts->r11;
201 sv->save_r12 = (uint64_t)ts->r12;
202 sv->save_r13 = (uint64_t)ts->r13;
203 sv->save_r14 = (uint64_t)ts->r14;
204 sv->save_r15 = (uint64_t)ts->r15;
205 sv->save_r16 = (uint64_t)ts->r16;
206 sv->save_r17 = (uint64_t)ts->r17;
207 sv->save_r18 = (uint64_t)ts->r18;
208 sv->save_r19 = (uint64_t)ts->r19;
209 sv->save_r20 = (uint64_t)ts->r20;
210 sv->save_r21 = (uint64_t)ts->r21;
211 sv->save_r22 = (uint64_t)ts->r22;
212 sv->save_r23 = (uint64_t)ts->r23;
213 sv->save_r24 = (uint64_t)ts->r24;
214 sv->save_r25 = (uint64_t)ts->r25;
215 sv->save_r26 = (uint64_t)ts->r26;
216 sv->save_r27 = (uint64_t)ts->r27;
217 sv->save_r28 = (uint64_t)ts->r28;
218 sv->save_r29 = (uint64_t)ts->r29;
219 sv->save_r30 = (uint64_t)ts->r30;
220 sv->save_r31 = (uint64_t)ts->r31;
221 sv->save_cr = ts->cr;
222 sv->save_xer = (uint64_t)ts->xer;
223 sv->save_lr = (uint64_t)ts->lr;
224 sv->save_ctr = (uint64_t)ts->ctr;
225 sv->save_srr0 = (uint64_t)ts->srr0;
226 sv->save_srr1 = (uint64_t)ts->srr1;
227 sv->save_vrsave = ts->vrsave;
228 return KERN_SUCCESS;
229 }
230 break;
231 case PPC_THREAD_STATE64:
232 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
233 return KERN_INVALID_ARGUMENT;
234 }
235 xts = (struct ppc_thread_state64 *) tstate;
236 if(sv) {
237 sv->save_r0 = xts->r0;
238 sv->save_r1 = xts->r1;
239 sv->save_r2 = xts->r2;
240 sv->save_r3 = xts->r3;
241 sv->save_r4 = xts->r4;
242 sv->save_r5 = xts->r5;
243 sv->save_r6 = xts->r6;
244 sv->save_r7 = xts->r7;
245 sv->save_r8 = xts->r8;
246 sv->save_r9 = xts->r9;
247 sv->save_r10 = xts->r10;
248 sv->save_r11 = xts->r11;
249 sv->save_r12 = xts->r12;
250 sv->save_r13 = xts->r13;
251 sv->save_r14 = xts->r14;
252 sv->save_r15 = xts->r15;
253 sv->save_r16 = xts->r16;
254 sv->save_r17 = xts->r17;
255 sv->save_r18 = xts->r18;
256 sv->save_r19 = xts->r19;
257 sv->save_r20 = xts->r20;
258 sv->save_r21 = xts->r21;
259 sv->save_r22 = xts->r22;
260 sv->save_r23 = xts->r23;
261 sv->save_r24 = xts->r24;
262 sv->save_r25 = xts->r25;
263 sv->save_r26 = xts->r26;
264 sv->save_r27 = xts->r27;
265 sv->save_r28 = xts->r28;
266 sv->save_r29 = xts->r29;
267 sv->save_r30 = xts->r30;
268 sv->save_r31 = xts->r31;
269 sv->save_cr = xts->cr;
270 sv->save_xer = xts->xer;
271 sv->save_lr = xts->lr;
272 sv->save_ctr = xts->ctr;
273 sv->save_srr0 = xts->srr0;
274 sv->save_srr1 = xts->srr1;
275 sv->save_vrsave = xts->vrsave;
276 return KERN_SUCCESS;
277 }
278 }
279 return KERN_FAILURE;
280 }
281
282 __private_extern__
283 kern_return_t chudxnu_thread_user_state_available(thread_t thread)
284 {
285 if(find_user_regs(thread)) {
286 return KERN_SUCCESS;
287 } else {
288 return KERN_FAILURE;
289 }
290 }
291
292 __private_extern__
293 kern_return_t chudxnu_thread_get_state(thread_t thread,
294 thread_flavor_t flavor,
295 thread_state_t tstate,
296 mach_msg_type_number_t *count,
297 boolean_t user_only)
298 {
299 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
300 struct savearea *sv;
301 if(user_only) {
302 sv = find_user_regs(thread);
303 } else {
304 sv = find_kern_regs(thread);
305 }
306 return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
307 } else {
308 if(user_only) {
309 return machine_thread_get_state(thread, flavor, tstate, count);
310 } else {
311 // doesn't do FP or VMX
312 return machine_thread_get_kern_state(thread, flavor, tstate, count);
313 }
314 }
315 }
316
317 __private_extern__
318 kern_return_t chudxnu_thread_set_state(thread_t thread,
319 thread_flavor_t flavor,
320 thread_state_t tstate,
321 mach_msg_type_number_t count,
322 boolean_t user_only)
323 {
324 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
325 struct savearea *sv;
326 if(user_only) {
327 sv = find_user_regs(thread);
328 } else {
329 sv = find_kern_regs(thread);
330 }
331 return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
332 } else {
333 return machine_thread_set_state(thread, flavor, tstate, count); // always user
334 }
335 }
336
337 #pragma mark **** task memory read/write ****
338
/*
 * chudxnu_task_read
 *
 * Read `size` bytes of user memory at `usraddr` in `task` into the
 * kernel buffer `kernaddr`.
 *
 * For the current task the copy uses copyin() (refused at interrupt
 * context); for any other task it goes through vm_map_read_user() on
 * that task's map.  Returns KERN_SUCCESS or KERN_FAILURE.
 */
__private_extern__
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        usraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        /* Snapshot the thread's fault-recovery handler so it can be put
         * back after copyin(); copyin installs its own recovery point.
         * NOTE(review): presumably protects an outer caller's handler from
         * being clobbered — confirm against copyin's contract. */
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        /* copyin returns nonzero on fault. */
        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;   /* restore saved handler */
    } else {
        /* Cross-task read: operate on the target task's VM map. */
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}
367
/*
 * chudxnu_task_write
 *
 * Write `size` bytes from the kernel buffer `kernaddr` to user memory
 * at `useraddr` in `task`.
 *
 * For the current task the copy uses copyout() (refused at interrupt
 * context); for any other task it goes through vm_map_write_user() on
 * that task's map.  Returns KERN_SUCCESS or KERN_FAILURE.
 */
__private_extern__
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        useraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        /* Snapshot the thread's fault-recovery handler so it can be put
         * back after copyout(); copyout installs its own recovery point.
         * NOTE(review): presumably protects an outer caller's handler from
         * being clobbered — confirm against copyout's contract. */
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        /* copyout returns nonzero on fault. */
        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;   /* restore saved handler */
    } else {
        /* Cross-task write: operate on the target task's VM map. */
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}
396
397 __private_extern__
398 kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
399 {
400 while(size>0) {
401 ppnum_t pp;
402 addr64_t phys_addr;
403
404 pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
405 if(!pp) {
406 return KERN_FAILURE; /* Not mapped... */
407 }
408
409 phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
410 if(phys_addr >= mem_actual) {
411 return KERN_FAILURE; /* out of range */
412 }
413
414 if((phys_addr&0x1) || size==1) {
415 *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
416 ((uint8_t *)dstaddr)++;
417 srcaddr += sizeof(uint8_t);
418 size -= sizeof(uint8_t);
419 } else if((phys_addr&0x3) || size<=2) {
420 *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
421 ((uint16_t *)dstaddr)++;
422 srcaddr += sizeof(uint16_t);
423 size -= sizeof(uint16_t);
424 } else {
425 *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
426 ((uint32_t *)dstaddr)++;
427 srcaddr += sizeof(uint32_t);
428 size -= sizeof(uint32_t);
429 }
430 }
431 return KERN_SUCCESS;
432 }
433
434 __private_extern__
435 kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
436 {
437 while(size>0) {
438 ppnum_t pp;
439 addr64_t phys_addr;
440
441 pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
442 if(!pp) {
443 return KERN_FAILURE; /* Not mapped... */
444 }
445
446 phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
447 if(phys_addr >= mem_actual) {
448 return KERN_FAILURE; /* out of range */
449 }
450
451 if((phys_addr&0x1) || size==1) {
452 ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
453 ((uint8_t *)srcaddr)++;
454 dstaddr += sizeof(uint8_t);
455 size -= sizeof(uint8_t);
456 } else if((phys_addr&0x3) || size<=2) {
457 ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
458 ((uint16_t *)srcaddr)++;
459 dstaddr += sizeof(uint16_t);
460 size -= sizeof(uint16_t);
461 } else {
462 ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
463 ((uint32_t *)srcaddr)++;
464 dstaddr += sizeof(uint32_t);
465 size -= sizeof(uint32_t);
466 }
467 }
468
469 return KERN_SUCCESS;
470 }
471
472 // chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
473 // fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
474 // after sampling has finished.
475 //
476 // For an N-entry callstack:
477 //
478 // [0] current pc
479 // [1..N-3] stack frames (including current one)
480 // [N-2] current LR (return value if we're in a leaf function)
481 // [N-1] current r0 (in case we've saved LR in r0)
482 //
483
/* Offset (in words) from a frame pointer to its LR save slot. */
#define FP_LINK_OFFSET 2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK 0x3 // Instructions are always 4-bytes wide

/* MSR[PR] set => the saved context was executing in user (problem) state. */
#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

/* MSR[PR] clear => the saved context was executing in supervisor state. */
#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

/*
 * Sanity-check a candidate stack address: above the first page, 16-byte
 * aligned, and (for supervisor-mode walks) inside the kernel stack range.
 * NOTE(review): this macro reads `supervisor`, `kernStackMin`, and
 * `kernStackMax` from the *enclosing function's* scope — it is only usable
 * inside the callstack walkers below.
 */
#define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && \
                                   (addr&STACK_ALIGNMENT_MASK)==0x0 && \
                                   (supervisor ? \
                                    (addr>=kernStackMin && \
                                     addr<=kernStackMax) : \
                                    TRUE))
502
503
/*
 * chudxnu_thread_get_callstack64
 *
 * Gather a raw 64-bit callstack for `thread` into `callStack`, in the
 * layout described in the comment block above: [0] = current PC, then
 * the walked stack frames, then the current LR and r0 at the end for
 * later fixup.
 *
 * count: in  - capacity of callStack in entries
 *        out - entries written (0 on failure)
 * user_only: walk the user-mode context rather than the kernel one.
 *
 * Returns KERN_SUCCESS, KERN_FAILURE (no savearea), or
 * KERN_RESOURCE_SHORTAGE (buffer too small).
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
                                              uint64_t *callStack,
                                              mach_msg_type_number_t *count,
                                              boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;    /* NOTE(review): assigned below but never read — candidate for removal */
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callStack;
    uint32_t tmpWord;       /* staging word for 32-bit reads widened to 64 bits */
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    /* Select the register context to start the walk from. */
    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    /* MSR[PR] clear in the saved SRR1 means supervisor mode. */
    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0; // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        // saved LR
        // saved CR
        // saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        /* The LR save slot is FP_LINK_OFFSET words above the frame
         * pointer; word size depends on the task's address width. */
        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack. However, if we catch the program in between filling in the return
        // address and lowering the stack, we want to still have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        /* Read the saved LR (return PC) from the frame, using the
         * kernel- or task-memory reader as appropriate. */
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;   /* treat a failed read as end-of-chain */
        }

        /* Only record the pc if the chain continues (nonzero back link). */
        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        /* Stacks grow downward, so older frames live at higher addresses;
         * a back link below the current frame ends the walk.
         * NOTE(review): nextFramePointer == framePointer would re-walk the
         * same frame until the buffer fills — confirm '<' vs '<=' intent. */
        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
646
/*
 * chudxnu_thread_get_callstack
 *
 * 32-bit-entry variant of chudxnu_thread_get_callstack64: identical
 * walk, but results are stored into a uint32_t buffer.
 * NOTE(review): 64-bit PCs/LR/r0 are silently truncated when stored —
 * presumably acceptable for 32-bit consumers; confirm with callers.
 *
 * count: in  - capacity of callStack in entries
 *        out - entries written (0 on failure)
 * user_only: walk the user-mode context rather than the kernel one.
 *
 * Returns KERN_SUCCESS, KERN_FAILURE (no savearea), or
 * KERN_RESOURCE_SHORTAGE (buffer too small).
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
                                            uint32_t *callStack,
                                            mach_msg_type_number_t *count,
                                            boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;    /* NOTE(review): assigned below but never read — candidate for removal */
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    uint32_t tmpWord;       /* staging word for 32-bit reads widened to 64 bits */
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    /* Select the register context to start the walk from. */
    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    /* MSR[PR] clear in the saved SRR1 means supervisor mode. */
    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0; // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        // saved LR
        // saved CR
        // saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        /* The LR save slot is FP_LINK_OFFSET words above the frame
         * pointer; word size depends on the task's address width. */
        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack. However, if we catch the program in between filling in the return
        // address and lowering the stack, we want to still have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        /* Read the saved LR (return PC) from the frame, using the
         * kernel- or task-memory reader as appropriate. */
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;   /* treat a failed read as end-of-chain */
        }

        /* Only record the pc if the chain continues (nonzero back link);
         * storing into the 32-bit buffer truncates a 64-bit pc. */
        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        /* Stacks grow downward, so older frames live at higher addresses;
         * a back link below the current frame ends the walk.
         * NOTE(review): nextFramePointer == framePointer would re-walk the
         * same frame until the buffer fills — confirm '<' vs '<=' intent. */
        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
789
790 #pragma mark **** DEPRECATED ****
791
// DEPRECATED: thin forwarder — bind the calling thread to the given cpu
// via chudxnu_bind_thread(current_thread(), cpu).
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}
798
// DEPRECATED: thin forwarder — unbind the calling thread via
// chudxnu_unbind_thread(current_thread()).
// NOTE(review): unlike its siblings this one lacks __private_extern__ —
// confirm whether the wider visibility is intentional.
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}
804
// DEPRECATED: thin forwarder — gather the calling thread's 32-bit
// callstack via chudxnu_thread_get_callstack(current_thread(), ...).
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
                                                    mach_msg_type_number_t *count,
                                                    boolean_t user_only)
{
    return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
}
813
// DEPRECATED: legacy name ("activation") for the current thread —
// forwards to chudxnu_current_thread().
__private_extern__
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}