]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/chud/ppc/chud_thread_ppc.c
xnu-792.25.20.tar.gz
[apple/xnu.git] / osfmk / chud / ppc / chud_thread_ppc.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <mach/mach_types.h>
24#include <mach/task.h>
25#include <mach/thread_act.h>
26
27#include <kern/kern_types.h>
28#include <kern/processor.h>
29#include <kern/thread.h>
30#include <kern/ipc_tt.h>
31
32#include <vm/vm_map.h>
33#include <vm/pmap.h>
34
35#include <chud/chud_xnu.h>
36#include <chud/chud_xnu_private.h>
37
38#include <ppc/misc_protos.h>
39#include <ppc/proc_reg.h>
40#include <ppc/machine_routines.h>
41#include <ppc/fpu_protos.h>
42
43// forward declarations
44extern kern_return_t machine_thread_get_kern_state( thread_t thread,
45 thread_flavor_t flavor,
46 thread_state_t tstate,
47 mach_msg_type_number_t *count);
48
49
50#pragma mark **** thread state ****
51
52__private_extern__
53kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
54{
55 struct ppc_thread_state *ts;
56 struct ppc_thread_state64 *xts;
57
58 switch(flavor) {
59 case PPC_THREAD_STATE:
60 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
61 *count = 0;
62 return KERN_INVALID_ARGUMENT;
63 }
64 ts = (struct ppc_thread_state *) tstate;
65 if(sv) {
66 ts->r0 = (unsigned int)sv->save_r0;
67 ts->r1 = (unsigned int)sv->save_r1;
68 ts->r2 = (unsigned int)sv->save_r2;
69 ts->r3 = (unsigned int)sv->save_r3;
70 ts->r4 = (unsigned int)sv->save_r4;
71 ts->r5 = (unsigned int)sv->save_r5;
72 ts->r6 = (unsigned int)sv->save_r6;
73 ts->r7 = (unsigned int)sv->save_r7;
74 ts->r8 = (unsigned int)sv->save_r8;
75 ts->r9 = (unsigned int)sv->save_r9;
76 ts->r10 = (unsigned int)sv->save_r10;
77 ts->r11 = (unsigned int)sv->save_r11;
78 ts->r12 = (unsigned int)sv->save_r12;
79 ts->r13 = (unsigned int)sv->save_r13;
80 ts->r14 = (unsigned int)sv->save_r14;
81 ts->r15 = (unsigned int)sv->save_r15;
82 ts->r16 = (unsigned int)sv->save_r16;
83 ts->r17 = (unsigned int)sv->save_r17;
84 ts->r18 = (unsigned int)sv->save_r18;
85 ts->r19 = (unsigned int)sv->save_r19;
86 ts->r20 = (unsigned int)sv->save_r20;
87 ts->r21 = (unsigned int)sv->save_r21;
88 ts->r22 = (unsigned int)sv->save_r22;
89 ts->r23 = (unsigned int)sv->save_r23;
90 ts->r24 = (unsigned int)sv->save_r24;
91 ts->r25 = (unsigned int)sv->save_r25;
92 ts->r26 = (unsigned int)sv->save_r26;
93 ts->r27 = (unsigned int)sv->save_r27;
94 ts->r28 = (unsigned int)sv->save_r28;
95 ts->r29 = (unsigned int)sv->save_r29;
96 ts->r30 = (unsigned int)sv->save_r30;
97 ts->r31 = (unsigned int)sv->save_r31;
98 ts->cr = (unsigned int)sv->save_cr;
99 ts->xer = (unsigned int)sv->save_xer;
100 ts->lr = (unsigned int)sv->save_lr;
101 ts->ctr = (unsigned int)sv->save_ctr;
102 ts->srr0 = (unsigned int)sv->save_srr0;
103 ts->srr1 = (unsigned int)sv->save_srr1;
104 ts->mq = 0;
105 ts->vrsave = (unsigned int)sv->save_vrsave;
106 } else {
107 bzero((void *)ts, sizeof(struct ppc_thread_state));
108 }
109 *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
110 return KERN_SUCCESS;
111 break;
112 case PPC_THREAD_STATE64:
113 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
114 return KERN_INVALID_ARGUMENT;
115 }
116 xts = (struct ppc_thread_state64 *) tstate;
117 if(sv) {
118 xts->r0 = sv->save_r0;
119 xts->r1 = sv->save_r1;
120 xts->r2 = sv->save_r2;
121 xts->r3 = sv->save_r3;
122 xts->r4 = sv->save_r4;
123 xts->r5 = sv->save_r5;
124 xts->r6 = sv->save_r6;
125 xts->r7 = sv->save_r7;
126 xts->r8 = sv->save_r8;
127 xts->r9 = sv->save_r9;
128 xts->r10 = sv->save_r10;
129 xts->r11 = sv->save_r11;
130 xts->r12 = sv->save_r12;
131 xts->r13 = sv->save_r13;
132 xts->r14 = sv->save_r14;
133 xts->r15 = sv->save_r15;
134 xts->r16 = sv->save_r16;
135 xts->r17 = sv->save_r17;
136 xts->r18 = sv->save_r18;
137 xts->r19 = sv->save_r19;
138 xts->r20 = sv->save_r20;
139 xts->r21 = sv->save_r21;
140 xts->r22 = sv->save_r22;
141 xts->r23 = sv->save_r23;
142 xts->r24 = sv->save_r24;
143 xts->r25 = sv->save_r25;
144 xts->r26 = sv->save_r26;
145 xts->r27 = sv->save_r27;
146 xts->r28 = sv->save_r28;
147 xts->r29 = sv->save_r29;
148 xts->r30 = sv->save_r30;
149 xts->r31 = sv->save_r31;
150 xts->cr = sv->save_cr;
151 xts->xer = sv->save_xer;
152 xts->lr = sv->save_lr;
153 xts->ctr = sv->save_ctr;
154 xts->srr0 = sv->save_srr0;
155 xts->srr1 = sv->save_srr1;
156 xts->vrsave = sv->save_vrsave;
157 } else {
158 bzero((void *)xts, sizeof(struct ppc_thread_state64));
159 }
160 *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
161 return KERN_SUCCESS;
162 break;
163 default:
164 *count = 0;
165 return KERN_INVALID_ARGUMENT;
166 break;
167 }
168}
169
170__private_extern__
171kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
172{
173 struct ppc_thread_state *ts;
174 struct ppc_thread_state64 *xts;
175
176 switch(flavor) {
177 case PPC_THREAD_STATE:
178 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
179 return KERN_INVALID_ARGUMENT;
180 }
181 ts = (struct ppc_thread_state *) tstate;
182 if(sv) {
183 sv->save_r0 = (uint64_t)ts->r0;
184 sv->save_r1 = (uint64_t)ts->r1;
185 sv->save_r2 = (uint64_t)ts->r2;
186 sv->save_r3 = (uint64_t)ts->r3;
187 sv->save_r4 = (uint64_t)ts->r4;
188 sv->save_r5 = (uint64_t)ts->r5;
189 sv->save_r6 = (uint64_t)ts->r6;
190 sv->save_r7 = (uint64_t)ts->r7;
191 sv->save_r8 = (uint64_t)ts->r8;
192 sv->save_r9 = (uint64_t)ts->r9;
193 sv->save_r10 = (uint64_t)ts->r10;
194 sv->save_r11 = (uint64_t)ts->r11;
195 sv->save_r12 = (uint64_t)ts->r12;
196 sv->save_r13 = (uint64_t)ts->r13;
197 sv->save_r14 = (uint64_t)ts->r14;
198 sv->save_r15 = (uint64_t)ts->r15;
199 sv->save_r16 = (uint64_t)ts->r16;
200 sv->save_r17 = (uint64_t)ts->r17;
201 sv->save_r18 = (uint64_t)ts->r18;
202 sv->save_r19 = (uint64_t)ts->r19;
203 sv->save_r20 = (uint64_t)ts->r20;
204 sv->save_r21 = (uint64_t)ts->r21;
205 sv->save_r22 = (uint64_t)ts->r22;
206 sv->save_r23 = (uint64_t)ts->r23;
207 sv->save_r24 = (uint64_t)ts->r24;
208 sv->save_r25 = (uint64_t)ts->r25;
209 sv->save_r26 = (uint64_t)ts->r26;
210 sv->save_r27 = (uint64_t)ts->r27;
211 sv->save_r28 = (uint64_t)ts->r28;
212 sv->save_r29 = (uint64_t)ts->r29;
213 sv->save_r30 = (uint64_t)ts->r30;
214 sv->save_r31 = (uint64_t)ts->r31;
215 sv->save_cr = ts->cr;
216 sv->save_xer = (uint64_t)ts->xer;
217 sv->save_lr = (uint64_t)ts->lr;
218 sv->save_ctr = (uint64_t)ts->ctr;
219 sv->save_srr0 = (uint64_t)ts->srr0;
220 sv->save_srr1 = (uint64_t)ts->srr1;
221 sv->save_vrsave = ts->vrsave;
222 return KERN_SUCCESS;
223 }
224 break;
225 case PPC_THREAD_STATE64:
226 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
227 return KERN_INVALID_ARGUMENT;
228 }
229 xts = (struct ppc_thread_state64 *) tstate;
230 if(sv) {
231 sv->save_r0 = xts->r0;
232 sv->save_r1 = xts->r1;
233 sv->save_r2 = xts->r2;
234 sv->save_r3 = xts->r3;
235 sv->save_r4 = xts->r4;
236 sv->save_r5 = xts->r5;
237 sv->save_r6 = xts->r6;
238 sv->save_r7 = xts->r7;
239 sv->save_r8 = xts->r8;
240 sv->save_r9 = xts->r9;
241 sv->save_r10 = xts->r10;
242 sv->save_r11 = xts->r11;
243 sv->save_r12 = xts->r12;
244 sv->save_r13 = xts->r13;
245 sv->save_r14 = xts->r14;
246 sv->save_r15 = xts->r15;
247 sv->save_r16 = xts->r16;
248 sv->save_r17 = xts->r17;
249 sv->save_r18 = xts->r18;
250 sv->save_r19 = xts->r19;
251 sv->save_r20 = xts->r20;
252 sv->save_r21 = xts->r21;
253 sv->save_r22 = xts->r22;
254 sv->save_r23 = xts->r23;
255 sv->save_r24 = xts->r24;
256 sv->save_r25 = xts->r25;
257 sv->save_r26 = xts->r26;
258 sv->save_r27 = xts->r27;
259 sv->save_r28 = xts->r28;
260 sv->save_r29 = xts->r29;
261 sv->save_r30 = xts->r30;
262 sv->save_r31 = xts->r31;
263 sv->save_cr = xts->cr;
264 sv->save_xer = xts->xer;
265 sv->save_lr = xts->lr;
266 sv->save_ctr = xts->ctr;
267 sv->save_srr0 = xts->srr0;
268 sv->save_srr1 = xts->srr1;
269 sv->save_vrsave = xts->vrsave;
270 return KERN_SUCCESS;
271 }
272 }
273 return KERN_FAILURE;
274}
275
276__private_extern__
277kern_return_t chudxnu_thread_user_state_available(thread_t thread)
278{
279 if(find_user_regs(thread)) {
280 return KERN_SUCCESS;
281 } else {
282 return KERN_FAILURE;
283 }
284}
285
286__private_extern__
287kern_return_t chudxnu_thread_get_state(thread_t thread,
288 thread_flavor_t flavor,
289 thread_state_t tstate,
290 mach_msg_type_number_t *count,
291 boolean_t user_only)
292{
293 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
294 struct savearea *sv;
295 if(user_only) {
296 sv = find_user_regs(thread);
297 } else {
298 sv = find_kern_regs(thread);
299 }
300 return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
301 } else {
302 if(user_only) {
303 return machine_thread_get_state(thread, flavor, tstate, count);
304 } else {
305 // doesn't do FP or VMX
306 return machine_thread_get_kern_state(thread, flavor, tstate, count);
307 }
308 }
309}
310
311__private_extern__
312kern_return_t chudxnu_thread_set_state(thread_t thread,
313 thread_flavor_t flavor,
314 thread_state_t tstate,
315 mach_msg_type_number_t count,
316 boolean_t user_only)
317{
318 if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
319 struct savearea *sv;
320 if(user_only) {
321 sv = find_user_regs(thread);
322 } else {
323 sv = find_kern_regs(thread);
324 }
325 return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
326 } else {
327 return machine_thread_set_state(thread, flavor, tstate, count); // always user
328 }
329}
330
331#pragma mark **** task memory read/write ****
332
/*
 * chudxnu_task_read — copy 'size' bytes from user address 'usraddr' of
 * 'task' into the kernel buffer 'kernaddr'.
 *
 * Returns KERN_SUCCESS on a complete copy; KERN_FAILURE if the copy faults,
 * or when reading the current task from interrupt context (copyin cannot
 * run on the interrupt stack).
 */
__private_extern__
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
		usraddr &= 0x00000000FFFFFFFFULL;
	}

	if(current_task()==task) {
		thread_t cur_thr = current_thread();
		// NOTE(review): recover is saved here and restored after the copyin —
		// presumably copyin's fault-recovery path can clobber thread->recover; confirm
		vm_offset_t recover_handler = cur_thr->recover;

		if(ml_at_interrupt_context()) {
			return KERN_FAILURE; // can't do copyin on interrupt stack
		}

		if(copyin(usraddr, kernaddr, size)) {	// nonzero means the copy faulted
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;	// restore saved recover handler
	} else {
		// Not our address space: go through the VM layer instead of copyin
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}
361
/*
 * chudxnu_task_write — copy 'size' bytes from the kernel buffer 'kernaddr'
 * to user address 'useraddr' of 'task'.
 *
 * Returns KERN_SUCCESS on a complete copy; KERN_FAILURE if the copy faults,
 * or when writing into the current task from interrupt context (copyout
 * cannot run on the interrupt stack).
 */
__private_extern__
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
		useraddr &= 0x00000000FFFFFFFFULL;
	}

	if(current_task()==task) {
		thread_t cur_thr = current_thread();
		// NOTE(review): recover is saved here and restored after the copyout —
		// presumably copyout's fault-recovery path can clobber thread->recover; confirm
		vm_offset_t recover_handler = cur_thr->recover;

		if(ml_at_interrupt_context()) {
			return KERN_FAILURE; // can't do copyout on interrupt stack
		}

		if(copyout(kernaddr, useraddr, size)) {	// nonzero means the copy faulted
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;	// restore saved recover handler
	} else {
		// Not our address space: go through the VM layer instead of copyout
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	return ret;
}
390
391__private_extern__
392kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
393{
394 while(size>0) {
395 ppnum_t pp;
396 addr64_t phys_addr;
397
398 pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
399 if(!pp) {
400 return KERN_FAILURE; /* Not mapped... */
401 }
402
403 phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
404 if(phys_addr >= mem_actual) {
405 return KERN_FAILURE; /* out of range */
406 }
407
408 if((phys_addr&0x1) || size==1) {
409 *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
410 ((uint8_t *)dstaddr)++;
411 srcaddr += sizeof(uint8_t);
412 size -= sizeof(uint8_t);
413 } else if((phys_addr&0x3) || size<=2) {
414 *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
415 ((uint16_t *)dstaddr)++;
416 srcaddr += sizeof(uint16_t);
417 size -= sizeof(uint16_t);
418 } else {
419 *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
420 ((uint32_t *)dstaddr)++;
421 srcaddr += sizeof(uint32_t);
422 size -= sizeof(uint32_t);
423 }
424 }
425 return KERN_SUCCESS;
426}
427
428__private_extern__
429kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
430{
431 while(size>0) {
432 ppnum_t pp;
433 addr64_t phys_addr;
434
435 pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
436 if(!pp) {
437 return KERN_FAILURE; /* Not mapped... */
438 }
439
440 phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
441 if(phys_addr >= mem_actual) {
442 return KERN_FAILURE; /* out of range */
443 }
444
445 if((phys_addr&0x1) || size==1) {
446 ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
447 ((uint8_t *)srcaddr)++;
448 dstaddr += sizeof(uint8_t);
449 size -= sizeof(uint8_t);
450 } else if((phys_addr&0x3) || size<=2) {
451 ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
452 ((uint16_t *)srcaddr)++;
453 dstaddr += sizeof(uint16_t);
454 size -= sizeof(uint16_t);
455 } else {
456 ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
457 ((uint32_t *)srcaddr)++;
458 dstaddr += sizeof(uint32_t);
459 size -= sizeof(uint32_t);
460 }
461 }
462
463 return KERN_SUCCESS;
464}
465
466// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
467// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
468// after sampling has finished.
469//
470// For an N-entry callstack:
471//
472// [0] current pc
473// [1..N-3] stack frames (including current one)
474// [N-2] current LR (return value if we're in a leaf function)
475// [N-1] current r0 (in case we've saved LR in r0)
476//
477
#define FP_LINK_OFFSET 2		/* LR save slot, in words, above the frame pointer */
#define STACK_ALIGNMENT_MASK 0xF	// PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK 0x3		// Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

/*
 * Sanity-check a frame/stack pointer value: above the first page, 16-byte
 * aligned, and (for supervisor-mode walks) inside the kernel stack range.
 * NOTE: expands references to the local variables 'supervisor',
 * 'kernStackMin' and 'kernStackMax', which must be in scope at every use.
 * 'addr' and the full expansion are now parenthesized (macro hygiene);
 * current call sites pass a plain identifier, so behavior is unchanged.
 */
#define VALID_STACK_ADDRESS(addr)	((addr) >= 0x1000ULL && \
					 ((addr) & STACK_ALIGNMENT_MASK) == 0x0 && \
					 (supervisor ? \
					  ((addr) >= kernStackMin && \
					   (addr) <= kernStackMax) : \
					  TRUE))
496
497
/*
 * chudxnu_thread_get_callstack64 — capture a raw callstack for 'thread'
 * into 'callStack' as 64-bit entries.
 *
 * Result layout (see the comment block above): [0] = current PC,
 * [1..N-3] = return addresses walked from the frame chain, [N-2] = current
 * LR, [N-1] = current R0 (both kept for later fixup of leaf frames).
 *
 * 'count' is in/out: capacity of 'callStack' on entry, entries written on
 * exit (0 on failure). 'user_only' selects the user savearea instead of the
 * kernel one. Returns KERN_SUCCESS, KERN_FAILURE (no savearea), or
 * KERN_RESOURCE_SHORTAGE (buffer too small to finish the walk).
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
					      uint64_t *callStack,
					      mach_msg_type_number_t *count,
					      boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;	// NOTE(review): written below but never read — dead store
	uint64_t kernStackMin = min_valid_stack_address();	// used by VALID_STACK_ADDRESS
	uint64_t kernStackMax = max_valid_stack_address();	// used by VALID_STACK_ADDRESS
	uint64_t *buffer = callStack;
	uint32_t tmpWord;	// scratch for 32-bit reads widened into 64-bit values
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;	// used by VALID_STACK_ADDRESS
	boolean_t is64Bit;
	struct savearea *sv;

	// Pick which register savearea to walk from.
	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	// MSR[PR] in the saved SRR1 tells us which mode the thread was in.
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		//   saved LR
		//   saved CR
		//   saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		// Slot size (and therefore the LR save offset) depends on the ABI width.
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;	// unreadable frame: stop the walk
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;	// treat an unreadable link as end-of-chain
		}

		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		// Frames must grow toward higher addresses; a backwards link ends the walk.
		// NOTE(review): a frame pointing at itself is not rejected here — the walk
		// then repeats until bufferMaxIndex is hit and RESOURCE_SHORTAGE is returned.
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
640
/*
 * chudxnu_thread_get_callstack — 32-bit variant of
 * chudxnu_thread_get_callstack64: identical walk, but results are stored in
 * a uint32_t buffer, so 64-bit PCs/LR/R0 are implicitly truncated on store.
 *
 * Result layout: [0] = current PC, [1..N-3] = return addresses,
 * [N-2] = current LR, [N-1] = current R0 (kept for later fixup).
 *
 * 'count' is in/out: capacity of 'callStack' on entry, entries written on
 * exit (0 on failure). 'user_only' selects the user savearea instead of the
 * kernel one. Returns KERN_SUCCESS, KERN_FAILURE (no savearea), or
 * KERN_RESOURCE_SHORTAGE (buffer too small to finish the walk).
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
					    uint32_t *callStack,
					    mach_msg_type_number_t *count,
					    boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;	// NOTE(review): written below but never read — dead store
	uint64_t kernStackMin = min_valid_stack_address();	// used by VALID_STACK_ADDRESS
	uint64_t kernStackMax = max_valid_stack_address();	// used by VALID_STACK_ADDRESS
	uint32_t *buffer = callStack;	// 32-bit entries: stores below truncate 64-bit values
	uint32_t tmpWord;	// scratch for 32-bit reads widened into 64-bit values
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;	// used by VALID_STACK_ADDRESS
	boolean_t is64Bit;
	struct savearea *sv;

	// Pick which register savearea to walk from.
	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	// MSR[PR] in the saved SRR1 tells us which mode the thread was in.
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0 (truncated to 32 bits).

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		//   saved LR
		//   saved CR
		//   saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		// Slot size (and therefore the LR save offset) depends on the ABI width.
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;	// unreadable frame: stop the walk
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;	// treat an unreadable link as end-of-chain
		}

		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;	// NOTE(review): implicit 64->32 truncation
			prevPC = pc;
		}

		// Frames must grow toward higher addresses; a backwards link ends the walk.
		// NOTE(review): a frame pointing at itself is not rejected here — the walk
		// then repeats until bufferMaxIndex is hit and RESOURCE_SHORTAGE is returned.
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
783
784#pragma mark **** DEPRECATED ****
785
786// DEPRECATED
787__private_extern__
788kern_return_t chudxnu_bind_current_thread(int cpu)
789{
790 return chudxnu_bind_thread(current_thread(), cpu);
791}
792
793// DEPRECATED
794kern_return_t chudxnu_unbind_current_thread(void)
795{
796 return chudxnu_unbind_thread(current_thread());
797}
798
799// DEPRECATED
800__private_extern__
801kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
802 mach_msg_type_number_t *count,
803 boolean_t user_only)
804{
805 return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
806}
807
808// DEPRECATED
809__private_extern__
810thread_t chudxnu_current_act(void)
811{
812 return chudxnu_current_thread();
813}