/* (extraction artifact removed: git-blame table header residue) */
/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/chud/chud_xnu.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/ipc_tt.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
30 | ||
31 | __private_extern__ | |
32 | kern_return_t chudxnu_bind_current_thread(int cpu) | |
33 | { | |
34 | if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */ | |
35 | thread_bind(current_thread(), processor_ptr[cpu]); | |
36 | thread_block((void (*)(void)) 0); | |
37 | return KERN_SUCCESS; | |
38 | } else { | |
39 | return KERN_FAILURE; | |
40 | } | |
41 | } | |
42 | ||
/*
 * Undo chudxnu_bind_current_thread(): allow the calling thread to be
 * scheduled on any processor again.
 */
__private_extern__
kern_return_t chudxnu_unbind_current_thread(void)
{
	thread_bind(current_thread(), PROCESSOR_NULL);
	return KERN_SUCCESS;
}
49 | ||
/* Return the current activation's topmost GPR savearea (user or kernel). */
static savearea *chudxnu_private_get_regs(void)
{
	return current_act()->mact.pcb;		// take the top savearea (user or kernel)
}
54 | ||
/* Return the current activation's topmost *user* GPR savearea. */
static savearea *chudxnu_private_get_user_regs(void)
{
	return find_user_regs(current_act());	// take the top user savearea (skip any kernel saveareas)
}
59 | ||
/*
 * Return the current activation's topmost floating-point savearea
 * (user or kernel).  The live FP context is flushed first so the
 * savearea contents are current.
 */
static savearea_fpu *chudxnu_private_get_fp_regs(void)
{
	fpu_save(current_act()->mact.curctx);		// just in case it's live, save it
	return current_act()->mact.curctx->FPUsave;	// take the top savearea (user or kernel)
}
65 | ||
/* Return the current activation's topmost *user* floating-point savearea. */
static savearea_fpu *chudxnu_private_get_user_fp_regs(void)
{
	return find_user_fpu(current_act());	// take the top user savearea (skip any kernel saveareas)
}
70 | ||
/*
 * Return the current activation's topmost vector (AltiVec) savearea
 * (user or kernel).  The live vector context is flushed first so the
 * savearea contents are current.
 */
static savearea_vec *chudxnu_private_get_vec_regs(void)
{
	vec_save(current_act()->mact.curctx);		// just in case it's live, save it
	return current_act()->mact.curctx->VMXsave;	// take the top savearea (user or kernel)
}
76 | ||
/* Return the current activation's topmost *user* vector savearea. */
static savearea_vec *chudxnu_private_get_user_vec_regs(void)
{
	return find_user_vec(current_act());	// take the top user savearea (skip any kernel saveareas)
}
81 | ||
82 | __private_extern__ | |
83 | kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv) | |
84 | { | |
85 | struct ppc_thread_state *ts; | |
86 | struct ppc_thread_state64 *xts; | |
87 | ||
88 | switch(flavor) { | |
89 | case PPC_THREAD_STATE: | |
90 | if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */ | |
91 | *count = 0; | |
92 | return KERN_INVALID_ARGUMENT; | |
93 | } | |
94 | ts = (struct ppc_thread_state *) tstate; | |
95 | if(sv) { | |
96 | ts->r0 = (unsigned int)sv->save_r0; | |
97 | ts->r1 = (unsigned int)sv->save_r1; | |
98 | ts->r2 = (unsigned int)sv->save_r2; | |
99 | ts->r3 = (unsigned int)sv->save_r3; | |
100 | ts->r4 = (unsigned int)sv->save_r4; | |
101 | ts->r5 = (unsigned int)sv->save_r5; | |
102 | ts->r6 = (unsigned int)sv->save_r6; | |
103 | ts->r7 = (unsigned int)sv->save_r7; | |
104 | ts->r8 = (unsigned int)sv->save_r8; | |
105 | ts->r9 = (unsigned int)sv->save_r9; | |
106 | ts->r10 = (unsigned int)sv->save_r10; | |
107 | ts->r11 = (unsigned int)sv->save_r11; | |
108 | ts->r12 = (unsigned int)sv->save_r12; | |
109 | ts->r13 = (unsigned int)sv->save_r13; | |
110 | ts->r14 = (unsigned int)sv->save_r14; | |
111 | ts->r15 = (unsigned int)sv->save_r15; | |
112 | ts->r16 = (unsigned int)sv->save_r16; | |
113 | ts->r17 = (unsigned int)sv->save_r17; | |
114 | ts->r18 = (unsigned int)sv->save_r18; | |
115 | ts->r19 = (unsigned int)sv->save_r19; | |
116 | ts->r20 = (unsigned int)sv->save_r20; | |
117 | ts->r21 = (unsigned int)sv->save_r21; | |
118 | ts->r22 = (unsigned int)sv->save_r22; | |
119 | ts->r23 = (unsigned int)sv->save_r23; | |
120 | ts->r24 = (unsigned int)sv->save_r24; | |
121 | ts->r25 = (unsigned int)sv->save_r25; | |
122 | ts->r26 = (unsigned int)sv->save_r26; | |
123 | ts->r27 = (unsigned int)sv->save_r27; | |
124 | ts->r28 = (unsigned int)sv->save_r28; | |
125 | ts->r29 = (unsigned int)sv->save_r29; | |
126 | ts->r30 = (unsigned int)sv->save_r30; | |
127 | ts->r31 = (unsigned int)sv->save_r31; | |
128 | ts->cr = (unsigned int)sv->save_cr; | |
129 | ts->xer = (unsigned int)sv->save_xer; | |
130 | ts->lr = (unsigned int)sv->save_lr; | |
131 | ts->ctr = (unsigned int)sv->save_ctr; | |
132 | ts->srr0 = (unsigned int)sv->save_srr0; | |
133 | ts->srr1 = (unsigned int)sv->save_srr1; | |
134 | ts->mq = 0; | |
135 | ts->vrsave = (unsigned int)sv->save_vrsave; | |
136 | } else { | |
137 | bzero((void *)ts, sizeof(struct ppc_thread_state)); | |
138 | } | |
139 | *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */ | |
140 | return KERN_SUCCESS; | |
141 | break; | |
142 | case PPC_THREAD_STATE64: | |
143 | if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */ | |
144 | return KERN_INVALID_ARGUMENT; | |
145 | } | |
146 | xts = (struct ppc_thread_state64 *) tstate; | |
147 | if(sv) { | |
148 | xts->r0 = sv->save_r0; | |
149 | xts->r1 = sv->save_r1; | |
150 | xts->r2 = sv->save_r2; | |
151 | xts->r3 = sv->save_r3; | |
152 | xts->r4 = sv->save_r4; | |
153 | xts->r5 = sv->save_r5; | |
154 | xts->r6 = sv->save_r6; | |
155 | xts->r7 = sv->save_r7; | |
156 | xts->r8 = sv->save_r8; | |
157 | xts->r9 = sv->save_r9; | |
158 | xts->r10 = sv->save_r10; | |
159 | xts->r11 = sv->save_r11; | |
160 | xts->r12 = sv->save_r12; | |
161 | xts->r13 = sv->save_r13; | |
162 | xts->r14 = sv->save_r14; | |
163 | xts->r15 = sv->save_r15; | |
164 | xts->r16 = sv->save_r16; | |
165 | xts->r17 = sv->save_r17; | |
166 | xts->r18 = sv->save_r18; | |
167 | xts->r19 = sv->save_r19; | |
168 | xts->r20 = sv->save_r20; | |
169 | xts->r21 = sv->save_r21; | |
170 | xts->r22 = sv->save_r22; | |
171 | xts->r23 = sv->save_r23; | |
172 | xts->r24 = sv->save_r24; | |
173 | xts->r25 = sv->save_r25; | |
174 | xts->r26 = sv->save_r26; | |
175 | xts->r27 = sv->save_r27; | |
176 | xts->r28 = sv->save_r28; | |
177 | xts->r29 = sv->save_r29; | |
178 | xts->r30 = sv->save_r30; | |
179 | xts->r31 = sv->save_r31; | |
180 | xts->cr = sv->save_cr; | |
181 | xts->xer = sv->save_xer; | |
182 | xts->lr = sv->save_lr; | |
183 | xts->ctr = sv->save_ctr; | |
184 | xts->srr0 = sv->save_srr0; | |
185 | xts->srr1 = sv->save_srr1; | |
186 | xts->vrsave = sv->save_vrsave; | |
187 | } else { | |
188 | bzero((void *)xts, sizeof(struct ppc_thread_state64)); | |
189 | } | |
190 | *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */ | |
191 | return KERN_SUCCESS; | |
192 | break; | |
193 | default: | |
194 | *count = 0; | |
195 | return KERN_INVALID_ARGUMENT; | |
196 | break; | |
197 | } | |
198 | } | |
199 | ||
200 | __private_extern__ | |
201 | kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count) | |
202 | { | |
203 | struct ppc_thread_state *ts; | |
204 | struct ppc_thread_state64 *xts; | |
205 | ||
206 | switch(flavor) { | |
207 | case PPC_THREAD_STATE: | |
208 | if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */ | |
209 | return KERN_INVALID_ARGUMENT; | |
210 | } | |
211 | ts = (struct ppc_thread_state *) tstate; | |
212 | if(sv) { | |
213 | sv->save_r0 = (uint64_t)ts->r0; | |
214 | sv->save_r1 = (uint64_t)ts->r1; | |
215 | sv->save_r2 = (uint64_t)ts->r2; | |
216 | sv->save_r3 = (uint64_t)ts->r3; | |
217 | sv->save_r4 = (uint64_t)ts->r4; | |
218 | sv->save_r5 = (uint64_t)ts->r5; | |
219 | sv->save_r6 = (uint64_t)ts->r6; | |
220 | sv->save_r7 = (uint64_t)ts->r7; | |
221 | sv->save_r8 = (uint64_t)ts->r8; | |
222 | sv->save_r9 = (uint64_t)ts->r9; | |
223 | sv->save_r10 = (uint64_t)ts->r10; | |
224 | sv->save_r11 = (uint64_t)ts->r11; | |
225 | sv->save_r12 = (uint64_t)ts->r12; | |
226 | sv->save_r13 = (uint64_t)ts->r13; | |
227 | sv->save_r14 = (uint64_t)ts->r14; | |
228 | sv->save_r15 = (uint64_t)ts->r15; | |
229 | sv->save_r16 = (uint64_t)ts->r16; | |
230 | sv->save_r17 = (uint64_t)ts->r17; | |
231 | sv->save_r18 = (uint64_t)ts->r18; | |
232 | sv->save_r19 = (uint64_t)ts->r19; | |
233 | sv->save_r20 = (uint64_t)ts->r20; | |
234 | sv->save_r21 = (uint64_t)ts->r21; | |
235 | sv->save_r22 = (uint64_t)ts->r22; | |
236 | sv->save_r23 = (uint64_t)ts->r23; | |
237 | sv->save_r24 = (uint64_t)ts->r24; | |
238 | sv->save_r25 = (uint64_t)ts->r25; | |
239 | sv->save_r26 = (uint64_t)ts->r26; | |
240 | sv->save_r27 = (uint64_t)ts->r27; | |
241 | sv->save_r28 = (uint64_t)ts->r28; | |
242 | sv->save_r29 = (uint64_t)ts->r29; | |
243 | sv->save_r30 = (uint64_t)ts->r30; | |
244 | sv->save_r31 = (uint64_t)ts->r31; | |
245 | sv->save_cr = ts->cr; | |
246 | sv->save_xer = (uint64_t)ts->xer; | |
247 | sv->save_lr = (uint64_t)ts->lr; | |
248 | sv->save_ctr = (uint64_t)ts->ctr; | |
249 | sv->save_srr0 = (uint64_t)ts->srr0; | |
250 | sv->save_srr1 = (uint64_t)ts->srr1; | |
251 | sv->save_vrsave = ts->vrsave; | |
252 | return KERN_SUCCESS; | |
253 | } else { | |
254 | return KERN_FAILURE; | |
255 | } | |
256 | break; | |
257 | case PPC_THREAD_STATE64: | |
258 | if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */ | |
259 | return KERN_INVALID_ARGUMENT; | |
260 | } | |
261 | xts = (struct ppc_thread_state64 *) tstate; | |
262 | if(sv) { | |
263 | sv->save_r0 = xts->r0; | |
264 | sv->save_r1 = xts->r1; | |
265 | sv->save_r2 = xts->r2; | |
266 | sv->save_r3 = xts->r3; | |
267 | sv->save_r4 = xts->r4; | |
268 | sv->save_r5 = xts->r5; | |
269 | sv->save_r6 = xts->r6; | |
270 | sv->save_r7 = xts->r7; | |
271 | sv->save_r8 = xts->r8; | |
272 | sv->save_r9 = xts->r9; | |
273 | sv->save_r10 = xts->r10; | |
274 | sv->save_r11 = xts->r11; | |
275 | sv->save_r12 = xts->r12; | |
276 | sv->save_r13 = xts->r13; | |
277 | sv->save_r14 = xts->r14; | |
278 | sv->save_r15 = xts->r15; | |
279 | sv->save_r16 = xts->r16; | |
280 | sv->save_r17 = xts->r17; | |
281 | sv->save_r18 = xts->r18; | |
282 | sv->save_r19 = xts->r19; | |
283 | sv->save_r20 = xts->r20; | |
284 | sv->save_r21 = xts->r21; | |
285 | sv->save_r22 = xts->r22; | |
286 | sv->save_r23 = xts->r23; | |
287 | sv->save_r24 = xts->r24; | |
288 | sv->save_r25 = xts->r25; | |
289 | sv->save_r26 = xts->r26; | |
290 | sv->save_r27 = xts->r27; | |
291 | sv->save_r28 = xts->r28; | |
292 | sv->save_r29 = xts->r29; | |
293 | sv->save_r30 = xts->r30; | |
294 | sv->save_r31 = xts->r31; | |
295 | sv->save_cr = xts->cr; | |
296 | sv->save_xer = xts->xer; | |
297 | sv->save_lr = xts->lr; | |
298 | sv->save_ctr = xts->ctr; | |
299 | sv->save_srr0 = xts->srr0; | |
300 | sv->save_srr1 = xts->srr1; | |
301 | sv->save_vrsave = xts->vrsave; | |
302 | return KERN_SUCCESS; | |
303 | } else { | |
304 | return KERN_FAILURE; | |
305 | } | |
306 | } | |
307 | } | |
308 | ||
/*
 * Get machine state for a thread.  For the current activation, the
 * thread-state flavors are read straight out of the saveareas; FP and
 * vector flavors (user only) and any other activation are delegated to
 * machine_thread_get_state().
 *
 * user_only: if TRUE, skip kernel saveareas and return user state only.
 * Returns KERN_INVALID_ARGUMENT (with *count = 0) for unsupported
 * flavor/user_only combinations on the current activation.
 */
__private_extern__
kern_return_t chudxnu_thread_get_state(thread_act_t thr_act,
					thread_flavor_t flavor,
					thread_state_t tstate,
					mach_msg_type_number_t *count,
					boolean_t user_only)
{
	if(thr_act==current_act()) {
		if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) {
			struct savearea *sv;
			if(user_only) {
				sv = chudxnu_private_get_user_regs();
			} else {
				sv = chudxnu_private_get_regs();
			}
			return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
		} else if(flavor==PPC_FLOAT_STATE && user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor FP
			return machine_thread_get_state(current_act(), flavor, tstate, count);
		} else if(flavor==PPC_VECTOR_STATE && user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor VMX
			return machine_thread_get_state(current_act(), flavor, tstate, count);
		} else {
			*count = 0;
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		/* Not the current activation: let the generic machine layer handle it */
		return machine_thread_get_state(thr_act, flavor, tstate, count);
	}
}
339 | ||
/*
 * Set machine state for a thread (inverse of chudxnu_thread_get_state).
 * For the current activation, thread-state flavors are written directly
 * into the saveareas; FP and vector flavors (user only) and any other
 * activation are delegated to machine_thread_set_state().
 *
 * user_only: if TRUE, target the user saveareas only.
 */
__private_extern__
kern_return_t chudxnu_thread_set_state(thread_act_t thr_act,
					thread_flavor_t flavor,
					thread_state_t tstate,
					mach_msg_type_number_t count,
					boolean_t user_only)
{
	if(thr_act==current_act()) {
		if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) {
			struct savearea *sv;
			if(user_only) {
				sv = chudxnu_private_get_user_regs();
			} else {
				sv = chudxnu_private_get_regs();
			}
			return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
		} else if(flavor==PPC_FLOAT_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor FP
			return machine_thread_set_state(current_act(), flavor, tstate, count);
		} else if(flavor==PPC_VECTOR_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor VMX
			return machine_thread_set_state(current_act(), flavor, tstate, count);
		} else {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		/* Not the current activation: let the generic machine layer handle it */
		return machine_thread_set_state(thr_act, flavor, tstate, count);
	}
}
369 | ||
370 | static inline kern_return_t chudxnu_private_task_read_bytes(task_t task, vm_offset_t addr, int size, void *data) | |
371 | { | |
372 | ||
373 | kern_return_t ret; | |
374 | ||
375 | if(task==kernel_task) { | |
376 | if(size==sizeof(unsigned int)) { | |
377 | addr64_t phys_addr; | |
378 | ppnum_t pp; | |
379 | ||
380 | pp = pmap_find_phys(kernel_pmap, addr); /* Get the page number */ | |
381 | if(!pp) return KERN_FAILURE; /* Not mapped... */ | |
382 | ||
383 | phys_addr = ((addr64_t)pp << 12) | (addr & 0x0000000000000FFFULL); /* Shove in the page offset */ | |
384 | ||
385 | if(phys_addr < mem_actual) { /* Sanity check: is it in memory? */ | |
386 | *((uint32_t *)data) = ml_phys_read_64(phys_addr); | |
387 | return KERN_SUCCESS; | |
388 | } | |
389 | } else { | |
390 | return KERN_FAILURE; | |
391 | } | |
392 | } else { | |
393 | ||
394 | ret = KERN_SUCCESS; /* Assume everything worked */ | |
395 | if(copyin((void *)addr, data, size)) ret = KERN_FAILURE; /* Get memory, if non-zero rc, it didn't work */ | |
396 | return ret; | |
397 | } | |
398 | } | |
399 | ||
// chudxnu_current_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped the program as it was saving values into the previous stack
// frame, etc.) after sampling has finished.
//
// For an N-entry callstack:
//
//   [0]      current pc
//   [1..N-3] stack frames (including current one)
//   [N-2]    current LR (return value if we're in a leaf function)
//   [N-1]    current r0 (in case we've saved LR in r0)
//
411 | ||
#define FP_LINK_OFFSET 2		/* LR save slot: 2 words above the frame (stack) pointer */
#define STACK_ALIGNMENT_MASK 0xF	// PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK 0x3		// Instructions are always 4-bytes wide

/* User mode <=> MSR[PR] set */
#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

/* NOTE: references the locals 'supervisor', 'kernStackMin' and 'kernStackMax'
 * of the expanding function — only usable inside the callstack walker below. */
#define VALID_STACK_ADDRESS(addr) (addr>=0x1000 && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
425 | ||
/*
 * Gather a raw callstack for the current thread by walking the chain of
 * saved stack pointers (see the layout comment above).
 *
 * callStack: output buffer; [0] = PC, then frame return addresses, then
 *            the current LR and R0 as the last two entries.
 * count:     in: buffer capacity; out: entries written (0 on failure).
 * user_only: if TRUE, walk the user stack only.
 *
 * Returns KERN_SUCCESS, KERN_FAILURE (no savearea, or would need
 * copyin() from interrupt context), or KERN_RESOURCE_SHORTAGE (buffer
 * too small).
 */
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack,
							mach_msg_type_number_t *count,
							boolean_t user_only)
{
	kern_return_t kr;
	vm_address_t nextFramePointer = 0;
	vm_address_t currPC, currLR, currR0;
	vm_address_t framePointer;
	vm_address_t prevPC = 0;	/* NOTE(review): assigned but never read */
	vm_address_t kernStackMin = min_valid_stack_address();
	vm_address_t kernStackMax = max_valid_stack_address();
	unsigned int *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	struct savearea *sv;

	if(user_only) {
		sv = chudxnu_private_get_user_regs();
	} else {
		sv = chudxnu_private_get_regs();
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* MSR[PR] of the saved SRR1 tells us which stack we are walking */
	supervisor = SUPERVISOR_MODE(sv->save_srr1);

	if(!supervisor && ml_at_interrupt_context()) { // can't do copyin() if on interrupt stack
		*count = 0;
		return KERN_FAILURE;
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		vm_address_t pc = 0;
		// Above the stack pointer, the following values are saved:
		//   saved LR
		//   saved CR
		//   saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		volatile vm_address_t fp_link = (vm_address_t)(((unsigned *)framePointer)+FP_LINK_OFFSET);

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			kr = chudxnu_private_task_read_bytes(kernel_task, fp_link, sizeof(unsigned int), &pc);
		} else {
			kr = chudxnu_private_task_read_bytes(current_task(), fp_link, sizeof(unsigned int), &pc);
		}
		if(kr!=KERN_SUCCESS) {
			// IOLog("task_read_callstack: unable to read framePointer: %08x\n",framePointer);
			pc = 0;
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid

		if(supervisor) {
			kr = chudxnu_private_task_read_bytes(kernel_task, framePointer, sizeof(unsigned int), &nextFramePointer);
		} else {
			kr = chudxnu_private_task_read_bytes(current_task(), framePointer, sizeof(unsigned int), &nextFramePointer);
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;
		}

		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		// Frames must grow toward higher addresses; a non-increasing
		// link (or a zero read above) terminates the walk.
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack.  This means that we won't worry
	// about these values messing up stack compression.  These end up being used
	// by FixupStack.
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
541 | ||
542 | __private_extern__ | |
543 | int chudxnu_task_threads(task_t task, | |
544 | thread_act_array_t *thr_act_list, | |
545 | mach_msg_type_number_t *count) | |
546 | { | |
547 | mach_msg_type_number_t task_thread_count = 0; | |
548 | kern_return_t kr; | |
549 | ||
550 | kr = task_threads(current_task(), thr_act_list, count); | |
551 | if(kr==KERN_SUCCESS) { | |
552 | thread_act_t thr_act; | |
553 | int i, state_count; | |
554 | for(i=0; i<(*count); i++) { | |
555 | thr_act = convert_port_to_act(((ipc_port_t *)(*thr_act_list))[i]); | |
556 | /* undo the mig conversion task_threads does */ | |
557 | thr_act_list[i] = thr_act; | |
558 | } | |
559 | } | |
560 | return kr; | |
561 | } | |
562 | ||
/* Return the current activation (thin wrapper for CHUD clients). */
__private_extern__
thread_act_t chudxnu_current_act(void)
{
	return current_act();
}
568 | ||
/* Return the current task (thin wrapper for CHUD clients). */
__private_extern__
task_t chudxnu_current_task(void)
{
	return current_task();
}
574 | ||
/* Thin wrapper around thread_info() for CHUD clients; all arguments and
 * the return value pass straight through. */
__private_extern__
kern_return_t chudxnu_thread_info(thread_act_t thr_act,
					thread_flavor_t flavor,
					thread_info_t thread_info_out,
					mach_msg_type_number_t *thread_info_count)
{
	return thread_info(thr_act, flavor, thread_info_out, thread_info_count);
}