/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
#include <ppc/fpu_protos.h>

// forward declarations
extern kern_return_t machine_thread_get_kern_state( thread_t thread,
                                                    thread_flavor_t flavor,
                                                    thread_state_t tstate,
                                                    mach_msg_type_number_t *count);


#pragma mark **** thread binding ****

__private_extern__
kern_return_t chudxnu_bind_thread(thread_t thread, int cpu)
{
    if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */
        thread_bind(thread, cpu_to_processor(cpu));
        if(thread==current_thread()) {
            (void)thread_block(THREAD_CONTINUE_NULL);
        }
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_unbind_thread(thread_t thread)
{
    thread_bind(thread, PROCESSOR_NULL);
    return KERN_SUCCESS;
}
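
/*
 * Usage sketch (illustrative only): a CHUD client might pin the current
 * thread to CPU 1 while sampling, then release the binding. Error handling
 * is elided.
 *
 *     if(chudxnu_bind_thread(current_thread(), 1) == KERN_SUCCESS) {
 *         // ... sample here; the current thread now runs on CPU 1 ...
 *         chudxnu_unbind_thread(current_thread());
 *     }
 */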

#pragma mark **** thread state ****

__private_extern__
kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            ts->r0 = (unsigned int)sv->save_r0;
            ts->r1 = (unsigned int)sv->save_r1;
            ts->r2 = (unsigned int)sv->save_r2;
            ts->r3 = (unsigned int)sv->save_r3;
            ts->r4 = (unsigned int)sv->save_r4;
            ts->r5 = (unsigned int)sv->save_r5;
            ts->r6 = (unsigned int)sv->save_r6;
            ts->r7 = (unsigned int)sv->save_r7;
            ts->r8 = (unsigned int)sv->save_r8;
            ts->r9 = (unsigned int)sv->save_r9;
            ts->r10 = (unsigned int)sv->save_r10;
            ts->r11 = (unsigned int)sv->save_r11;
            ts->r12 = (unsigned int)sv->save_r12;
            ts->r13 = (unsigned int)sv->save_r13;
            ts->r14 = (unsigned int)sv->save_r14;
            ts->r15 = (unsigned int)sv->save_r15;
            ts->r16 = (unsigned int)sv->save_r16;
            ts->r17 = (unsigned int)sv->save_r17;
            ts->r18 = (unsigned int)sv->save_r18;
            ts->r19 = (unsigned int)sv->save_r19;
            ts->r20 = (unsigned int)sv->save_r20;
            ts->r21 = (unsigned int)sv->save_r21;
            ts->r22 = (unsigned int)sv->save_r22;
            ts->r23 = (unsigned int)sv->save_r23;
            ts->r24 = (unsigned int)sv->save_r24;
            ts->r25 = (unsigned int)sv->save_r25;
            ts->r26 = (unsigned int)sv->save_r26;
            ts->r27 = (unsigned int)sv->save_r27;
            ts->r28 = (unsigned int)sv->save_r28;
            ts->r29 = (unsigned int)sv->save_r29;
            ts->r30 = (unsigned int)sv->save_r30;
            ts->r31 = (unsigned int)sv->save_r31;
            ts->cr = (unsigned int)sv->save_cr;
            ts->xer = (unsigned int)sv->save_xer;
            ts->lr = (unsigned int)sv->save_lr;
            ts->ctr = (unsigned int)sv->save_ctr;
            ts->srr0 = (unsigned int)sv->save_srr0;
            ts->srr1 = (unsigned int)sv->save_srr1;
            ts->mq = 0;
            ts->vrsave = (unsigned int)sv->save_vrsave;
        } else {
            bzero((void *)ts, sizeof(struct ppc_thread_state));
        }
        *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
        break;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            *count = 0; /* zero the count on failure, matching the 32-bit flavor */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            xts->r0 = sv->save_r0;
            xts->r1 = sv->save_r1;
            xts->r2 = sv->save_r2;
            xts->r3 = sv->save_r3;
            xts->r4 = sv->save_r4;
            xts->r5 = sv->save_r5;
            xts->r6 = sv->save_r6;
            xts->r7 = sv->save_r7;
            xts->r8 = sv->save_r8;
            xts->r9 = sv->save_r9;
            xts->r10 = sv->save_r10;
            xts->r11 = sv->save_r11;
            xts->r12 = sv->save_r12;
            xts->r13 = sv->save_r13;
            xts->r14 = sv->save_r14;
            xts->r15 = sv->save_r15;
            xts->r16 = sv->save_r16;
            xts->r17 = sv->save_r17;
            xts->r18 = sv->save_r18;
            xts->r19 = sv->save_r19;
            xts->r20 = sv->save_r20;
            xts->r21 = sv->save_r21;
            xts->r22 = sv->save_r22;
            xts->r23 = sv->save_r23;
            xts->r24 = sv->save_r24;
            xts->r25 = sv->save_r25;
            xts->r26 = sv->save_r26;
            xts->r27 = sv->save_r27;
            xts->r28 = sv->save_r28;
            xts->r29 = sv->save_r29;
            xts->r30 = sv->save_r30;
            xts->r31 = sv->save_r31;
            xts->cr = sv->save_cr;
            xts->xer = sv->save_xer;
            xts->lr = sv->save_lr;
            xts->ctr = sv->save_ctr;
            xts->srr0 = sv->save_srr0;
            xts->srr1 = sv->save_srr1;
            xts->vrsave = sv->save_vrsave;
        } else {
            bzero((void *)xts, sizeof(struct ppc_thread_state64));
        }
        *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
        break;
    default:
        *count = 0;
        return KERN_INVALID_ARGUMENT;
        break;
    }
}

__private_extern__
kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            sv->save_r0 = (uint64_t)ts->r0;
            sv->save_r1 = (uint64_t)ts->r1;
            sv->save_r2 = (uint64_t)ts->r2;
            sv->save_r3 = (uint64_t)ts->r3;
            sv->save_r4 = (uint64_t)ts->r4;
            sv->save_r5 = (uint64_t)ts->r5;
            sv->save_r6 = (uint64_t)ts->r6;
            sv->save_r7 = (uint64_t)ts->r7;
            sv->save_r8 = (uint64_t)ts->r8;
            sv->save_r9 = (uint64_t)ts->r9;
            sv->save_r10 = (uint64_t)ts->r10;
            sv->save_r11 = (uint64_t)ts->r11;
            sv->save_r12 = (uint64_t)ts->r12;
            sv->save_r13 = (uint64_t)ts->r13;
            sv->save_r14 = (uint64_t)ts->r14;
            sv->save_r15 = (uint64_t)ts->r15;
            sv->save_r16 = (uint64_t)ts->r16;
            sv->save_r17 = (uint64_t)ts->r17;
            sv->save_r18 = (uint64_t)ts->r18;
            sv->save_r19 = (uint64_t)ts->r19;
            sv->save_r20 = (uint64_t)ts->r20;
            sv->save_r21 = (uint64_t)ts->r21;
            sv->save_r22 = (uint64_t)ts->r22;
            sv->save_r23 = (uint64_t)ts->r23;
            sv->save_r24 = (uint64_t)ts->r24;
            sv->save_r25 = (uint64_t)ts->r25;
            sv->save_r26 = (uint64_t)ts->r26;
            sv->save_r27 = (uint64_t)ts->r27;
            sv->save_r28 = (uint64_t)ts->r28;
            sv->save_r29 = (uint64_t)ts->r29;
            sv->save_r30 = (uint64_t)ts->r30;
            sv->save_r31 = (uint64_t)ts->r31;
            sv->save_cr = ts->cr;
            sv->save_xer = (uint64_t)ts->xer;
            sv->save_lr = (uint64_t)ts->lr;
            sv->save_ctr = (uint64_t)ts->ctr;
            sv->save_srr0 = (uint64_t)ts->srr0;
            sv->save_srr1 = (uint64_t)ts->srr1;
            sv->save_vrsave = ts->vrsave;
            return KERN_SUCCESS;
        }
        break;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            sv->save_r0 = xts->r0;
            sv->save_r1 = xts->r1;
            sv->save_r2 = xts->r2;
            sv->save_r3 = xts->r3;
            sv->save_r4 = xts->r4;
            sv->save_r5 = xts->r5;
            sv->save_r6 = xts->r6;
            sv->save_r7 = xts->r7;
            sv->save_r8 = xts->r8;
            sv->save_r9 = xts->r9;
            sv->save_r10 = xts->r10;
            sv->save_r11 = xts->r11;
            sv->save_r12 = xts->r12;
            sv->save_r13 = xts->r13;
            sv->save_r14 = xts->r14;
            sv->save_r15 = xts->r15;
            sv->save_r16 = xts->r16;
            sv->save_r17 = xts->r17;
            sv->save_r18 = xts->r18;
            sv->save_r19 = xts->r19;
            sv->save_r20 = xts->r20;
            sv->save_r21 = xts->r21;
            sv->save_r22 = xts->r22;
            sv->save_r23 = xts->r23;
            sv->save_r24 = xts->r24;
            sv->save_r25 = xts->r25;
            sv->save_r26 = xts->r26;
            sv->save_r27 = xts->r27;
            sv->save_r28 = xts->r28;
            sv->save_r29 = xts->r29;
            sv->save_r30 = xts->r30;
            sv->save_r31 = xts->r31;
            sv->save_cr = xts->cr;
            sv->save_xer = xts->xer;
            sv->save_lr = xts->lr;
            sv->save_ctr = xts->ctr;
            sv->save_srr0 = xts->srr0;
            sv->save_srr1 = xts->srr1;
            sv->save_vrsave = xts->vrsave;
            return KERN_SUCCESS;
        }
    }
    return KERN_FAILURE;
}

__private_extern__
kern_return_t chudxnu_thread_user_state_available(thread_t thread)
{
    if(find_user_regs(thread)) {
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_thread_get_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t *count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
    } else {
        if(user_only) {
            return machine_thread_get_state(thread, flavor, tstate, count);
        } else {
            // doesn't do FP or VMX
            return machine_thread_get_kern_state(thread, flavor, tstate, count);
        }
    }
}

__private_extern__
kern_return_t chudxnu_thread_set_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
    } else {
        return machine_thread_set_state(thread, flavor, tstate, count); // always user
    }
}
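
/*
 * Usage sketch (illustrative only): reading the 32-bit GPR state of the
 * current thread's user context. PPC_THREAD_STATE and PPC_THREAD_STATE_COUNT
 * come from the Mach thread-status headers.
 *
 *     struct ppc_thread_state ts;
 *     mach_msg_type_number_t count = PPC_THREAD_STATE_COUNT;
 *     kern_return_t kr = chudxnu_thread_get_state(current_thread(),
 *                            PPC_THREAD_STATE, (thread_state_t)&ts,
 *                            &count, TRUE);   // TRUE: user state only
 *     if(kr == KERN_SUCCESS) {
 *         // ts.srr0 holds the user PC, ts.r1 the user stack pointer.
 *     }
 */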

#pragma mark **** task memory read/write ****

__private_extern__
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        usraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}

__private_extern__
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        useraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}
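
/*
 * Usage sketch (illustrative only): copying 4 bytes from a hypothetical user
 * address `uaddr' in a hypothetical `target_task' into a kernel-side word.
 *
 *     uint32_t word;
 *     if(chudxnu_task_read(target_task, &word, uaddr, sizeof(word)) != KERN_SUCCESS) {
 *         // address not readable (unmapped, or called at interrupt context
 *         // while target_task is the current task)
 *     }
 */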

__private_extern__
kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    while(size>0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr&0x1) || size==1) {
            *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
            dstaddr = (uint8_t *)dstaddr + 1; /* advance without incrementing a cast lvalue */
            srcaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<4) { /* size<4 (not size<=2), so a 3-byte tail isn't read as a full word */
            *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
            dstaddr = (uint16_t *)dstaddr + 1;
            srcaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
            dstaddr = (uint32_t *)dstaddr + 1;
            srcaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
{
    while(size>0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr&0x1) || size==1) {
            ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
            srcaddr = (uint8_t *)srcaddr + 1; /* advance without incrementing a cast lvalue */
            dstaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<4) { /* size<4 (not size<=2), so a 3-byte tail isn't written as a full word */
            ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
            srcaddr = (uint16_t *)srcaddr + 1;
            dstaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
            srcaddr = (uint32_t *)srcaddr + 1;
            dstaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }

    return KERN_SUCCESS;
}

// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped the program as it was saving values into the previous
// stack frame, etc.) after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including current one)
// [N-2]    current LR (return value if we're in a leaf function)
// [N-1]    current r0 (in case we've saved LR in r0)
//
#define FP_LINK_OFFSET       2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK  0x3 // Instructions are always 4-bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

#define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
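// Note: VALID_STACK_ADDRESS intentionally references the locals `supervisor',
// `kernStackMin', and `kernStackMax' from the scope of the function it is
// expanded in, so it is only meaningful inside the callstack routines below.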


__private_extern__
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
                                              uint64_t *callStack,
                                              mach_msg_type_number_t *count,
                                              boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0; // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //   saved LR
        //   saved CR
        //   saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack). However, if we catch the program in between filling in the return
        // address and lowering the stack, we still want to have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
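
/*
 * Usage sketch (illustrative only): sample the current thread's user-mode
 * callstack into a fixed-size buffer; the depth of 32 is arbitrary.
 *
 *     uint64_t frames[32];
 *     mach_msg_type_number_t nframes = 32;
 *     if(chudxnu_thread_get_callstack64(current_thread(), frames,
 *                                       &nframes, TRUE) == KERN_SUCCESS) {
 *         // frames[0] is the PC, frames[nframes-2] the LR,
 *         // frames[nframes-1] r0; the entries in between are return
 *         // addresses read from successive stack frames.
 *     }
 */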

__private_extern__
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
                                            uint32_t *callStack,
                                            mach_msg_type_number_t *count,
                                            boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0; // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //   saved LR
        //   saved CR
        //   saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack). However, if we catch the program in between filling in the return
        // address and lowering the stack, we still want to have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}

#pragma mark **** task and thread info ****

__private_extern__
boolean_t chudxnu_is_64bit_task(task_t task)
{
    return (task_has_64BitAddr(task));
}

#define THING_TASK   0
#define THING_THREAD 1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t chudxnu_private_processor_set_things( processor_set_t pset,
                                                           mach_port_t **thing_list,
                                                           mach_msg_type_number_t *count,
                                                           int type)
{
    unsigned int actual;    /* this many things */
    unsigned int maxthings;
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);

            return (KERN_FAILURE);
        }

        if (type == THING_TASK)
            maxthings = pset->task_count;
        else
            maxthings = pset->thread_count;

        /* do we have the memory we need? */

        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the processor_set is locked & active */

    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t task, *tasks = (task_t *)addr;

        for (task = (task_t)queue_first(&pset->tasks);
             !queue_end(&pset->tasks, (queue_entry_t)task);
             task = (task_t)queue_next(&task->pset_tasks)) {
            task_reference_internal(task);
            tasks[actual++] = task;
        }

        break;
    }

    case THING_THREAD:
    {
        thread_t thread, *threads = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&pset->threads);
             !queue_end(&pset->threads, (queue_entry_t)thread);
             thread = (thread_t)queue_next(&thread->pset_threads)) {
            thread_reference_internal(thread);
            threads[actual++] = thread;
        }

        break;
    }
    }

    pset_unlock(pset);

    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                switch (type) {

                case THING_TASK:
                {
                    task_t *tasks = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(tasks[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t *threads = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(threads[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }

        *thing_list = (mach_port_t *)addr;
        *count = actual;
    }

    return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t chudxnu_private_task_threads(task_t task,
                                                  thread_act_array_t *threads_out,
                                                  mach_msg_type_number_t *count)
{
    mach_msg_type_number_t actual;
    thread_t *threads;
    thread_t thread;
    vm_size_t size, size_needed;
    void *addr;
    unsigned int i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the task is locked & active */
    threads = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
         ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        threads[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */

        *threads_out = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(threads[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            threads = (thread_t *)newaddr;
        }

        *threads_out = threads;
        *count = actual;
    }

    return (KERN_SUCCESS);
}


__private_extern__
kern_return_t chudxnu_all_tasks(task_array_t *task_list,
                                mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__
kern_return_t chudxnu_free_task_list(task_array_t *task_list,
                                     mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *task_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            task_deallocate((*task_list)[i]);
        }
        kfree(addr, size);
        *task_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
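
/*
 * Usage sketch (illustrative only): enumerate all tasks, then drop the
 * references and the list itself with the matching free routine.
 *
 *     task_array_t tasks;
 *     mach_msg_type_number_t ntasks;
 *     if(chudxnu_all_tasks(&tasks, &ntasks) == KERN_SUCCESS) {
 *         // ... inspect tasks[0..ntasks-1] ...
 *         chudxnu_free_task_list(&tasks, &ntasks);
 *     }
 */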

__private_extern__
kern_return_t chudxnu_all_threads( thread_array_t *thread_list,
                                   mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__
kern_return_t chudxnu_task_threads( task_t task,
                                    thread_array_t *thread_list,
                                    mach_msg_type_number_t *count)
{
    return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__
kern_return_t chudxnu_free_thread_list(thread_array_t *thread_list,
                                       mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *thread_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            thread_deallocate((*thread_list)[i]);
        }
        kfree(addr, size);
        *thread_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
task_t chudxnu_current_task(void)
{
    return current_task();
}

__private_extern__
thread_t chudxnu_current_thread(void)
{
    return current_thread();
}

__private_extern__
task_t chudxnu_task_for_thread(thread_t thread)
{
    return get_threadtask(thread);
}

__private_extern__
kern_return_t chudxnu_thread_info(thread_t thread,
                                  thread_flavor_t flavor,
                                  thread_info_t thread_info_out,
                                  mach_msg_type_number_t *thread_info_count)
{
    return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

__private_extern__
kern_return_t chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
    *timestamp = thread->last_switch;
    return KERN_SUCCESS;
}

#pragma mark **** DEPRECATED ****

// DEPRECATED
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
                                                    mach_msg_type_number_t *count,
                                                    boolean_t user_only)
{
    return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
}

// DEPRECATED
__private_extern__
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}