/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>

#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
#include <ppc/fpu_protos.h>

// forward declarations
extern kern_return_t machine_thread_get_kern_state( thread_t thread,
                                                    thread_flavor_t flavor,
                                                    thread_state_t tstate,
                                                    mach_msg_type_number_t *count);


#pragma mark **** thread binding ****

__private_extern__
kern_return_t chudxnu_bind_thread(thread_t thread, int cpu)
{
    if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */
        thread_bind(thread, cpu_to_processor(cpu));
        if(thread==current_thread()) {
            (void)thread_block(THREAD_CONTINUE_NULL);
        }
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_unbind_thread(thread_t thread)
{
    thread_bind(thread, PROCESSOR_NULL);
    return KERN_SUCCESS;
}

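/*
 * Usage sketch (illustrative only; this helper is not part of the original
 * file and its name is hypothetical): pin the current thread to CPU 0, do
 * some per-processor work, then unbind. For the current thread the bind
 * takes effect synchronously, because chudxnu_bind_thread() blocks to force
 * a reschedule onto the target processor.
 */
static void chudxnu_example_run_on_cpu0(void)
{
    if(chudxnu_bind_thread(current_thread(), 0) == KERN_SUCCESS) {
        /* ... per-CPU work, e.g. reading CPU 0's performance counters ... */
        chudxnu_unbind_thread(current_thread());
    }
}
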
#pragma mark **** thread state ****

__private_extern__
kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            ts->r0 = (unsigned int)sv->save_r0;
            ts->r1 = (unsigned int)sv->save_r1;
            ts->r2 = (unsigned int)sv->save_r2;
            ts->r3 = (unsigned int)sv->save_r3;
            ts->r4 = (unsigned int)sv->save_r4;
            ts->r5 = (unsigned int)sv->save_r5;
            ts->r6 = (unsigned int)sv->save_r6;
            ts->r7 = (unsigned int)sv->save_r7;
            ts->r8 = (unsigned int)sv->save_r8;
            ts->r9 = (unsigned int)sv->save_r9;
            ts->r10 = (unsigned int)sv->save_r10;
            ts->r11 = (unsigned int)sv->save_r11;
            ts->r12 = (unsigned int)sv->save_r12;
            ts->r13 = (unsigned int)sv->save_r13;
            ts->r14 = (unsigned int)sv->save_r14;
            ts->r15 = (unsigned int)sv->save_r15;
            ts->r16 = (unsigned int)sv->save_r16;
            ts->r17 = (unsigned int)sv->save_r17;
            ts->r18 = (unsigned int)sv->save_r18;
            ts->r19 = (unsigned int)sv->save_r19;
            ts->r20 = (unsigned int)sv->save_r20;
            ts->r21 = (unsigned int)sv->save_r21;
            ts->r22 = (unsigned int)sv->save_r22;
            ts->r23 = (unsigned int)sv->save_r23;
            ts->r24 = (unsigned int)sv->save_r24;
            ts->r25 = (unsigned int)sv->save_r25;
            ts->r26 = (unsigned int)sv->save_r26;
            ts->r27 = (unsigned int)sv->save_r27;
            ts->r28 = (unsigned int)sv->save_r28;
            ts->r29 = (unsigned int)sv->save_r29;
            ts->r30 = (unsigned int)sv->save_r30;
            ts->r31 = (unsigned int)sv->save_r31;
            ts->cr = (unsigned int)sv->save_cr;
            ts->xer = (unsigned int)sv->save_xer;
            ts->lr = (unsigned int)sv->save_lr;
            ts->ctr = (unsigned int)sv->save_ctr;
            ts->srr0 = (unsigned int)sv->save_srr0;
            ts->srr1 = (unsigned int)sv->save_srr1;
            ts->mq = 0;
            ts->vrsave = (unsigned int)sv->save_vrsave;
        } else {
            bzero((void *)ts, sizeof(struct ppc_thread_state));
        }
        *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            xts->r0 = sv->save_r0;
            xts->r1 = sv->save_r1;
            xts->r2 = sv->save_r2;
            xts->r3 = sv->save_r3;
            xts->r4 = sv->save_r4;
            xts->r5 = sv->save_r5;
            xts->r6 = sv->save_r6;
            xts->r7 = sv->save_r7;
            xts->r8 = sv->save_r8;
            xts->r9 = sv->save_r9;
            xts->r10 = sv->save_r10;
            xts->r11 = sv->save_r11;
            xts->r12 = sv->save_r12;
            xts->r13 = sv->save_r13;
            xts->r14 = sv->save_r14;
            xts->r15 = sv->save_r15;
            xts->r16 = sv->save_r16;
            xts->r17 = sv->save_r17;
            xts->r18 = sv->save_r18;
            xts->r19 = sv->save_r19;
            xts->r20 = sv->save_r20;
            xts->r21 = sv->save_r21;
            xts->r22 = sv->save_r22;
            xts->r23 = sv->save_r23;
            xts->r24 = sv->save_r24;
            xts->r25 = sv->save_r25;
            xts->r26 = sv->save_r26;
            xts->r27 = sv->save_r27;
            xts->r28 = sv->save_r28;
            xts->r29 = sv->save_r29;
            xts->r30 = sv->save_r30;
            xts->r31 = sv->save_r31;
            xts->cr = sv->save_cr;
            xts->xer = sv->save_xer;
            xts->lr = sv->save_lr;
            xts->ctr = sv->save_ctr;
            xts->srr0 = sv->save_srr0;
            xts->srr1 = sv->save_srr1;
            xts->vrsave = sv->save_vrsave;
        } else {
            bzero((void *)xts, sizeof(struct ppc_thread_state64));
        }
        *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    default:
        *count = 0;
        return KERN_INVALID_ARGUMENT;
    }
}

__private_extern__
kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            sv->save_r0 = (uint64_t)ts->r0;
            sv->save_r1 = (uint64_t)ts->r1;
            sv->save_r2 = (uint64_t)ts->r2;
            sv->save_r3 = (uint64_t)ts->r3;
            sv->save_r4 = (uint64_t)ts->r4;
            sv->save_r5 = (uint64_t)ts->r5;
            sv->save_r6 = (uint64_t)ts->r6;
            sv->save_r7 = (uint64_t)ts->r7;
            sv->save_r8 = (uint64_t)ts->r8;
            sv->save_r9 = (uint64_t)ts->r9;
            sv->save_r10 = (uint64_t)ts->r10;
            sv->save_r11 = (uint64_t)ts->r11;
            sv->save_r12 = (uint64_t)ts->r12;
            sv->save_r13 = (uint64_t)ts->r13;
            sv->save_r14 = (uint64_t)ts->r14;
            sv->save_r15 = (uint64_t)ts->r15;
            sv->save_r16 = (uint64_t)ts->r16;
            sv->save_r17 = (uint64_t)ts->r17;
            sv->save_r18 = (uint64_t)ts->r18;
            sv->save_r19 = (uint64_t)ts->r19;
            sv->save_r20 = (uint64_t)ts->r20;
            sv->save_r21 = (uint64_t)ts->r21;
            sv->save_r22 = (uint64_t)ts->r22;
            sv->save_r23 = (uint64_t)ts->r23;
            sv->save_r24 = (uint64_t)ts->r24;
            sv->save_r25 = (uint64_t)ts->r25;
            sv->save_r26 = (uint64_t)ts->r26;
            sv->save_r27 = (uint64_t)ts->r27;
            sv->save_r28 = (uint64_t)ts->r28;
            sv->save_r29 = (uint64_t)ts->r29;
            sv->save_r30 = (uint64_t)ts->r30;
            sv->save_r31 = (uint64_t)ts->r31;
            sv->save_cr = ts->cr;
            sv->save_xer = (uint64_t)ts->xer;
            sv->save_lr = (uint64_t)ts->lr;
            sv->save_ctr = (uint64_t)ts->ctr;
            sv->save_srr0 = (uint64_t)ts->srr0;
            sv->save_srr1 = (uint64_t)ts->srr1;
            sv->save_vrsave = ts->vrsave;
            return KERN_SUCCESS;
        }
        break;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            sv->save_r0 = xts->r0;
            sv->save_r1 = xts->r1;
            sv->save_r2 = xts->r2;
            sv->save_r3 = xts->r3;
            sv->save_r4 = xts->r4;
            sv->save_r5 = xts->r5;
            sv->save_r6 = xts->r6;
            sv->save_r7 = xts->r7;
            sv->save_r8 = xts->r8;
            sv->save_r9 = xts->r9;
            sv->save_r10 = xts->r10;
            sv->save_r11 = xts->r11;
            sv->save_r12 = xts->r12;
            sv->save_r13 = xts->r13;
            sv->save_r14 = xts->r14;
            sv->save_r15 = xts->r15;
            sv->save_r16 = xts->r16;
            sv->save_r17 = xts->r17;
            sv->save_r18 = xts->r18;
            sv->save_r19 = xts->r19;
            sv->save_r20 = xts->r20;
            sv->save_r21 = xts->r21;
            sv->save_r22 = xts->r22;
            sv->save_r23 = xts->r23;
            sv->save_r24 = xts->r24;
            sv->save_r25 = xts->r25;
            sv->save_r26 = xts->r26;
            sv->save_r27 = xts->r27;
            sv->save_r28 = xts->r28;
            sv->save_r29 = xts->r29;
            sv->save_r30 = xts->r30;
            sv->save_r31 = xts->r31;
            sv->save_cr = xts->cr;
            sv->save_xer = xts->xer;
            sv->save_lr = xts->lr;
            sv->save_ctr = xts->ctr;
            sv->save_srr0 = xts->srr0;
            sv->save_srr1 = xts->srr1;
            sv->save_vrsave = xts->vrsave;
            return KERN_SUCCESS;
        }
    }
    return KERN_FAILURE;
}

__private_extern__
kern_return_t chudxnu_thread_user_state_available(thread_t thread)
{
    if(find_user_regs(thread)) {
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_thread_get_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t *count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
    } else {
        if(user_only) {
            return machine_thread_get_state(thread, flavor, tstate, count);
        } else {
            // doesn't do FP or VMX
            return machine_thread_get_kern_state(thread, flavor, tstate, count);
        }
    }
}

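/*
 * Usage sketch (illustrative only; the helper name is hypothetical): fetch a
 * thread's user-mode PC via the 64-bit flavor, which takes the savearea path
 * above regardless of whether the task is 32-bit or 64-bit.
 */
static kern_return_t chudxnu_example_user_pc(thread_t thread, uint64_t *pc)
{
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
    kern_return_t kr = chudxnu_thread_get_state(thread, PPC_THREAD_STATE64,
                                                (thread_state_t)&state, &count, TRUE);
    if(kr == KERN_SUCCESS) {
        *pc = state.srr0; /* srr0 holds the interrupted PC */
    }
    return kr;
}
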
__private_extern__
kern_return_t chudxnu_thread_set_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
    if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
        struct savearea *sv;
        if(user_only) {
            sv = find_user_regs(thread);
        } else {
            sv = find_kern_regs(thread);
        }
        return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
    } else {
        return machine_thread_set_state(thread, flavor, tstate, count); // always user
    }
}

#pragma mark **** task memory read/write ****

__private_extern__
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        usraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyin on interrupt stack
        }

        if(copyin(usraddr, kernaddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_read_user(map, usraddr, kernaddr, size);
    }

    return ret;
}

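/*
 * Usage sketch (illustrative only; the helper name is hypothetical): copy a
 * 32-bit word out of another task's address space.
 */
static kern_return_t chudxnu_example_read_user_word(task_t task, uint64_t uaddr, uint32_t *word)
{
    return chudxnu_task_read(task, word, uaddr, sizeof(uint32_t));
}
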
__private_extern__
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
    kern_return_t ret = KERN_SUCCESS;

    if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
        useraddr &= 0x00000000FFFFFFFFULL;
    }

    if(current_task()==task) {
        thread_t cur_thr = current_thread();
        vm_offset_t recover_handler = cur_thr->recover;

        if(ml_at_interrupt_context()) {
            return KERN_FAILURE; // can't do copyout on interrupt stack
        }

        if(copyout(kernaddr, useraddr, size)) {
            ret = KERN_FAILURE;
        }
        cur_thr->recover = recover_handler;
    } else {
        vm_map_t map = get_task_map(task);
        ret = vm_map_write_user(map, kernaddr, useraddr, size);
    }

    return ret;
}

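/*
 * chudxnu_kern_read/chudxnu_kern_write access kernel memory through its
 * physical address: each iteration translates the virtual address with
 * pmap_find_phys(), then issues the widest naturally-aligned physical
 * access (byte, halfword or word) that fits the remaining length, so the
 * routines work on addresses of any alignment.
 */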
__private_extern__
kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
    while(size>0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr&0x1) || size==1) {
            *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
            dstaddr = (uint8_t *)dstaddr + 1;
            srcaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<4) { /* need 4 aligned bytes left before a word read is safe */
            *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
            dstaddr = (uint16_t *)dstaddr + 1;
            srcaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
            dstaddr = (uint32_t *)dstaddr + 1;
            srcaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }
    return KERN_SUCCESS;
}

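/* Store-side twin of chudxnu_kern_read above: same page-by-page, alignment-aware strategy. */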
__private_extern__
kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
{
    while(size>0) {
        ppnum_t pp;
        addr64_t phys_addr;

        pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
        if(!pp) {
            return KERN_FAILURE; /* Not mapped... */
        }

        phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
        if(phys_addr >= mem_actual) {
            return KERN_FAILURE; /* out of range */
        }

        if((phys_addr&0x1) || size==1) {
            ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
            srcaddr = (uint8_t *)srcaddr + 1;
            dstaddr += sizeof(uint8_t);
            size -= sizeof(uint8_t);
        } else if((phys_addr&0x3) || size<4) { /* need 4 aligned bytes left before a word write is safe */
            ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
            srcaddr = (uint16_t *)srcaddr + 1;
            dstaddr += sizeof(uint16_t);
            size -= sizeof(uint16_t);
        } else {
            ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
            srcaddr = (uint32_t *)srcaddr + 1;
            dstaddr += sizeof(uint32_t);
            size -= sizeof(uint32_t);
        }
    }

    return KERN_SUCCESS;
}

// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later, after sampling has finished (e.g. in case we stopped the program while it
// was saving values into the previous stack frame).
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including the current one)
// [N-2]    current LR (the return address if we're in a leaf function)
// [N-1]    current r0 (in case LR has been saved in r0)
//
#define FP_LINK_OFFSET       2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK  0x3 // Instructions are always 4 bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

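/*
 * NOTE: VALID_STACK_ADDRESS deliberately references the locals `supervisor',
 * `kernStackMin' and `kernStackMax' of the functions below that expand it.
 */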
#define VALID_STACK_ADDRESS(addr)  (addr>=0x1000ULL && \
                                    (addr&STACK_ALIGNMENT_MASK)==0x0 && \
                                    (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))


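/*
 * Usage sketch (illustrative only; the helper name is hypothetical, and the
 * prototype for chudxnu_thread_get_callstack64 comes from chud_xnu.h): sample
 * a thread's user callstack. Per the layout described above, frames[0] is the
 * PC, frames[nframes-2] the LR, frames[nframes-1] r0, and frames[1..nframes-3]
 * the walked stack frames.
 */
static void chudxnu_example_sample_callstack(thread_t thread)
{
    uint64_t frames[32];
    mach_msg_type_number_t nframes = sizeof(frames) / sizeof(frames[0]);
    if(chudxnu_thread_get_callstack64(thread, frames, &nframes, TRUE) == KERN_SUCCESS) {
        /* ... record frames[0..nframes-1] ... */
    }
}
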
__private_extern__
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
                                              uint64_t *callStack,
                                              mach_msg_type_number_t *count,
                                              boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint64_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;                // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //      saved LR
        //      saved CR
        //      saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty, because the callee fills it in just before it lowers the
        // stack). However, if we catch the program between filling in the return
        // address and lowering the stack, we still want a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}

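// 32-bit variant of chudxnu_thread_get_callstack64 above: the walk is
// identical, but each saved entry is truncated to 32 bits for the caller.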
__private_extern__
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
                                            uint32_t *callStack,
                                            mach_msg_type_number_t *count,
                                            boolean_t user_only)
{
    kern_return_t kr;
    task_t task = get_threadtask(thread);
    uint64_t nextFramePointer = 0;
    uint64_t currPC, currLR, currR0;
    uint64_t framePointer;
    uint64_t prevPC = 0;
    uint64_t kernStackMin = min_valid_stack_address();
    uint64_t kernStackMax = max_valid_stack_address();
    uint32_t *buffer = callStack;
    uint32_t tmpWord;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    boolean_t is64Bit;
    struct savearea *sv;

    if(user_only) {
        sv = find_user_regs(thread);
    } else {
        sv = find_kern_regs(thread);
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);
    if(supervisor) {
#warning assuming kernel task is always 32-bit
        is64Bit = FALSE;
    } else {
        is64Bit = chudxnu_is_64bit_task(task);
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;                // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        uint64_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //      saved LR
        //      saved CR
        //      saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        uint64_t fp_link;

        if(is64Bit) {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
        } else {
            fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
        }

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty, because the callee fills it in just before it lowers the
        // stack). However, if we catch the program between filling in the return
        // address and lowering the stack, we still want a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
                pc = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
        if(supervisor) {
            if(is64Bit) {
                kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        } else {
            if(is64Bit) {
                kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
            } else {
                kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
                nextFramePointer = tmpWord;
            }
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack (used for later fixup).
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}

#pragma mark **** task and thread info ****

__private_extern__
boolean_t chudxnu_is_64bit_task(task_t task)
{
    return (task_has_64BitAddr(task));
}

#define THING_TASK   0
#define THING_THREAD 1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t chudxnu_private_processor_set_things( processor_set_t pset,
                                                           mach_port_t **thing_list,
                                                           mach_msg_type_number_t *count,
                                                           int type)
{
    unsigned int actual; /* this many things */
    unsigned int maxthings;
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

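    /*
     * Standard grow-and-retry pattern: size the array under the pset lock,
     * and if the buffer allocated while unlocked turns out too small once
     * the lock is retaken, free it and try again.
     */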
    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        if (type == THING_TASK)
            maxthings = pset->task_count;
        else
            maxthings = pset->thread_count;

        /* do we have the memory we need? */

        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the processor_set is locked & active */

    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t task, *tasks = (task_t *)addr;

        for (task = (task_t)queue_first(&pset->tasks);
                !queue_end(&pset->tasks, (queue_entry_t)task);
                task = (task_t)queue_next(&task->pset_tasks)) {
            task_reference_internal(task);
            tasks[actual++] = task;
        }

        break;
    }

    case THING_THREAD:
    {
        thread_t thread, *threads = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&pset->threads);
                !queue_end(&pset->threads, (queue_entry_t)thread);
                thread = (thread_t)queue_next(&thread->pset_threads)) {
            thread_reference_internal(thread);
            threads[actual++] = thread;
        }

        break;
    }
    }

    pset_unlock(pset);

    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                switch (type) {

                case THING_TASK:
                {
                    task_t *tasks = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(tasks[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t *threads = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(threads[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }

        *thing_list = (mach_port_t *)addr;
        *count = actual;
    }

    return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t chudxnu_private_task_threads(task_t task,
                                                  thread_act_array_t *threads_out,
                                                  mach_msg_type_number_t *count)
{
    mach_msg_type_number_t actual;
    thread_t *threads;
    thread_t thread;
    vm_size_t size, size_needed;
    void *addr;
    unsigned int i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the task is locked & active */
    threads = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
                ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        threads[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */

        *threads_out = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(threads[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            threads = (thread_t *)newaddr;
        }

        *threads_out = threads;
        *count = actual;
    }

    return (KERN_SUCCESS);
}


__private_extern__
kern_return_t chudxnu_all_tasks(task_array_t *task_list,
                                mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__
kern_return_t chudxnu_free_task_list(task_array_t *task_list,
                                     mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *task_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            task_deallocate((*task_list)[i]);
        }
        kfree(addr, size);
        *task_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

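/*
 * Usage sketch (illustrative only; the helper name is hypothetical):
 * enumerate every task, then drop the references and free the list.
 */
static void chudxnu_example_enumerate_tasks(void)
{
    task_array_t tasks;
    mach_msg_type_number_t task_count;
    if(chudxnu_all_tasks(&tasks, &task_count) == KERN_SUCCESS) {
        /* ... inspect tasks[0..task_count-1] ... */
        chudxnu_free_task_list(&tasks, &task_count);
    }
}
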
__private_extern__
kern_return_t chudxnu_all_threads( thread_array_t *thread_list,
                                   mach_msg_type_number_t *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__
kern_return_t chudxnu_task_threads( task_t task,
                                    thread_array_t *thread_list,
                                    mach_msg_type_number_t *count)
{
    return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__
kern_return_t chudxnu_free_thread_list(thread_array_t *thread_list,
                                       mach_msg_type_number_t *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *thread_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            thread_deallocate((*thread_list)[i]);
        }
        kfree(addr, size);
        *thread_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
task_t chudxnu_current_task(void)
{
    return current_task();
}

__private_extern__
thread_t chudxnu_current_thread(void)
{
    return current_thread();
}

__private_extern__
task_t chudxnu_task_for_thread(thread_t thread)
{
    return get_threadtask(thread);
}

__private_extern__
kern_return_t chudxnu_thread_info(thread_t thread,
                                  thread_flavor_t flavor,
                                  thread_info_t thread_info_out,
                                  mach_msg_type_number_t *thread_info_count)
{
    return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

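/*
 * Usage sketch (illustrative only; the helper name is hypothetical, and
 * THREAD_BASIC_INFO/thread_basic_info_data_t are assumed to be visible via
 * the mach headers included above): fetch basic scheduling info through the
 * chudxnu_thread_info() wrapper.
 */
static kern_return_t chudxnu_example_thread_basic_info(thread_t thread,
                                                       thread_basic_info_data_t *info)
{
    mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
    return chudxnu_thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)info, &info_count);
}
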
__private_extern__
kern_return_t chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
    *timestamp = thread->last_switch;
    return KERN_SUCCESS;
}

#pragma mark **** DEPRECATED ****

// DEPRECATED
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    return chudxnu_bind_thread(current_thread(), cpu);
}

// DEPRECATED
kern_return_t chudxnu_unbind_current_thread(void)
{
    return chudxnu_unbind_thread(current_thread());
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
                                                    mach_msg_type_number_t *count,
                                                    boolean_t user_only)
{
    return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
}

// DEPRECATED
__private_extern__
thread_t chudxnu_current_act(void)
{
    return chudxnu_current_thread();
}