]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/ppc/chud/chud_thread.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / ppc / chud / chud_thread.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24#include <mach/mach_types.h>
25#include <mach/task.h>
26#include <mach/thread_act.h>
27
28#include <kern/kern_types.h>
29#include <kern/processor.h>
30#include <kern/thread.h>
31#include <kern/ipc_tt.h>
32
33#include <vm/vm_map.h>
34#include <vm/pmap.h>
35
36#include <ppc/chud/chud_xnu.h>
37#include <ppc/chud/chud_xnu_private.h>
38
39#include <ppc/misc_protos.h>
40#include <ppc/proc_reg.h>
41#include <ppc/machine_routines.h>
42#include <ppc/fpu_protos.h>
43
44// forward declarations
45extern kern_return_t machine_thread_get_kern_state( thread_t thread,
46 thread_flavor_t flavor,
47 thread_state_t tstate,
48 mach_msg_type_number_t *count);
49
50
51#pragma mark **** thread binding ****
52
53__private_extern__
54kern_return_t chudxnu_bind_thread(thread_t thread, int cpu)
55{
56 if(cpu>=0 && cpu<chudxnu_avail_cpu_count()) { /* make sure cpu # is sane */
57 thread_bind(thread, cpu_to_processor(cpu));
58 if(thread==current_thread()) {
59 (void)thread_block(THREAD_CONTINUE_NULL);
60 }
61 return KERN_SUCCESS;
62 } else {
63 return KERN_FAILURE;
64 }
65}
66
__private_extern__
/*
 * Remove any processor binding from a thread, allowing the scheduler to run
 * it on any available processor again. Always succeeds.
 */
kern_return_t chudxnu_unbind_thread(thread_t thread)
{
	thread_bind(thread, PROCESSOR_NULL);
	return KERN_SUCCESS;
}
73
74#pragma mark **** thread state ****
75
76__private_extern__
77kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
78{
79 struct ppc_thread_state *ts;
80 struct ppc_thread_state64 *xts;
81
82 switch(flavor) {
83 case PPC_THREAD_STATE:
84 if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
85 *count = 0;
86 return KERN_INVALID_ARGUMENT;
87 }
88 ts = (struct ppc_thread_state *) tstate;
89 if(sv) {
90 ts->r0 = (unsigned int)sv->save_r0;
91 ts->r1 = (unsigned int)sv->save_r1;
92 ts->r2 = (unsigned int)sv->save_r2;
93 ts->r3 = (unsigned int)sv->save_r3;
94 ts->r4 = (unsigned int)sv->save_r4;
95 ts->r5 = (unsigned int)sv->save_r5;
96 ts->r6 = (unsigned int)sv->save_r6;
97 ts->r7 = (unsigned int)sv->save_r7;
98 ts->r8 = (unsigned int)sv->save_r8;
99 ts->r9 = (unsigned int)sv->save_r9;
100 ts->r10 = (unsigned int)sv->save_r10;
101 ts->r11 = (unsigned int)sv->save_r11;
102 ts->r12 = (unsigned int)sv->save_r12;
103 ts->r13 = (unsigned int)sv->save_r13;
104 ts->r14 = (unsigned int)sv->save_r14;
105 ts->r15 = (unsigned int)sv->save_r15;
106 ts->r16 = (unsigned int)sv->save_r16;
107 ts->r17 = (unsigned int)sv->save_r17;
108 ts->r18 = (unsigned int)sv->save_r18;
109 ts->r19 = (unsigned int)sv->save_r19;
110 ts->r20 = (unsigned int)sv->save_r20;
111 ts->r21 = (unsigned int)sv->save_r21;
112 ts->r22 = (unsigned int)sv->save_r22;
113 ts->r23 = (unsigned int)sv->save_r23;
114 ts->r24 = (unsigned int)sv->save_r24;
115 ts->r25 = (unsigned int)sv->save_r25;
116 ts->r26 = (unsigned int)sv->save_r26;
117 ts->r27 = (unsigned int)sv->save_r27;
118 ts->r28 = (unsigned int)sv->save_r28;
119 ts->r29 = (unsigned int)sv->save_r29;
120 ts->r30 = (unsigned int)sv->save_r30;
121 ts->r31 = (unsigned int)sv->save_r31;
122 ts->cr = (unsigned int)sv->save_cr;
123 ts->xer = (unsigned int)sv->save_xer;
124 ts->lr = (unsigned int)sv->save_lr;
125 ts->ctr = (unsigned int)sv->save_ctr;
126 ts->srr0 = (unsigned int)sv->save_srr0;
127 ts->srr1 = (unsigned int)sv->save_srr1;
128 ts->mq = 0;
129 ts->vrsave = (unsigned int)sv->save_vrsave;
130 } else {
131 bzero((void *)ts, sizeof(struct ppc_thread_state));
132 }
133 *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
134 return KERN_SUCCESS;
135 break;
136 case PPC_THREAD_STATE64:
137 if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
138 return KERN_INVALID_ARGUMENT;
139 }
140 xts = (struct ppc_thread_state64 *) tstate;
141 if(sv) {
142 xts->r0 = sv->save_r0;
143 xts->r1 = sv->save_r1;
144 xts->r2 = sv->save_r2;
145 xts->r3 = sv->save_r3;
146 xts->r4 = sv->save_r4;
147 xts->r5 = sv->save_r5;
148 xts->r6 = sv->save_r6;
149 xts->r7 = sv->save_r7;
150 xts->r8 = sv->save_r8;
151 xts->r9 = sv->save_r9;
152 xts->r10 = sv->save_r10;
153 xts->r11 = sv->save_r11;
154 xts->r12 = sv->save_r12;
155 xts->r13 = sv->save_r13;
156 xts->r14 = sv->save_r14;
157 xts->r15 = sv->save_r15;
158 xts->r16 = sv->save_r16;
159 xts->r17 = sv->save_r17;
160 xts->r18 = sv->save_r18;
161 xts->r19 = sv->save_r19;
162 xts->r20 = sv->save_r20;
163 xts->r21 = sv->save_r21;
164 xts->r22 = sv->save_r22;
165 xts->r23 = sv->save_r23;
166 xts->r24 = sv->save_r24;
167 xts->r25 = sv->save_r25;
168 xts->r26 = sv->save_r26;
169 xts->r27 = sv->save_r27;
170 xts->r28 = sv->save_r28;
171 xts->r29 = sv->save_r29;
172 xts->r30 = sv->save_r30;
173 xts->r31 = sv->save_r31;
174 xts->cr = sv->save_cr;
175 xts->xer = sv->save_xer;
176 xts->lr = sv->save_lr;
177 xts->ctr = sv->save_ctr;
178 xts->srr0 = sv->save_srr0;
179 xts->srr1 = sv->save_srr1;
180 xts->vrsave = sv->save_vrsave;
181 } else {
182 bzero((void *)xts, sizeof(struct ppc_thread_state64));
183 }
184 *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
185 return KERN_SUCCESS;
186 break;
187 default:
188 *count = 0;
189 return KERN_INVALID_ARGUMENT;
190 break;
191 }
192}
193
__private_extern__
/*
 * Inverse of chudxnu_copy_savearea_to_threadstate: copy a caller-supplied
 * Mach thread state back into a savearea.
 *
 *   sv     - destination savearea; if NULL, the function falls through to
 *            KERN_FAILURE without touching anything.
 *   flavor - PPC_THREAD_STATE (32-bit values are zero-extended to 64 bits)
 *            or PPC_THREAD_STATE64.
 *   count  - capacity check only; it is never updated by this routine.
 *
 * Returns KERN_SUCCESS only when a copy actually happened; short counts
 * return KERN_INVALID_ARGUMENT, and a NULL sv or unknown flavor returns
 * KERN_FAILURE via the shared exit at the bottom.
 */
kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
{
	struct ppc_thread_state *ts;
	struct ppc_thread_state64 *xts;

	switch(flavor) {
	case PPC_THREAD_STATE:
		if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
			return KERN_INVALID_ARGUMENT;
		}
		ts = (struct ppc_thread_state *) tstate;
		if(sv) {
			/* Widen each 32-bit register into the 64-bit savearea slot. */
			sv->save_r0 = (uint64_t)ts->r0;
			sv->save_r1 = (uint64_t)ts->r1;
			sv->save_r2 = (uint64_t)ts->r2;
			sv->save_r3 = (uint64_t)ts->r3;
			sv->save_r4 = (uint64_t)ts->r4;
			sv->save_r5 = (uint64_t)ts->r5;
			sv->save_r6 = (uint64_t)ts->r6;
			sv->save_r7 = (uint64_t)ts->r7;
			sv->save_r8 = (uint64_t)ts->r8;
			sv->save_r9 = (uint64_t)ts->r9;
			sv->save_r10 = (uint64_t)ts->r10;
			sv->save_r11 = (uint64_t)ts->r11;
			sv->save_r12 = (uint64_t)ts->r12;
			sv->save_r13 = (uint64_t)ts->r13;
			sv->save_r14 = (uint64_t)ts->r14;
			sv->save_r15 = (uint64_t)ts->r15;
			sv->save_r16 = (uint64_t)ts->r16;
			sv->save_r17 = (uint64_t)ts->r17;
			sv->save_r18 = (uint64_t)ts->r18;
			sv->save_r19 = (uint64_t)ts->r19;
			sv->save_r20 = (uint64_t)ts->r20;
			sv->save_r21 = (uint64_t)ts->r21;
			sv->save_r22 = (uint64_t)ts->r22;
			sv->save_r23 = (uint64_t)ts->r23;
			sv->save_r24 = (uint64_t)ts->r24;
			sv->save_r25 = (uint64_t)ts->r25;
			sv->save_r26 = (uint64_t)ts->r26;
			sv->save_r27 = (uint64_t)ts->r27;
			sv->save_r28 = (uint64_t)ts->r28;
			sv->save_r29 = (uint64_t)ts->r29;
			sv->save_r30 = (uint64_t)ts->r30;
			sv->save_r31 = (uint64_t)ts->r31;
			sv->save_cr = ts->cr;
			sv->save_xer = (uint64_t)ts->xer;
			sv->save_lr = (uint64_t)ts->lr;
			sv->save_ctr = (uint64_t)ts->ctr;
			sv->save_srr0 = (uint64_t)ts->srr0;
			sv->save_srr1 = (uint64_t)ts->srr1;
			sv->save_vrsave = ts->vrsave;
			return KERN_SUCCESS;
		}
		break; /* sv == NULL: fall out to KERN_FAILURE */
	case PPC_THREAD_STATE64:
		if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
			return KERN_INVALID_ARGUMENT;
		}
		xts = (struct ppc_thread_state64 *) tstate;
		if(sv) {
			/* 64-bit flavor: straight full-width copies. */
			sv->save_r0 = xts->r0;
			sv->save_r1 = xts->r1;
			sv->save_r2 = xts->r2;
			sv->save_r3 = xts->r3;
			sv->save_r4 = xts->r4;
			sv->save_r5 = xts->r5;
			sv->save_r6 = xts->r6;
			sv->save_r7 = xts->r7;
			sv->save_r8 = xts->r8;
			sv->save_r9 = xts->r9;
			sv->save_r10 = xts->r10;
			sv->save_r11 = xts->r11;
			sv->save_r12 = xts->r12;
			sv->save_r13 = xts->r13;
			sv->save_r14 = xts->r14;
			sv->save_r15 = xts->r15;
			sv->save_r16 = xts->r16;
			sv->save_r17 = xts->r17;
			sv->save_r18 = xts->r18;
			sv->save_r19 = xts->r19;
			sv->save_r20 = xts->r20;
			sv->save_r21 = xts->r21;
			sv->save_r22 = xts->r22;
			sv->save_r23 = xts->r23;
			sv->save_r24 = xts->r24;
			sv->save_r25 = xts->r25;
			sv->save_r26 = xts->r26;
			sv->save_r27 = xts->r27;
			sv->save_r28 = xts->r28;
			sv->save_r29 = xts->r29;
			sv->save_r30 = xts->r30;
			sv->save_r31 = xts->r31;
			sv->save_cr = xts->cr;
			sv->save_xer = xts->xer;
			sv->save_lr = xts->lr;
			sv->save_ctr = xts->ctr;
			sv->save_srr0 = xts->srr0;
			sv->save_srr1 = xts->srr1;
			sv->save_vrsave = xts->vrsave;
			return KERN_SUCCESS;
		}
		/* sv == NULL (or unknown flavor): fall out to KERN_FAILURE */
	}
	return KERN_FAILURE;
}
299
300__private_extern__
301kern_return_t chudxnu_thread_user_state_available(thread_t thread)
302{
303 if(find_user_regs(thread)) {
304 return KERN_SUCCESS;
305 } else {
306 return KERN_FAILURE;
307 }
308}
309
__private_extern__
/*
 * Fetch a thread's register state.
 *
 * For the general-purpose flavors (PPC_THREAD_STATE/PPC_THREAD_STATE64) the
 * savearea is read directly, because machine_thread_get_state filters out
 * some bits; user_only selects the user-mode vs. kernel-mode savearea.
 * Other flavors are delegated to the machine layer; note the kernel-state
 * path does not handle FP or VMX flavors.
 */
kern_return_t chudxnu_thread_get_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t *count,
                                       boolean_t user_only)
{
	if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_get_state filters out some bits
		struct savearea *sv;
		if(user_only) {
			sv = find_user_regs(thread);
		} else {
			sv = find_kern_regs(thread);
		}
		/* sv may be NULL; the copy routine zeroes the state in that case */
		return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
	} else {
		if(user_only) {
			return machine_thread_get_state(thread, flavor, tstate, count);
		} else {
			// doesn't do FP or VMX
			return machine_thread_get_kern_state(thread, flavor, tstate, count);
		}
	}
}
334
__private_extern__
/*
 * Store register state into a thread. GP flavors are written straight into
 * the appropriate savearea (machine_thread_set_state filters out some bits);
 * if the selected savearea is NULL the copy routine reports KERN_FAILURE.
 * Other flavors go through machine_thread_set_state, which always operates
 * on user state.
 */
kern_return_t chudxnu_thread_set_state(thread_t thread,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
	if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) { // machine_thread_set_state filters out some bits
		struct savearea *sv;
		if(user_only) {
			sv = find_user_regs(thread);
		} else {
			sv = find_kern_regs(thread);
		}
		/* count is by-value here, so the callee's write-back is discarded */
		return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
	} else {
		return machine_thread_set_state(thread, flavor, tstate, count); // always user
	}
}
354
355#pragma mark **** task memory read/write ****
356
__private_extern__
/*
 * Read 'size' bytes of a task's user address space into kernel memory at
 * 'kernaddr'. If the target is the current task, copyin() is used (refused
 * at interrupt context); otherwise the read goes through the target task's
 * vm_map. Returns KERN_SUCCESS or KERN_FAILURE.
 */
kern_return_t chudxnu_task_read(task_t task, void *kernaddr, uint64_t usraddr, vm_size_t size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
		usraddr &= 0x00000000FFFFFFFFULL;
	}

	if(current_task()==task) {
		thread_t cur_thr = current_thread();
		/* NOTE(review): the thread's fault-recovery handler is captured here
		 * and restored after copyin — presumably to undo any recovery state
		 * the copyin fault path leaves behind; confirm against copyin(). */
		vm_offset_t recover_handler = cur_thr->recover;

		if(ml_at_interrupt_context()) {
			return KERN_FAILURE; // can't do copyin on interrupt stack
		}

		if(copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;
	} else {
		/* Not the current task: read through its map instead of copyin. */
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}
385
__private_extern__
/*
 * Write 'size' bytes from kernel memory at 'kernaddr' into a task's user
 * address space at 'useraddr'. Mirror image of chudxnu_task_read: copyout()
 * for the current task (refused at interrupt context), vm_map_write_user()
 * for any other task. Returns KERN_SUCCESS or KERN_FAILURE.
 */
kern_return_t chudxnu_task_write(task_t task, uint64_t useraddr, void *kernaddr, vm_size_t size)
{
	kern_return_t ret = KERN_SUCCESS;

	if(!chudxnu_is_64bit_task(task)) { // clear any cruft out of upper 32-bits for 32-bit tasks
		useraddr &= 0x00000000FFFFFFFFULL;
	}

	if(current_task()==task) {
		thread_t cur_thr = current_thread();
		/* NOTE(review): recovery handler captured/restored around copyout,
		 * same pattern as chudxnu_task_read — confirm against copyout(). */
		vm_offset_t recover_handler = cur_thr->recover;

		if(ml_at_interrupt_context()) {
			return KERN_FAILURE; // can't do copyout on interrupt stack
		}

		if(copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;
	} else {
		/* Not the current task: write through its map instead of copyout. */
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}

	return ret;
}
414
415__private_extern__
416kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
417{
418 while(size>0) {
419 ppnum_t pp;
420 addr64_t phys_addr;
421
422 pp = pmap_find_phys(kernel_pmap, srcaddr); /* Get the page number */
423 if(!pp) {
424 return KERN_FAILURE; /* Not mapped... */
425 }
426
427 phys_addr = ((addr64_t)pp << 12) | (srcaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
428 if(phys_addr >= mem_actual) {
429 return KERN_FAILURE; /* out of range */
430 }
431
432 if((phys_addr&0x1) || size==1) {
433 *((uint8_t *)dstaddr) = ml_phys_read_byte_64(phys_addr);
434 ((uint8_t *)dstaddr)++;
435 srcaddr += sizeof(uint8_t);
436 size -= sizeof(uint8_t);
437 } else if((phys_addr&0x3) || size<=2) {
438 *((uint16_t *)dstaddr) = ml_phys_read_half_64(phys_addr);
439 ((uint16_t *)dstaddr)++;
440 srcaddr += sizeof(uint16_t);
441 size -= sizeof(uint16_t);
442 } else {
443 *((uint32_t *)dstaddr) = ml_phys_read_word_64(phys_addr);
444 ((uint32_t *)dstaddr)++;
445 srcaddr += sizeof(uint32_t);
446 size -= sizeof(uint32_t);
447 }
448 }
449 return KERN_SUCCESS;
450}
451
452__private_extern__
453kern_return_t chudxnu_kern_write(vm_offset_t dstaddr, void *srcaddr, vm_size_t size)
454{
455 while(size>0) {
456 ppnum_t pp;
457 addr64_t phys_addr;
458
459 pp = pmap_find_phys(kernel_pmap, dstaddr); /* Get the page number */
460 if(!pp) {
461 return KERN_FAILURE; /* Not mapped... */
462 }
463
464 phys_addr = ((addr64_t)pp << 12) | (dstaddr & 0x0000000000000FFFULL); /* Shove in the page offset */
465 if(phys_addr >= mem_actual) {
466 return KERN_FAILURE; /* out of range */
467 }
468
469 if((phys_addr&0x1) || size==1) {
470 ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
471 ((uint8_t *)srcaddr)++;
472 dstaddr += sizeof(uint8_t);
473 size -= sizeof(uint8_t);
474 } else if((phys_addr&0x3) || size<=2) {
475 ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
476 ((uint16_t *)srcaddr)++;
477 dstaddr += sizeof(uint16_t);
478 size -= sizeof(uint16_t);
479 } else {
480 ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
481 ((uint32_t *)srcaddr)++;
482 dstaddr += sizeof(uint32_t);
483 size -= sizeof(uint32_t);
484 }
485 }
486
487 return KERN_SUCCESS;
488}
489
490// chudxnu_thread_get_callstack gathers a raw callstack along with any information needed to
491// fix it up later (in case we stopped program as it was saving values into prev stack frame, etc.)
492// after sampling has finished.
493//
494// For an N-entry callstack:
495//
496// [0] current pc
497// [1..N-3] stack frames (including current one)
498// [N-2] current LR (return value if we're in a leaf function)
499// [N-1] current r0 (in case we've saved LR in r0)
500//
501
/* Offset (in words) from the stack pointer to the saved LR slot of a frame. */
#define FP_LINK_OFFSET 2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK 0x3 // Instructions are always 4-bytes wide

/* MSR_PR set => problem (user) state; clear => supervisor state. */
#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

/* Sanity check on a frame pointer: above page 0, 16-byte aligned, and (for
 * supervisor-mode walks) within the kernel stack range. NOTE: relies on
 * 'supervisor', 'kernStackMin' and 'kernStackMax' locals at the use site. */
#define VALID_STACK_ADDRESS(addr) (addr>=0x1000ULL && (addr&STACK_ALIGNMENT_MASK)==0x0 && (supervisor ? (addr>=kernStackMin && addr<=kernStackMax) : TRUE))
515
516
__private_extern__
/*
 * Gather a raw 64-bit callstack for 'thread' into 'callStack'.
 *
 * Layout produced (see the file comment above): [0] current PC,
 * [1..N-3] frame return addresses, [N-2] current LR, [N-1] current r0.
 *
 *   count - in: capacity of callStack in entries; out: entries written,
 *           or 0 on failure.
 *   user_only - walk the user-mode savearea instead of the kernel one.
 *
 * Returns KERN_FAILURE if no savearea exists, KERN_RESOURCE_SHORTAGE if
 * the buffer is too small or fills before the walk terminates.
 */
kern_return_t chudxnu_thread_get_callstack64( thread_t thread,
                                              uint64_t *callStack,
                                              mach_msg_type_number_t *count,
                                              boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;	/* written below but not otherwise consumed here */
	/* stack bounds used by VALID_STACK_ADDRESS() for supervisor walks */
	uint64_t kernStackMin = min_valid_stack_address();
	uint64_t kernStackMax = max_valid_stack_address();
	uint64_t *buffer = callStack;
	uint32_t tmpWord;	/* staging word for 32-bit reads */
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	boolean_t is64Bit;
	struct savearea *sv;

	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* MSR at the time of the savearea decides which address space we walk. */
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		// saved LR
		// saved CR
		// saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		/* Slot width depends on the ABI of the address space being walked. */
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;
		}

		/* Only record the pc if we found a further frame link. */
		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		/* Frames must grow upward in address; anything else ends the walk. */
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
659
__private_extern__
/*
 * 32-bit variant of chudxnu_thread_get_callstack64: identical walk, but the
 * output buffer holds uint32_t entries, so 64-bit pc/LR/r0 values are
 * truncated on store. See the 64-bit version for the full contract.
 */
kern_return_t chudxnu_thread_get_callstack( thread_t thread,
                                            uint32_t *callStack,
                                            mach_msg_type_number_t *count,
                                            boolean_t user_only)
{
	kern_return_t kr;
	task_t task = get_threadtask(thread);
	uint64_t nextFramePointer = 0;
	uint64_t currPC, currLR, currR0;
	uint64_t framePointer;
	uint64_t prevPC = 0;	/* written below but not otherwise consumed here */
	/* stack bounds used by VALID_STACK_ADDRESS() for supervisor walks */
	uint64_t kernStackMin = min_valid_stack_address();
	uint64_t kernStackMax = max_valid_stack_address();
	uint32_t *buffer = callStack;
	uint32_t tmpWord;	/* staging word for 32-bit reads */
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	boolean_t is64Bit;
	struct savearea *sv;

	if(user_only) {
		sv = find_user_regs(thread);
	} else {
		sv = find_kern_regs(thread);
	}

	if(!sv) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* MSR at the time of the savearea decides which address space we walk. */
	supervisor = SUPERVISOR_MODE(sv->save_srr1);
	if(supervisor) {
#warning assuming kernel task is always 32-bit
		is64Bit = FALSE;
	} else {
		is64Bit = chudxnu_is_64bit_task(task);
	}

	bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
	if(bufferMaxIndex<2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = sv->save_srr0;
	framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
	currLR = sv->save_lr;
	currR0 = sv->save_r0;

	bufferIndex = 0;  // start with a stack of size zero
	buffer[bufferIndex++] = currPC; // save PC in position 0. (truncated to 32 bits)

	// Now, fill buffer with stack backtraces.
	while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
		uint64_t pc = 0;
		// Above the stack pointer, the following values are saved:
		// saved LR
		// saved CR
		// saved SP
		//-> SP
		// Here, we'll get the lr from the stack.
		uint64_t fp_link;

		/* Slot width depends on the ABI of the address space being walked. */
		if(is64Bit) {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint64_t);
		} else {
			fp_link = framePointer + FP_LINK_OFFSET*sizeof(uint32_t);
		}

		// Note that we read the pc even for the first stack frame (which, in theory,
		// is always empty because the callee fills it in just before it lowers the
		// stack.  However, if we catch the program in between filling in the return
		// address and lowering the stack, we want to still have a valid backtrace.
		// FixupStack correctly disregards this value if necessary.

		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &pc, fp_link, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, fp_link, sizeof(uint32_t));
				pc = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			pc = 0;
			break;
		}

		// retrieve the contents of the frame pointer and advance to the next stack frame if it's valid
		if(supervisor) {
			if(is64Bit) {
				kr = chudxnu_kern_read(&nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_kern_read(&tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		} else {
			if(is64Bit) {
				kr = chudxnu_task_read(task, &nextFramePointer, framePointer, sizeof(uint64_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, framePointer, sizeof(uint32_t));
				nextFramePointer = tmpWord;
			}
		}
		if(kr!=KERN_SUCCESS) {
			nextFramePointer = 0;
		}

		/* Only record the pc if we found a further frame link. */
		if(nextFramePointer) {
			buffer[bufferIndex++] = pc;
			prevPC = pc;
		}

		/* Frames must grow upward in address; anything else ends the walk. */
		if(nextFramePointer<framePointer) {
			break;
		} else {
			framePointer = nextFramePointer;
		}
	}

	if(bufferIndex>=bufferMaxIndex) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	// Save link register and R0 at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = currLR;
	buffer[bufferIndex++] = currR0;

	*count = bufferIndex;
	return KERN_SUCCESS;
}
802
803#pragma mark **** task and thread info ****
804
__private_extern__
/* TRUE if the task has a 64-bit address space (per task_has_64BitAddr). */
boolean_t chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}
810
/* Selector values for chudxnu_private_processor_set_things(). */
#define THING_TASK	0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no mig conversion at the end!
/*
 * Snapshot the tasks or threads of a processor set into a freshly kalloc'd,
 * referenced array.
 *
 *   thing_list - out: kalloc'd array of referenced task_t/thread_t (as
 *                mach_port_t*), or 0 when the set is empty. Caller owns the
 *                references and the allocation.
 *   count      - out: number of entries.
 *   type       - THING_TASK or THING_THREAD.
 *
 * Uses the classic lock/size/unlock/allocate retry loop: the pset must be
 * re-checked after each allocation because it was unlocked in between.
 */
static kern_return_t chudxnu_private_processor_set_things( processor_set_t pset,
                                                           mach_port_t **thing_list,
                                                           mach_msg_type_number_t *count,
                                                           int type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);

			return (KERN_FAILURE);
		}

		if (type == THING_TASK)
			maxthings = pset->task_count;
		else
			maxthings = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t		task, *tasks = (task_t *)addr;

		/* take a reference on every task before dropping the pset lock */
		for (task = (task_t)queue_first(&pset->tasks);
				!queue_end(&pset->tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->pset_tasks)) {
			task_reference_internal(task);
			tasks[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t	thread, *threads = (thread_t *)addr;

		/* take a reference on every thread before dropping the pset lock */
		for (i = 0, thread = (thread_t)queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->pset_threads)) {
			thread_reference_internal(thread);
			threads[actual++] = thread;
		}

		break;
	}
	}

	pset_unlock(pset);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* shrink-copy failed: drop every reference we took */
				switch (type) {

				case THING_TASK:
				{
					task_t		*tasks = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t	*threads = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
956
// an exact copy of task_threads() except no mig conversion at the end!
/*
 * Snapshot a task's thread list into a freshly kalloc'd array of referenced
 * thread_t. Same lock/size/retry structure as
 * chudxnu_private_processor_set_things(); the caller owns both the thread
 * references and the allocation.
 */
static kern_return_t chudxnu_private_task_threads(task_t task,
                                                  thread_act_array_t *threads_out,
                                                  mach_msg_type_number_t *count)
{
	mach_msg_type_number_t	actual;
	thread_t				*threads;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	/* reference each thread while the task lock pins the list */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* shrink-copy failed: drop the references we took */
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
1059
1060
__private_extern__
/*
 * List every task in the default processor set. The returned array holds a
 * reference per task; release it with chudxnu_free_task_list().
 */
kern_return_t chudxnu_all_tasks(task_array_t		*task_list,
                                mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
}
1067
1068__private_extern__
1069kern_return_t chudxnu_free_task_list(task_array_t *task_list,
1070 mach_msg_type_number_t *count)
1071{
1072 vm_size_t size = (*count)*sizeof(mach_port_t);
1073 void *addr = *task_list;
1074
1075 if(addr) {
1076 int i, maxCount = *count;
1077 for(i=0; i<maxCount; i++) {
1078 task_deallocate((*task_list)[i]);
1079 }
1080 kfree(addr, size);
1081 *task_list = NULL;
1082 *count = 0;
1083 return KERN_SUCCESS;
1084 } else {
1085 return KERN_FAILURE;
1086 }
1087}
1088
__private_extern__
/*
 * List every thread in the default processor set. The returned array holds
 * a reference per thread; release it with chudxnu_free_thread_list().
 */
kern_return_t chudxnu_all_threads( thread_array_t	*thread_list,
                                   mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
}
1095
__private_extern__
/*
 * List the threads of one task (referenced; release with
 * chudxnu_free_thread_list()). Thin wrapper over the private copy of
 * task_threads() that skips the MIG port conversion.
 */
kern_return_t chudxnu_task_threads( task_t task,
                                    thread_array_t *thread_list,
                                    mach_msg_type_number_t *count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}
1103
1104__private_extern__
1105kern_return_t chudxnu_free_thread_list(thread_array_t *thread_list,
1106 mach_msg_type_number_t *count)
1107{
1108 vm_size_t size = (*count)*sizeof(mach_port_t);
1109 void *addr = *thread_list;
1110
1111 if(addr) {
1112 int i, maxCount = *count;
1113 for(i=0; i<maxCount; i++) {
1114 thread_deallocate((*thread_list)[i]);
1115 }
1116 kfree(addr, size);
1117 *thread_list = NULL;
1118 *count = 0;
1119 return KERN_SUCCESS;
1120 } else {
1121 return KERN_FAILURE;
1122 }
1123}
1124
__private_extern__
/* Trivial wrapper exposing current_task() to CHUD clients. */
task_t chudxnu_current_task(void)
{
	return current_task();
}
1130
__private_extern__
/* Trivial wrapper exposing current_thread() to CHUD clients. */
thread_t chudxnu_current_thread(void)
{
	return current_thread();
}
1136
__private_extern__
/* Return the task that owns 'thread' (no reference is taken). */
task_t chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}
1142
__private_extern__
/* Pass-through to the kernel's thread_info(); see its contract for the
 * flavor/count semantics. */
kern_return_t chudxnu_thread_info(thread_t thread,
                                  thread_flavor_t flavor,
                                  thread_info_t thread_info_out,
                                  mach_msg_type_number_t *thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
1151
__private_extern__
/* Report the thread's last_switch timestamp (units as stored by the
 * scheduler in thread->last_switch). Always succeeds. */
kern_return_t chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
	*timestamp = thread->last_switch;
	return KERN_SUCCESS;
}
1158
1159#pragma mark **** DEPRECATED ****
1160
// DEPRECATED
__private_extern__
/* Deprecated alias: bind the calling thread; use chudxnu_bind_thread(). */
kern_return_t chudxnu_bind_current_thread(int cpu)
{
	return chudxnu_bind_thread(current_thread(), cpu);
}
1167
// DEPRECATED
/* Deprecated alias: unbind the calling thread; use chudxnu_unbind_thread().
 * NOTE(review): unlike its siblings this one lacks __private_extern__ —
 * confirm whether default visibility is intended. */
kern_return_t chudxnu_unbind_current_thread(void)
{
	return chudxnu_unbind_thread(current_thread());
}
1173
// DEPRECATED
__private_extern__
/* Deprecated alias: 32-bit callstack of the calling thread; use
 * chudxnu_thread_get_callstack(). */
kern_return_t chudxnu_current_thread_get_callstack( uint32_t *callStack,
                                                    mach_msg_type_number_t *count,
                                                    boolean_t user_only)
{
	return chudxnu_thread_get_callstack(current_thread(), callStack, count, user_only);
}
1182
// DEPRECATED
__private_extern__
/* Deprecated alias from the thread-activation era; same as
 * chudxnu_current_thread(). */
thread_t chudxnu_current_act(void)
{
	return chudxnu_current_thread();
}