/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>
#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <mach/vm_param.h>
#include <machine/cpu_capabilities.h>

extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uint64_t illval)
{
	/*
	 * dtrace_getarg() is a lost cause on PPC. For the case of the error
	 * probe firing, let's stash away "illval" here and special-case
	 * retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fault, fltoffs );
}

/*
 * Atomicity and synchronization
 */
void
dtrace_membar_producer(void)
{
	__asm__ volatile("sync");
}

void
dtrace_membar_consumer(void)
{
	__asm__ volatile("isync");
}
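
/*
 * Illustrative sketch, not part of the original source: a typical pairing of
 * the two barriers above. On PPC, "sync" orders all earlier loads and stores
 * ahead of all later ones (a full barrier on the producer side), while
 * "isync" prevents later instructions from starting before earlier ones have
 * completed; after the dependent branch of the flag-polling loop it acts as
 * an acquire barrier. All names below are hypothetical.
 */
#if 0
static int shared_data;
static volatile int shared_flag;

static void
producer_example(void)
{
	shared_data = 42;		/* publish the payload... */
	dtrace_membar_producer();	/* ...fence it ahead of... */
	shared_flag = 1;		/* ...the flag announcing it */
}

static void
consumer_example(void)
{
	while (shared_flag == 0)	/* wait for the announcement... */
		continue;
	dtrace_membar_consumer();	/* ...fence the flag load ahead of... */
	(void)shared_data;		/* ...the payload read */
}
#endif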

/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	return (ml_at_interrupt_context() ? 1 : 0);
}

/*
 * MP coordination
 */
typedef void (*broadcastFunc) (uint32_t);

int32_t cpu_broadcast(uint32_t *, broadcastFunc, uint32_t); /* osfmk/ppc/machine_cpu.h */

typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
	uint32_t waitVar;
} xcArg_t;

static void
xcRemote( uint32_t foo )
{
	xcArg_t *pArg = (xcArg_t *)foo;

	if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
		(pArg->f)(pArg->arg);
	}

	if (!hw_atomic_sub(&(pArg->waitVar), 1)) {	/* Drop the wait count */
		thread_wakeup((event_t)&(pArg->waitVar)); /* If we were the last, wake up the signaller */
	}
}

/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
	xcArg_t xcArg;

	/* Talking to ourselves, are we? */
	if ( cpu == CPU->cpu_id ) {
		(*f)(arg);
		return;
	}

	if ( cpu == DTRACE_CPUALL ) {
		(*f)(arg);
	}

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;
	xcArg.waitVar = 0;

	(void)cpu_broadcast(&(xcArg.waitVar), xcRemote, (uint32_t)&xcArg);
}
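
/*
 * Illustrative sketch, not part of the original source: the common use of
 * dtrace_xcall() is with DTRACE_CPUALL, to run a function on every CPU when
 * the framework needs all CPUs to observe a state change. The callback and
 * caller names below are hypothetical.
 */
#if 0
static void
dtrace_sync_one(void *ignored)
{
#pragma unused(ignored)
	/* per-CPU work goes here, e.g. spinning until a global flag settles */
}

static void
dtrace_sync_all_example(void)
{
	dtrace_xcall(DTRACE_CPUALL, dtrace_sync_one, NULL);
}
#endif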

/*
 * Runtime and ABI
 */
extern greg_t
dtrace_getfp(void)
{
	return (greg_t)__builtin_frame_address(0);
}

uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	ppc_saved_state_t *regs = (ppc_saved_state_t *)savearea;
	uint64_t mask = (_cpu_capabilities & k64Bit) ? 0xffffffffffffffffULL : 0x00000000ffffffffULL;

	/* See osfmk/ppc/savearea.h */
	if (reg > 68) { /* beyond mmcr2 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	switch (reg) {
	/* The first 38 registers are saved as 64-bit values: r0-r31, srr0, srr1, xer, lr, ctr, dar. */
	default:
		return (((uint64_t *)(&(regs->save_r0)))[reg]) & mask;

	/* Handle the 32-bit registers */
	case 38: case 39: case 40: case 41:	/* cr, dsisr, exception, vrsave */
	case 42: case 43: case 44: case 45:	/* vscr[4] */
	case 46: case 47: case 48: case 49:	/* fpscrpad, fpscr, save_1d8[2] */
	case 50: case 51: case 52: case 53:	/* save_1E0[8] */
	case 54: case 55: case 56: case 57:
	case 58: case 59: case 60: case 61:	/* save_pmc[8] */
	case 62: case 63: case 64: case 65:
		return (uint64_t)(((unsigned int *)(&(regs->save_cr)))[reg - 38]);

	case 66:
		return regs->save_mmcr0 & mask;
	case 67:
		return regs->save_mmcr1 & mask;
	case 68:
		return regs->save_mmcr2 & mask;
	}
}
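
/*
 * Illustrative note, not part of the original source: with the mapping
 * above, dtrace_getreg(savearea, 1) fetches the saved r1 (the stack
 * pointer), and index 32 fetches the saved srr0 (the interrupted PC),
 * since srr0 immediately follows r0-r31 in the savearea.
 */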

#define RETURN_OFFSET 8
#define RETURN_OFFSET64 16
#define REGPC save_srr0
#define REGSP save_r1
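
/*
 * Clarifying note, not part of the original source: these offsets follow the
 * Darwin PPC stack conventions. Each frame begins with a back chain pointer
 * to the caller's frame at 0(SP); the saved LR (return address) slot sits at
 * 8(SP) in a 32-bit frame and at 16(SP) in a 64-bit frame. REGPC and REGSP
 * name the saved-state slots holding the interrupted PC (srr0) and stack
 * pointer (r1).
 */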

/*
 * XXX dtrace_getustack_common() can be called from probe context.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
#if 0
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);

#if 0 /* XXX signal stack crawl */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

	return (ret);
}
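
/*
 * Clarifying note, not part of the original source: the loop above is a
 * plain back-chain walk. Each iteration records pc, then loads the next
 * return address from RETURN_OFFSET(sp) and the caller's frame from 0(sp),
 * stopping at a null pc or null back chain, or when pcstack fills up.
 */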

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;
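
	/*
	 * Clarifying note, not part of the original source: on a function
	 * entry probe the prologue has not yet saved LR to the stack, so the
	 * caller's return address is still in the saved LR register rather
	 * than in a frame; record the interrupted pc, then resume the walk
	 * from save_lr.
	 */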
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		}
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + KERNEL_STACK_SIZE);

	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = *(struct frame **)fp;
		pc = *(uintptr_t *)(((uint32_t)fp) + RETURN_OFFSET);

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + KERNEL_STACK_SIZE);

				on_intr = 0;
				continue;
			}
			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != 0) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = 0;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = 0;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}
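
/*
 * Clarifying note, not part of the original source: "aframes" counts the
 * artificial frames that the probe machinery pushed between the probe site
 * and this walker; those frames are skipped, and the last skipped slot is
 * filled with cpu_dtrace_caller when one was recorded. The walk also hops
 * from the interrupt stack back to the interrupted thread's kernel stack
 * when the probe fired at interrupt context.
 */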

uint64_t
dtrace_getarg(int arg, int aframes)
{
#pragma unused(arg,aframes)
	return 0xfeedfacedeafbeadLL; /* XXX Only called for arg >= 5 */
}

/*
 * Load/Store Safety
 */

void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the
	 * first VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_ADDRESS);
	func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
}
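
/*
 * Clarifying note, not part of the original source: the two calls above
 * register everything below and everything above the kernel's address
 * range as toxic, so the DIF load-safety checks will refuse probe-context
 * dereferences outside [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS].
 */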

extern void *mapping_phys_lookup(ppnum_t, unsigned int *);

boolean_t
dtxnu_is_RAM_page(ppnum_t pn)
{
	unsigned int ignore;
	return (NULL == mapping_phys_lookup(pn, &ignore)) ? FALSE : TRUE;
}
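
/*
 * Clarifying note, not part of the original source: mapping_phys_lookup()
 * returns NULL for physical page numbers that are not managed RAM, so this
 * predicate lets the load-safety code reject probe-context accesses to
 * device or otherwise unbacked physical pages.
 */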