]> git.saurik.com Git - apple/xnu.git/blame - bsd/dev/ppc/fasttrap_isa.c
xnu-1228.3.13.tar.gz
[apple/xnu.git] / bsd / dev / ppc / fasttrap_isa.c
CommitLineData
2d21ac55
A
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * CDDL HEADER START
31 *
32 * The contents of this file are subject to the terms of the
33 * Common Development and Distribution License (the "License").
34 * You may not use this file except in compliance with the License.
35 *
36 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
37 * or http://www.opensolaris.org/os/licensing.
38 * See the License for the specific language governing permissions
39 * and limitations under the License.
40 *
41 * When distributing Covered Code, include this CDDL HEADER in each
42 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
43 * If applicable, add the following below this CDDL HEADER, with the
44 * fields enclosed by brackets "[]" replaced with your own identifying
45 * information: Portions Copyright [yyyy] [name of copyright owner]
46 *
47 * CDDL HEADER END
48 */
49
50/*
51 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
52 * Use is subject to license terms.
53 */
54
55/*
56 * #pragma ident "@(#)fasttrap_isa.c 1.23 06/09/19 SMI"
57 */
58
59#ifdef KERNEL
60#ifndef _KERNEL
61#define _KERNEL /* Solaris vs. Darwin */
62#endif
63#endif
64
65#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
66#include <sys/fasttrap_isa.h>
67#include <sys/fasttrap_impl.h>
68#include <sys/dtrace.h>
69#include <sys/dtrace_impl.h>
70#include <sys/dtrace_ptss.h>
71#include <kern/debug.h>
72#include <ppc/decodePPC.h>
73#include <kern/task.h>
74#include <mach/vm_param.h>
75#include <mach/mach_vm.h>
76#include <mach/task.h>
77#include <vm/pmap.h>
78#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
79
80#define proc_t struct proc
81
82static int32_t branchtaken(int32_t bo, int32_t bi, ppc_saved_state_t *sv);
83static int32_t dtrace_decode_ppc(uint32_t inst);
84int patchInst(task_t task, addr64_t vaddr, uint32_t inst);
85kern_return_t dtrace_user_probe(ppc_saved_state_t *sv);
86
87/*
88 * Lossless User-Land Tracing on PPC
89 * ---------------------------------
90 *
91 * PPC uses a different technique to emulate user-land instruction replaces by a probe
92 * trap than x86.
93 *
94 * Like x86, it will emulate all forms of branch instructions. We will not attempt
95 * to emulate any instruction that we know will cause an interruption or exception
96 * (system call, trap, privileged instruction, instruction that uses a privileged
97 * register).
98 *
99 * NOTE: I am thinking that we should punish tight loopers, e.g., branch-to-dot.
100 * Depending upon clock resolution and how fast we can process these guys, it is
101 * possible that its quantum will never decrease. Maybe we could just manually
102 * end the guy's quantum and let the next guy go...
103 *
104 * When fasttrap_tracepoint_init is called, we fetch the instruction and decode it.
105 * If we don't recognize it or find it is a "banned" instruction, we return -1,
106 * telling our caller to forget it. Otherwise we save the instruction image and
107 * enough of the decode to quickly handle it at probe time. We cram it into
108 * the fasttrap_machtp_t structure.
109 *
110 * When the probe hits, we verify that the PC is still a probe point and if not,
111 * we bail. Otherwise we have a bit more to do.
112 *
113 * If DTFTP_ENTRY is set, we have an entry probe and need to call dtrace_probe.
114 *
115 * If DTFTP_IS_ENABLED is set, all we need to do is to return a 1.
116 *
117 * If ftp_argmap is NULL, we call dtrace_probe
118 *
119 * Otherwise, we figure out what the arguments are and pass them to dtrace_probe
120 *
121 * Next, we need to set up to emulate the probed instruction and here is where we are
122 * the most different than the x86 code.
123 *
124 * Like x86, we first check to see if the instruction is any form of branch. If so,
125 * we emulate it completely within the kernel and are done.
126 *
127 * If it is anything else, we build a code stream within the kernel to execute the
 * instruction. Note that this is very different from x86, which builds the code in
129 * userland.
130 *
131 * The generated stream needs to be executed within the kernel's code space but with
132 * the user address space and registers. Because PPC allows different translation modes
133 * for instruction fetch and data fetch, this is not too difficult.
134 *
 * There are two kinds of streams needed: execute and continue, and execute and return,
 * which are used for entry/offset and exit probes respectively.
137 *
138 * The probe code will copy the instruction image into the current user savearea (which
139 * also contains the complete user state register context). A flag that requests either
140 * execute/continue or execute/return is also set in the savearea.
141 *
142 * We now exit the dtrace code and the marked context makes its way back to the point
143 * where it will be dispatched on the processor.
144 *
145 * The exception return code will start to restore the user context, including registers
146 * and address space. However, before dispatching the user, it will notice that the
147 * emulate flags are set. At this point the code will build a code stream
148 * in an area in the per_proc that consists of
149 * the original instruction followed by a trap instruction. It will set the new MSR (in
150 * SRR1) to have address translation enable for data, translation disabled for instruction
151 * fetches, interruptions disabled, and supervisor state.
152 *
153 * The new PC and MSR are loaded via a RFID and the generated stream is executed. If a
154 * synchronous fault occurs, it is either handled (PTE miss, FPU or vector unavailable),
155 * emulated (alignment or denorm), or passed on to the user.
156 *
157 * Assuming the emulated instruction completes, the trap will execute. When that happens,
158 * low-level trap handler will check its flags. If the trap corresponds to an
159 * execute/continue stream, the trap handler will adjust the PC and complete the
160 * transition into user space.
161 *
162 * If the trap corresponds to an execute/return stream, the handler will generate
163 * a T_DTRACE_RET exception and let the trap handler pass it along to dtrace_user_probe.
164 *
165 */
166
167
168static uint64_t
169fasttrap_anarg(ppc_saved_state_t *sv, int function_entry, int argno)
170{
171#pragma unused(function_entry)
172 uint32_t farg;
173 uint64_t value;
174
175 /* The first 8 arguments (argno 0-7) are in registers */
176 if (argno < 8) {
177 value = (&sv->save_r3)[argno];
178 } else {
179 if (sv->save_srr1 & 0x8000000000000000ULL) {
180 /* 64-bit */
181 /* Grab argument >= 8 from stack */
182 fasttrap_fuword64_noerr(sv->save_r1 + 48 + ((argno)* sizeof(uint64_t)), &value);
183 } else {
184 /* 32-bit */
185 /* Grab argument >= 8 from stack */
186 fasttrap_fuword32_noerr(sv->save_r1 + 24 + ((argno) * sizeof(uint32_t)), &farg);
187 value = (uint64_t)farg;
188 }
189 }
190
191 return (value);
192}
193
194/*ARGSUSED*/
195int
196fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, user_addr_t pc,
197 fasttrap_probe_type_t type)
198{
199#pragma unused(type)
200
201 uint32_t instr, testr1, testr2, testr3;
202 user_addr_t targpc;
203 int32_t target, optype;
204
205 /*
206 * Read the instruction at the given address out of the process's
207 * address space. We don't have to worry about a debugger
208 * changing this instruction before we overwrite it with our trap
209 * instruction since P_PR_LOCK is set. Since instructions can span
210 * pages, we potentially read the instruction in two parts. If the
211 * second part fails, we just zero out that part of the instruction.
212 */
213 /*
214 * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racey...
215 */
216
217 if (uread(p, &instr, 4, pc) != 0) return (-1); /* Grab instruction, return suddenly if read fails... */
218
219 optype = dtrace_decode_ppc(instr); /* See if we have an instruction we can probe */
220
221 tp->ftt_instr = instr; /* Save the instruction image */
222 testr1 = tp->ftt_bo = (uint8_t)((instr >> (31 - 10)) & 0x1F); /* Extract branch options */
223 testr2 = tp->ftt_bi = (uint8_t)((instr >> (31 - 15)) & 0x1F); /* Extract condition register bit */
224 testr3 = (instr >> (31 - 20)) & 0x1F; /* Get that last register */
225 tp->ftt_flgs = (uint8_t)(instr & 3); /* Set the absolute address and link flags */
226
227 switch(optype) { /* Do instruction specific decode */
228
229 case diCMN: /* Common instruction */
230 tp->ftt_type = ftmtCommon; /* Mark as common instruction */
231 break;
232
233 case diINV: /* Invalid */
234 case diTRP: /* Trap */
235 case diSC: /* System Call */
236 case diRFI: /* Return from interrupt */
237 case diPRV: /* Priviliged instruction */
238 return (-1); /* We will not emulate these... */
239 break;
240
241 case diB: /* Branch */
242 tp->ftt_type = ftmtB; /* Mark as branch instruction */
243 target = instr & 0x03FFFFFC; /* Extract address or offset */
244 if(target & 0x02000000) target |= 0xFC000000; /* Sign extend */
245 tp->ftt_trgt = target; /* Trim back down and save */
246
247 targpc = (user_addr_t)((int64_t)target); /* Generate a target address, hopefully we sign extend... */
248 if(!(tp->ftt_flgs & ftmtAbs)) { /* Are we dealing with an offset here? */
249 targpc = targpc + pc; /* Apply offset to get target address */
250 }
251
252 if(targpc == pc) return -1; /* Branching to self is a sin and is forbidden... */
253 break;
254
255 case diBC: /* Branch conditional */
256 tp->ftt_type = ftmtBC; /* Mark as branch conditional */
257 target = instr & 0x0000FFFC; /* Extract address or offset */
258 if(target & 0x00008000) target |= 0xFFFF0000; /* Sign extend */
259 tp->ftt_trgt = target; /* Trim back down and save */
260
261 targpc = (user_addr_t)((int64_t)target); /* Generate a target address, hopefully we sign extend... */
262 if(!(tp->ftt_flgs & ftmtAbs)) { /* Are we dealing with an offset here? */
263 targpc = targpc + pc; /* Apply offset to get target address */
264 }
265
266 if(targpc == pc) return -1; /* Branching to self is a sin and is forbidden... */
267 break;
268
269 case diBLR: /* Branch conditional to link register */
270 tp->ftt_type = ftmtBLR; /* Mark as branch conditional to link register */
271 break;
272
273 case diBCTR: /* Branch conditional to count register */
274 tp->ftt_type = ftmtBCTR; /* Mark as branch conditional to count register */
275 break;
276
277 case diOR: /* OR */
278 if((instr >> 26) == 24) { /* Is this the ORI nop? */
279 if((testr1 == testr2) && ((instr & 0x0000FFFF) == 0)) tp->ftt_type = ftmtNOP; /* Remember if this is a NOP instruction */
280 else tp->ftt_type = ftmtCommon; /* Otherwise it is a common ORI instruction */
281 }
282 else if((testr1 == testr2) && (testr1 == testr3)) tp->ftt_type = ftmtNOP; /* If all three registers are the same, this is a NOP */
283 else tp->ftt_type = ftmtCommon; /* Otherwise it is a common OR instruction */
284
285 break;
286
287 default:
288 panic("fasttrap_tracepoint_init: invalid branch decode, inst = %08X, optype = %d\n", instr, optype);
289 break;
290
291 }
292
293 return (0);
294}
295
296int
297fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp)
298{
299 return patchInst(p->task, tp->ftt_pc, FASTTRAP_INSTR); /* Patch the instruction and flush it */
300}
301
302extern void dbgTrace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
303
304int
305fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp)
306{
307 uint32_t instr;
308
309 /*
310 * Distinguish between read or write failures and a changed
311 * instruction.
312 */
313 if (uread(p, &instr, 4, tp->ftt_pc) != 0) return (0); /* Get the instruction, but exit if not mapped */
314
315// dbgTrace(0x99999999, (uint32_t)tp->ftt_pc, tp->ftt_instr, instr, 0); /* (TRACE/DEBUG) */
316
317 if (instr != FASTTRAP_INSTR) return (0); /* Did someone change it? If so, just leave */
318
319 return patchInst(p->task, tp->ftt_pc, tp->ftt_instr); /* Patch the old instruction back in and flush it */
320}
321
/*
 * Fire any return probes registered at "pc" for process "pid". "pc" is
 * the address of the probed instruction and "new_pc" is where control
 * will resume; a return probe fires only when new_pc lands outside the
 * probed function's body (i.e. the branch actually left the function).
 */
static void
fasttrap_return_common(ppc_saved_state_t *sv, user_addr_t pc, pid_t pid, user_addr_t new_pc)
{

	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	lck_mtx_t *pid_mtx;

	/* Serialize against tracepoint teardown while we walk the bucket */
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/* Find the (pid, pc) tracepoint, skipping defunct owners */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    !tp->ftt_proc->ftpc_defunct)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again. Unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here if the program counter is
		 * external to the function.
		 */
		if((new_pc - id->fti_probe->ftp_faddr) < id->fti_probe->ftp_fsize)	/* Is target within the function? */
			continue;	/* Yeah, skip this one... */

		DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP);
		/* arg0 is the return site's offset within the function; r3/r4 carry return values */
		dtrace_probe(id->fti_probe->ftp_id,
		    pc - id->fti_probe->ftp_faddr,
		    sv->save_r3, sv->save_r4, 0, 0);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP);
	}

	lck_mtx_unlock(pid_mtx);
}
369
/*
 * Marshal up to "argc" USDT probe arguments from the saved user state
 * into argv[], translating each probe-argument index through the
 * probe's argmap into a native argument position. Slots beyond what
 * the probe supplies are zeroed.
 */
static void
fasttrap_usdt_args(fasttrap_probe_t *probe, ppc_saved_state_t *sv, int argc,
    uint64_t *argv)
{
	int i, x, cap = MIN(argc, probe->ftp_nargs);
	uint32_t farg;

	for (i = 0; i < cap; i++) {
		x = probe->ftp_argmap[i];

		if (x <= 8) {	/* Is this argument in a register? */
			/*
			 * NOTE(review): this indexes relative to save_r0 with
			 * x <= 8, whereas fasttrap_anarg() indexes relative to
			 * save_r3 with argno < 8. The argmap values are
			 * presumably pre-biased to account for this -- confirm
			 * against the code that builds ftp_argmap.
			 */
			argv[i] = (&sv->save_r0)[x];
		} else {
			if(sv->save_srr1 & 0x8000000000000000ULL) {	/* Are we running in 64-bit? */
				fasttrap_fuword64_noerr(sv->save_r1 + 48 + (x * sizeof(uint64_t)), &argv[i]);	/* Grab argument > 8 from stack */
			}
			else {
				fasttrap_fuword32_noerr(sv->save_r1 + 24 + (x * sizeof(uint32_t)), &farg);	/* Grab argument > 8 from stack */
				argv[i] = (uint64_t)farg;	/* Convert to 64-bit */
			}
		}
	}

	/* Zero any requested arguments beyond what the probe provides */
	for (; i < argc; i++) {
		argv[i] = 0;
	}
}
397
/*
 * Main entry from the trap handler when a fasttrap trap instruction
 * fires in user mode. Fires every probe registered at the trap PC,
 * then either emulates the displaced instruction in the kernel (NOPs
 * and all branch forms) or arranges for it to be injected and executed
 * on the way back out (ftmtCommon), finally setting the resume PC.
 * Returns 0 on success, -1 if no tracepoint matches the trap PC.
 */
int
fasttrap_pid_probe(ppc_saved_state_t *sv)
{
	proc_t *p = current_proc();
	fasttrap_bucket_t *bucket;
	lck_mtx_t *pid_mtx;
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;
	user_addr_t new_pc = 0;
	user_addr_t pc;
	user_addr_t addrmask;

	pc = sv->save_srr0;	/* Remember the PC for later */
	if(sv->save_srr1 & 0x8000000000000000ULL) addrmask = 0xFFFFFFFFFFFFFFFFULL;	/* Set 64-bit addressing if enabled */
	else addrmask = 0x00000000FFFFFFFFULL;	/* Otherwise set 32-bit */

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

	/*
	 * Clear all user tracing flags.
	 */
	uthread->t_dtrace_ft = 0;

	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	/*
	 * APPLE NOTE: Terry says: "You need to hold the process locks (currently: kernel funnel) for this traversal"
	 * FIXME: How do we assert this?
	 */
	while (p->p_lflag & P_LINVFORK) p = p->p_pptr;	/* Search the end */

	pid = p->p_pid;
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, sv->save_srr0)];	/* Get the bucket that corresponds to our PC */

	/*
	 * Lookup the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && (sv->save_srr0 == tp->ftt_pc) &&
		    !tp->ftt_proc->ftpc_defunct)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return (-1);
	}

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

		for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
			fasttrap_probe_t *probe = id->fti_probe;

			if (id->fti_ptype == DTFTP_ENTRY) {
				/*
				 * We note that this was an entry
				 * probe to help ustack() find the
				 * first caller.
				 */
				cookie = dtrace_interrupt_disable();
				DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP | CPU_DTRACE_ENTRY);
				dtrace_probe(probe->ftp_id, sv->save_r3, sv->save_r4,	/* Call the main probe routine with the first 5 args */
				    sv->save_r5, sv->save_r6, sv->save_r7);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP | CPU_DTRACE_ENTRY);
				dtrace_interrupt_enable(cookie);

			} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
				/*
				 * Note that in this case, we don't
				 * call dtrace_probe() since it's only
				 * an artificial probe meant to change
				 * the flow of control so that it
				 * encounters the true probe.
				 */
				is_enabled = 1;

			} else if (probe->ftp_argmap == NULL) {
				DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP);
				dtrace_probe(probe->ftp_id, sv->save_r3, sv->save_r4,	/* Call the main probe routine with the first 5 args */
				    sv->save_r5, sv->save_r6, sv->save_r7);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP);

			} else {
				uint64_t t[5];

				fasttrap_usdt_args(probe, sv, 5, t);	/* Grab 5 arguments */

				DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP);
				dtrace_probe(probe->ftp_id, t[0], t[1],
				    t[2], t[3], t[4]);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP);
			}

			/* APPLE NOTE: Oneshot probes get one and only one chance... */
			if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) {
				fasttrap_tracepoint_remove(p, tp);
			}
		}
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	lck_mtx_unlock(pid_mtx);
	tp = &tp_local;

	/*
	 * If there's an is-enabled probe connected to this tracepoint it
	 * means that there was a 'xor r3,r3,r3'
	 * instruction that was placed there by DTrace when the binary was
	 * linked. As this probe is, in fact, enabled, we need to stuff 1
	 * into R3. Accordingly, we can bypass all the instruction
	 * emulation logic since we know the inevitable result. It's possible
	 * that a user could construct a scenario where the 'is-enabled'
	 * probe was on some other instruction, but that would be a rather
	 * exotic way to shoot oneself in the foot.
	 */
	if (is_enabled) {
		sv->save_r3 = 1;	/* Set condition to true */
		new_pc = (sv->save_srr0 + 4) & addrmask;	/* Just fall through to the next instruction */
		goto done;
	}

	/*
	 * We emulate certain types of instructions to ensure correctness
	 * (in the case of position dependent instructions) or optimize
	 * common cases. The rest we execute in the kernel, but with
	 * most of the user's context active.
	 */
	switch (tp->ftt_type) {

		case ftmtNOP:	/* NOP */
			new_pc = (sv->save_srr0 + 4) & addrmask;	/* Just fall through to the next instruction */
			break;

		case ftmtB:	/* Plain unconditional branch */
			new_pc = (user_addr_t)((int64_t)tp->ftt_trgt);	/* Assume target is absolute address for the moment */
			if(!(tp->ftt_flgs & ftmtAbs)) new_pc = (new_pc + sv->save_srr0) & addrmask;	/* We don't have absolute address, use as offset from instruction address */

			if(tp->ftt_flgs & ftmtLink) sv->save_lr = (sv->save_srr0 + 4) & addrmask;	/* Set the LR to the next instruction if needed */
			break;

		case ftmtBC:	/* Conditional PC relative or absolute branch */
			new_pc = (user_addr_t)((int64_t)tp->ftt_trgt);	/* Assume target is absolute address for the moment */
			/*
			 * NOTE(review): unlike the ftmtB case above, this
			 * relative sum is not masked with addrmask, so a
			 * 32-bit process branching across the 4GB boundary
			 * may get an unwrapped PC -- confirm intent.
			 */
			if(!(tp->ftt_flgs & ftmtAbs)) new_pc = new_pc + sv->save_srr0;	/* We don't have absolute address, use as offset from instruction address */

			if(tp->ftt_flgs & ftmtLink) sv->save_lr = (sv->save_srr0 + 4) & addrmask;	/* Set the LR to the next instruction if needed */
			if(!branchtaken(tp->ftt_bo, tp->ftt_bi, sv)) new_pc = (sv->save_srr0 + 4) & addrmask;	/* If branch was not taken, set PC to next address */
			break;

		case ftmtBLR:	/* Conditional branch to LR */
			new_pc = sv->save_lr;	/* Branch target comes from the LR */

			if(tp->ftt_flgs & ftmtLink) sv->save_lr = (sv->save_srr0 + 4) & addrmask;	/* Set the LR to the next instruction if needed */
			if(!branchtaken(tp->ftt_bo, tp->ftt_bi, sv)) new_pc = (sv->save_srr0 + 4) & addrmask;	/* If branch was not taken, set PC to next address */
			break;

		case ftmtBCTR:	/* Conditional branch to CTR */
			new_pc = sv->save_ctr;	/* Branch target comes from the CTR */

			if(tp->ftt_flgs & ftmtLink) sv->save_lr = (sv->save_srr0 + 4) & addrmask;	/* Set the LR to the next instruction if needed */
			if(!branchtaken(tp->ftt_bo, tp->ftt_bi, sv)) new_pc = (sv->save_srr0 + 4) & addrmask;	/* If branch was not taken, set PC to next address */
			break;

		case ftmtCommon:	/* Common, non-in-kernel emulated instruction */
			sv->save_instr[0] = 1;	/* We only have one instruction to inject */
			sv->save_instr[1] = tp->ftt_instr;	/* Set the instruction */
			sv->save_hdr.save_flags = sv->save_hdr.save_flags | SAVinject;	/* Tell low-level exception return to inject the instruction */
			uthread->t_dtrace_step = 1;	/* Let it be known that a trace return is imminent */
			return 0;	/* Go and don't come back until you are done... */

		default:
			panic("fasttrap_pid_probe: invalid ftt_type = %08X\n", tp->ftt_type);	/* Huh, wha happened? */
			break;
	}


done:

	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	sv->save_srr0 = new_pc;	/* Set the new PC */
	if (tp->ftt_retids != NULL) fasttrap_return_common(sv, pc, pid, new_pc);

	return (0);
}
604
605
606int
607fasttrap_return_probe(ppc_saved_state_t *sv)
608{
609
610 user_addr_t pc, npc;
611
612 proc_t *p = current_proc();
613
614
615 /*
616 * Treat a child created by a call to vfork(2) as if it were its
617 * parent. We know that there's only one thread of control in such a
618 * process: this one.
619 */
620 /*
621 * APPLE NOTE: Terry says: "You need to hold the process locks (currently: kernel funnel) for this traversal"
622 * How do we assert this?
623 */
624 while (p->p_lflag & P_LINVFORK) {
625 p = p->p_pptr;
626 }
627
628 pc = sv->save_srr0; /* Get the PC of the probed instruction */
629 npc = pc + 4; /* Get next PC */
630 if(!(sv->save_srr1 & 0x8000000000000000ULL)) npc &= 0x00000000FFFFFFFF; /* Wrap new PC if running 32-bit */
631 fasttrap_return_common(sv, pc, p->p_pid, npc);
632
633 return (0);
634}
635
636uint64_t
637fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
638 int aframes)
639{
640#pragma unused(arg, id, parg, aframes)
641 return (fasttrap_anarg((ppc_saved_state_t *)find_user_regs(current_thread()), 1, argno));
642}
643
644uint64_t
645fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
646 int aframes)
647{
648#pragma unused(arg, id, parg, aframes)
649 return (fasttrap_anarg((ppc_saved_state_t *)find_user_regs(current_thread()), 0, argno));
650}
651
652
653static int32_t branchtaken(int32_t bo, int32_t bi, ppc_saved_state_t *sv) {
654 int32_t bcond, czero, crmatch;
655 uint64_t ctr;
656
657 if((bo & 0x14) == 0x14) return 1; /* If this is a branch always, exit with true... */
658
659 czero = 0; /* Assume that we have not just decremented the CTR to 0 */
660
661 if(!(bo & 4)) { /* Skip the next bit if we do NOT muck with the CTR */
662 ctr = sv->save_ctr = sv->save_ctr - 1; /* Decrement the CTR */
663 if(!(sv->save_srr1 & 0x8000000000000000ULL)) ctr &= 0x00000000FFFFFFFF; /* Only look at the bottom 32 bits if 32-bit mode */
664 czero = (ctr == 0); /* Remember if we just hit zero */
665 }
666
667 bcond = (bo >> 3); /* If 1, branch if CR flag is 1. If 0, branch if 0 */
668 crmatch = bo >> 4; /* If bo[0] is set, do not check CR flag */
669 crmatch = crmatch | (((sv->save_cr >> (31 - bi)) ^ bcond) ^ 1); /* Low bit is now set if CR flag matches or CR is not checked. Other bits are trash. */
670
671// dbgTrace(0x77777777, bo, bi, sv->save_cr, ((czero | crmatch) & 1)); /* (TRACE/DEBUG) */
672
673 return ((czero | crmatch) & 1); /* Return 1 if branch taken, 0 if not... */
674}
675
/*
 * Classify a PPC instruction image using the table-driven decoder from
 * ppc/decodePPC.h (insts/masktab/dcdfail). Returns one of the di*
 * classification codes; SPR-accessing instructions are additionally
 * checked against sprtbl and refined to diINV/diPRV/diCMN.
 */
static int32_t dtrace_decode_ppc(uint32_t inst) {

	int32_t curdcd, lastmask, newmask, spr, bit, bito, word;
	uint16_t xop = 0;
	dcdtab *dcd;

	curdcd = inst >> 26;	/* Isolate major op code to start decode */
	lastmask = 99;	/* Always force a new xop at the start */

	while(1) {	/* Loop until we find instruction or fail */
		dcd = &insts[curdcd];	/* Point to the current decode table entry */
		if(dcd->dcdFlgs & dcdJump) {	/* Should we jump to a new spot in the decode table? */
			curdcd = dcd->dcdMatch;	/* Jump */
			continue;
		}

		newmask = dcd->dcdFlgs & dcdMask;	/* Isolate the mask index */
		if(lastmask != newmask) {	/* Are we changing masks? */
			if(!newmask) break;	/* If the mask is 0, we match everything and succeed... (note: lastmask can never be 0) */
			xop = inst & masktab[newmask];	/* Clear all extra bits to make match */
			lastmask = newmask;	/* Remember */
		}

		if(xop == dcd->dcdMatch) break;	/* We found our guy! */

		if(!(dcd->dcdFlgs & dcdStep)) {	/* No stepping, we failed */
			dcd = &dcdfail;	/* Point to a failure entry */
			break;	/* Leave... */
		}

		curdcd = curdcd + 1;	/* Step to the next decode entry */
	}

	if(dcd->dcdType != diSPR) return (int32_t)(dcd->dcdType);	/* Return what we found */

	/* mfspr/mtspr: extract and validate the SPR number */
	spr = (inst >> (31 - 20)) & 0x3FF;	/* Get the source */
	spr = ((spr << 5) & 0x3E0) | ((spr >> 5) & 0x1F);	/* The SPR field is encoded with its halves swapped; flip to right order */

	word = spr >> 5;	/* Get word index into table */
	bito = spr & 0x1F;	/* Get bit offset into entry */
	bit = 0x80000000 >> bito;	/* Position bit for a test */

	if(!(sprtbl[word] & bit)) return (diINV);	/* Bogus SPR so whole instruction is invalid... */

	if(spr & 0x10) return (diPRV);	/* This is a privileged SPR so instruction is privileged... */
	return (diCMN);	/* Just a common SPR so instruction is the same... */
}