2 * Copyright (c) 2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * The contents of this file are subject to the terms of the
33 * Common Development and Distribution License (the "License").
34 * You may not use this file except in compliance with the License.
36 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
37 * or http://www.opensolaris.org/os/licensing.
38 * See the License for the specific language governing permissions
39 * and limitations under the License.
41 * When distributing Covered Code, include this CDDL HEADER in each
42 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
43 * If applicable, add the following below this CDDL HEADER, with the
44 * fields enclosed by brackets "[]" replaced with your own identifying
45 * information: Portions Copyright [yyyy] [name of copyright owner]
51 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
52 * Use is subject to license terms.
56 * #pragma ident "@(#)fasttrap_isa.c 1.23 06/09/19 SMI"
61 #define _KERNEL /* Solaris vs. Darwin */
65 #define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
66 #include <sys/fasttrap_isa.h>
67 #include <sys/fasttrap_impl.h>
68 #include <sys/dtrace.h>
69 #include <sys/dtrace_impl.h>
70 #include <sys/dtrace_ptss.h>
71 #include <kern/debug.h>
72 #include <ppc/decodePPC.h>
73 #include <kern/task.h>
74 #include <mach/vm_param.h>
75 #include <mach/mach_vm.h>
76 #include <mach/task.h>
78 #include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
79 extern dtrace_id_t dtrace_probeid_error
;
81 #define proc_t struct proc
83 static int32_t branchtaken(int32_t bo
, int32_t bi
, ppc_saved_state_t
*sv
);
84 static int32_t dtrace_decode_ppc(uint32_t inst
);
85 int patchInst(task_t task
, addr64_t vaddr
, uint32_t inst
);
86 kern_return_t
dtrace_user_probe(ppc_saved_state_t
*sv
);
89 * Lossless User-Land Tracing on PPC
90 * ---------------------------------
92 * PPC uses a different technique to emulate user-land instruction replaces by a probe
95 * Like x86, it will emulate all forms of branch instructions. We will not attempt
96 * to emulate any instruction that we know will cause an interruption or exception
97 * (system call, trap, privileged instruction, instruction that uses a privileged
100 * NOTE: I am thinking that we should punish tight loopers, e.g., branch-to-dot.
101 * Depending upon clock resolution and how fast we can process these guys, it is
102 * possible that its quantum will never decrease. Maybe we could just manually
103 * end the guy's quantum and let the next guy go...
105 * When fasttrap_tracepoint_init is called, we fetch the instruction and decode it.
106 * If we don't recognize it or find it is a "banned" instruction, we return -1,
107 * telling our caller to forget it. Otherwise we save the instruction image and
108 * enough of the decode to quickly handle it at probe time. We cram it into
109 * the fasttrap_machtp_t structure.
111 * When the probe hits, we verify that the PC is still a probe point and if not,
112 * we bail. Otherwise we have a bit more to do.
114 * If DTFTP_ENTRY is set, we have an entry probe and need to call dtrace_probe.
116 * If DTFTP_IS_ENABLED is set, all we need to do is to return a 1.
118 * If ftp_argmap is NULL, we call dtrace_probe
120 * Otherwise, we figure out what the arguments are and pass them to dtrace_probe
122 * Next, we need to set up to emulate the probed instruction and here is where we are
123 * the most different than the x86 code.
125 * Like x86, we first check to see if the instruction is any form of branch. If so,
126 * we emulate it completely within the kernel and are done.
128 * If it is anything else, we build a code stream within the kernel to execute the
129 * instruction. Note that this is very different from x86 which builds the code in
132 * The generated stream needs to be executed within the kernel's code space but with
133 * the user address space and registers. Because PPC allows different translation modes
134 * for instruction fetch and data fetch, this is not too difficult.
136 * There are two kinds streams needed: execute and continue, and execute and return,
137 * which are used for entry/offset and exit probes respectively.
139 * The probe code will copy the instruction image into the current user savearea (which
140 * also contains the complete user state register context). A flag that requests either
141 * execute/continue or execute/return is also set in the savearea.
143 * We now exit the dtrace code and the marked context makes its way back to the point
144 * where it will be dispatched on the processor.
146 * The exception return code will start to restore the user context, including registers
147 * and address space. However, before dispatching the user, it will notice that the
148 * emulate flags are set. At this point the code will build a code stream
149 * in an area in the per_proc that consists of
150 * the original instruction followed by a trap instruction. It will set the new MSR (in
151 * SRR1) to have address translation enable for data, translation disabled for instruction
152 * fetches, interruptions disabled, and supervisor state.
154 * The new PC and MSR are loaded via a RFID and the generated stream is executed. If a
155 * synchronous fault occurs, it is either handled (PTE miss, FPU or vector unavailable),
156 * emulated (alignment or denorm), or passed on to the user.
158 * Assuming the emulated instruction completes, the trap will execute. When that happens,
159 * low-level trap handler will check its flags. If the trap corresponds to an
160 * execute/continue stream, the trap handler will adjust the PC and complete the
161 * transition into user space.
163 * If the trap corresponds to an execute/return stream, the handler will generate
164 * a T_DTRACE_RET exception and let the trap handler pass it along to dtrace_user_probe.
170 fasttrap_anarg(ppc_saved_state_t
*sv
, int function_entry
, int argno
)
172 #pragma unused(function_entry)
176 /* The first 8 arguments (argno 0-7) are in registers */
178 value
= (&sv
->save_r3
)[argno
];
180 if (sv
->save_srr1
& 0x8000000000000000ULL
) {
182 /* Grab argument >= 8 from stack */
183 fasttrap_fuword64_noerr(sv
->save_r1
+ 48 + ((argno
)* sizeof(uint64_t)), &value
);
186 /* Grab argument >= 8 from stack */
187 fasttrap_fuword32_noerr(sv
->save_r1
+ 24 + ((argno
) * sizeof(uint32_t)), &farg
);
188 value
= (uint64_t)farg
;
197 fasttrap_tracepoint_init(proc_t
*p
, fasttrap_tracepoint_t
*tp
, user_addr_t pc
,
198 fasttrap_probe_type_t type
)
202 uint32_t instr
, testr1
, testr2
, testr3
;
204 int32_t target
, optype
;
207 * Read the instruction at the given address out of the process's
208 * address space. We don't have to worry about a debugger
209 * changing this instruction before we overwrite it with our trap
210 * instruction since P_PR_LOCK is set. Since instructions can span
211 * pages, we potentially read the instruction in two parts. If the
212 * second part fails, we just zero out that part of the instruction.
215 * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racey...
218 if (uread(p
, &instr
, 4, pc
) != 0) return (-1); /* Grab instruction, return suddenly if read fails... */
220 optype
= dtrace_decode_ppc(instr
); /* See if we have an instruction we can probe */
222 tp
->ftt_instr
= instr
; /* Save the instruction image */
223 testr1
= tp
->ftt_bo
= (uint8_t)((instr
>> (31 - 10)) & 0x1F); /* Extract branch options */
224 testr2
= tp
->ftt_bi
= (uint8_t)((instr
>> (31 - 15)) & 0x1F); /* Extract condition register bit */
225 testr3
= (instr
>> (31 - 20)) & 0x1F; /* Get that last register */
226 tp
->ftt_flgs
= (uint8_t)(instr
& 3); /* Set the absolute address and link flags */
228 switch(optype
) { /* Do instruction specific decode */
230 case diCMN
: /* Common instruction */
231 tp
->ftt_type
= ftmtCommon
; /* Mark as common instruction */
234 case diINV
: /* Invalid */
235 case diTRP
: /* Trap */
236 case diSC
: /* System Call */
237 case diRFI
: /* Return from interrupt */
238 case diPRV
: /* Priviliged instruction */
239 return (-1); /* We will not emulate these... */
242 case diB
: /* Branch */
243 tp
->ftt_type
= ftmtB
; /* Mark as branch instruction */
244 target
= instr
& 0x03FFFFFC; /* Extract address or offset */
245 if(target
& 0x02000000) target
|= 0xFC000000; /* Sign extend */
246 tp
->ftt_trgt
= target
; /* Trim back down and save */
248 targpc
= (user_addr_t
)((int64_t)target
); /* Generate a target address, hopefully we sign extend... */
249 if(!(tp
->ftt_flgs
& ftmtAbs
)) { /* Are we dealing with an offset here? */
250 targpc
= targpc
+ pc
; /* Apply offset to get target address */
253 if(targpc
== pc
) return -1; /* Branching to self is a sin and is forbidden... */
256 case diBC
: /* Branch conditional */
257 tp
->ftt_type
= ftmtBC
; /* Mark as branch conditional */
258 target
= instr
& 0x0000FFFC; /* Extract address or offset */
259 if(target
& 0x00008000) target
|= 0xFFFF0000; /* Sign extend */
260 tp
->ftt_trgt
= target
; /* Trim back down and save */
262 targpc
= (user_addr_t
)((int64_t)target
); /* Generate a target address, hopefully we sign extend... */
263 if(!(tp
->ftt_flgs
& ftmtAbs
)) { /* Are we dealing with an offset here? */
264 targpc
= targpc
+ pc
; /* Apply offset to get target address */
267 if(targpc
== pc
) return -1; /* Branching to self is a sin and is forbidden... */
270 case diBLR
: /* Branch conditional to link register */
271 tp
->ftt_type
= ftmtBLR
; /* Mark as branch conditional to link register */
274 case diBCTR
: /* Branch conditional to count register */
275 tp
->ftt_type
= ftmtBCTR
; /* Mark as branch conditional to count register */
279 if((instr
>> 26) == 24) { /* Is this the ORI nop? */
280 if((testr1
== testr2
) && ((instr
& 0x0000FFFF) == 0)) tp
->ftt_type
= ftmtNOP
; /* Remember if this is a NOP instruction */
281 else tp
->ftt_type
= ftmtCommon
; /* Otherwise it is a common ORI instruction */
283 else if((testr1
== testr2
) && (testr1
== testr3
)) tp
->ftt_type
= ftmtNOP
; /* If all three registers are the same, this is a NOP */
284 else tp
->ftt_type
= ftmtCommon
; /* Otherwise it is a common OR instruction */
289 panic("fasttrap_tracepoint_init: invalid branch decode, inst = %08X, optype = %d\n", instr
, optype
);
298 fasttrap_tracepoint_install(proc_t
*p
, fasttrap_tracepoint_t
*tp
)
300 return patchInst(p
->task
, tp
->ftt_pc
, FASTTRAP_INSTR
); /* Patch the instruction and flush it */
303 extern void dbgTrace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
306 fasttrap_tracepoint_remove(proc_t
*p
, fasttrap_tracepoint_t
*tp
)
311 * Distinguish between read or write failures and a changed
314 if (uread(p
, &instr
, 4, tp
->ftt_pc
) != 0) return (0); /* Get the instruction, but exit if not mapped */
316 // dbgTrace(0x99999999, (uint32_t)tp->ftt_pc, tp->ftt_instr, instr, 0); /* (TRACE/DEBUG) */
318 if (instr
!= FASTTRAP_INSTR
) return (0); /* Did someone change it? If so, just leave */
320 return patchInst(p
->task
, tp
->ftt_pc
, tp
->ftt_instr
); /* Patch the old instruction back in and flush it */
324 fasttrap_return_common(ppc_saved_state_t
*sv
, user_addr_t pc
, pid_t pid
, user_addr_t new_pc
)
327 fasttrap_tracepoint_t
*tp
;
328 fasttrap_bucket_t
*bucket
;
332 pid_mtx
= &cpu_core
[CPU
->cpu_id
].cpuc_pid_lock
;
333 lck_mtx_lock(pid_mtx
);
334 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
336 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
337 if (pid
== tp
->ftt_pid
&& pc
== tp
->ftt_pc
&&
338 !tp
->ftt_proc
->ftpc_defunct
)
343 * Don't sweat it if we can't find the tracepoint again. Unlike
344 * when we're in fasttrap_pid_probe(), finding the tracepoint here
345 * is not essential to the correct execution of the process.
348 lck_mtx_unlock(pid_mtx
);
352 for (id
= tp
->ftt_retids
; id
!= NULL
; id
= id
->fti_next
) {
354 * If there's a branch that could act as a return site, we
355 * need to trace it, and check here if the program counter is
356 * external to the function.
358 if((new_pc
- id
->fti_probe
->ftp_faddr
) < id
->fti_probe
->ftp_fsize
) /* Is target within the function? */
359 continue; /* Yeah, skip this one... */
361 DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP
);
362 if (ISSET(current_proc()->p_lflag
, P_LNOATTACH
)) {
363 dtrace_probe(dtrace_probeid_error
, 0 /* state */,
364 id
->fti_probe
->ftp_id
, 1 /* ndx */, -1 /* offset */,
367 dtrace_probe(id
->fti_probe
->ftp_id
,
368 pc
- id
->fti_probe
->ftp_faddr
,
369 sv
->save_r3
, sv
->save_r4
, 0, 0);
371 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP
);
374 lck_mtx_unlock(pid_mtx
);
378 fasttrap_usdt_args(fasttrap_probe_t
*probe
, ppc_saved_state_t
*sv
, int argc
,
381 int i
, x
, cap
= MIN(argc
, probe
->ftp_nargs
);
384 for (i
= 0; i
< cap
; i
++) {
385 x
= probe
->ftp_argmap
[i
];
387 if (x
<= 8) { /* Is this argument in a register? */
388 argv
[i
] = (&sv
->save_r0
)[x
];
390 if(sv
->save_srr1
& 0x8000000000000000ULL
) { /* Are we running in 64-bit? */
391 fasttrap_fuword64_noerr(sv
->save_r1
+ 48 + (x
* sizeof(uint64_t)), &argv
[i
]); /* Grab argument > 8 from stack */
394 fasttrap_fuword32_noerr(sv
->save_r1
+ 24 + (x
* sizeof(uint32_t)), &farg
); /* Grab argument > 8 from stack */
395 argv
[i
] = (uint64_t)farg
; /* Convert to 64-bit */
400 for (; i
< argc
; i
++) {
406 fasttrap_pid_probe(ppc_saved_state_t
*sv
)
408 proc_t
*p
= current_proc();
409 fasttrap_bucket_t
*bucket
;
411 fasttrap_tracepoint_t
*tp
, tp_local
;
413 dtrace_icookie_t cookie
;
414 uint_t is_enabled
= 0;
415 user_addr_t new_pc
= 0;
417 user_addr_t addrmask
;
419 pc
= sv
->save_srr0
; /* Remember the PC for later */
420 if(sv
->save_srr1
& 0x8000000000000000ULL
) addrmask
= 0xFFFFFFFFFFFFFFFFULL
; /* Set 64-bit addressing if enabled */
421 else addrmask
= 0x00000000FFFFFFFFULL
; /* Otherwise set 32-bit */
423 uthread_t uthread
= (uthread_t
)get_bsdthread_info(current_thread());
426 * Clear all user tracing flags.
428 uthread
->t_dtrace_ft
= 0;
431 * Treat a child created by a call to vfork(2) as if it were its
432 * parent. We know that there's only one thread of control in such a
436 * APPLE NOTE: Terry says: "You need to hold the process locks (currently: kernel funnel) for this traversal"
437 * FIXME: How do we assert this?
439 while (p
->p_lflag
& P_LINVFORK
) p
= p
->p_pptr
; /* Search the end */
442 pid_mtx
= &cpu_core
[CPU
->cpu_id
].cpuc_pid_lock
;
443 lck_mtx_lock(pid_mtx
);
444 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, sv
->save_srr0
)]; /* Get the bucket that corresponds to out PC */
447 * Lookup the tracepoint that the process just hit.
449 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
450 if (pid
== tp
->ftt_pid
&& (sv
->save_srr0
== tp
->ftt_pc
) &&
451 !tp
->ftt_proc
->ftpc_defunct
)
456 * If we couldn't find a matching tracepoint, either a tracepoint has
457 * been inserted without using the pid<pid> ioctl interface (see
458 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
461 lck_mtx_unlock(pid_mtx
);
465 if (tp
->ftt_ids
!= NULL
) {
468 for (id
= tp
->ftt_ids
; id
!= NULL
; id
= id
->fti_next
) {
469 fasttrap_probe_t
*probe
= id
->fti_probe
;
471 if (ISSET(current_proc()->p_lflag
, P_LNOATTACH
)) {
472 dtrace_probe(dtrace_probeid_error
, 0 /* state */,
473 id
->fti_probe
->ftp_id
, 1 /* ndx */, -1 /* offset */,
475 } else if (id
->fti_ptype
== DTFTP_ENTRY
) {
477 * We note that this was an entry
478 * probe to help ustack() find the
481 cookie
= dtrace_interrupt_disable();
482 DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP
| CPU_DTRACE_ENTRY
);
483 dtrace_probe(probe
->ftp_id
, sv
->save_r3
, sv
->save_r4
, /* Call the main probe routine with the first 5 args */
484 sv
->save_r5
, sv
->save_r6
, sv
->save_r7
);
485 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP
| CPU_DTRACE_ENTRY
);
486 dtrace_interrupt_enable(cookie
);
488 } else if (id
->fti_ptype
== DTFTP_IS_ENABLED
) {
490 * Note that in this case, we don't
491 * call dtrace_probe() since it's only
492 * an artificial probe meant to change
493 * the flow of control so that it
494 * encounters the true probe.
498 } else if (probe
->ftp_argmap
== NULL
) {
499 DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP
);
500 dtrace_probe(probe
->ftp_id
, sv
->save_r3
, sv
->save_r4
, /* Call the main probe routine with the first 5 args */
501 sv
->save_r5
, sv
->save_r6
, sv
->save_r7
);
502 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP
);
507 fasttrap_usdt_args(probe
, sv
, 5, t
); /* Grab 5 arguments */
509 DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP
);
510 dtrace_probe(probe
->ftp_id
, t
[0], t
[1],
512 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP
);
515 /* APPLE NOTE: Oneshot probes get one and only one chance... */
516 if (probe
->ftp_prov
->ftp_provider_type
== DTFTP_PROVIDER_ONESHOT
) {
517 fasttrap_tracepoint_remove(p
, tp
);
523 * We're about to do a bunch of work so we cache a local copy of
524 * the tracepoint to emulate the instruction, and then find the
525 * tracepoint again later if we need to light up any return probes.
528 lck_mtx_unlock(pid_mtx
);
532 * If there's an is-enabled probe connected to this tracepoint it
533 * means that there was a 'xor r3,r3,r3'
534 * instruction that was placed there by DTrace when the binary was
535 * linked. As this probe is, in fact, enabled, we need to stuff 1
536 * into R3. Accordingly, we can bypass all the instruction
537 * emulation logic since we know the inevitable result. It's possible
538 * that a user could construct a scenario where the 'is-enabled'
539 * probe was on some other instruction, but that would be a rather
540 * exotic way to shoot oneself in the foot.
543 sv
->save_r3
= 1; /* Set condition to true */
544 new_pc
= (sv
->save_srr0
+ 4) & addrmask
; /* Just fall through to the next instruction */
549 * We emulate certain types of instructions to ensure correctness
550 * (in the case of position dependent instructions) or optimize
551 * common cases. The rest we execute in the kernel, but with
552 * most of the user's context active.
554 switch (tp
->ftt_type
) {
556 case ftmtNOP
: /* NOP */
557 new_pc
= (sv
->save_srr0
+ 4) & addrmask
; /* Just fall through to the next instruction */
560 case ftmtB
: /* Plain unconditional branch */
561 new_pc
= (user_addr_t
)((int64_t)tp
->ftt_trgt
); /* Assume target is absolute address for the moment */
562 if(!(tp
->ftt_flgs
& ftmtAbs
)) new_pc
= (new_pc
+ sv
->save_srr0
) & addrmask
; /* We don't have absolute address, use as offset from instruction address */
564 if(tp
->ftt_flgs
& ftmtLink
) sv
->save_lr
= (sv
->save_srr0
+ 4) & addrmask
; /* Set the LR to the next instruction if needed */
567 case ftmtBC
: /* Conditional PC relative or absolute branch */
568 new_pc
= (user_addr_t
)((int64_t)tp
->ftt_trgt
); /* Assume target is absolute address for the moment */
569 if(!(tp
->ftt_flgs
& ftmtAbs
)) new_pc
= new_pc
+ sv
->save_srr0
; /* We don't have absolute address, use as offset from instruction address */
571 if(tp
->ftt_flgs
& ftmtLink
) sv
->save_lr
= (sv
->save_srr0
+ 4) & addrmask
; /* Set the LR to the next instruction if needed */
572 if(!branchtaken(tp
->ftt_bo
, tp
->ftt_bi
, sv
)) new_pc
= (sv
->save_srr0
+ 4) & addrmask
; /* If branch was not taken, set PC to next address */
575 case ftmtBLR
: /* Conditional branch to LR */
576 new_pc
= sv
->save_lr
; /* Branch target comes from the LR */
578 if(tp
->ftt_flgs
& ftmtLink
) sv
->save_lr
= (sv
->save_srr0
+ 4) & addrmask
; /* Set the LR to the next instruction if needed */
579 if(!branchtaken(tp
->ftt_bo
, tp
->ftt_bi
, sv
)) new_pc
= (sv
->save_srr0
+ 4) & addrmask
; /* If branch was not taken, set PC to next address */
582 case ftmtBCTR
: /* Conditional branch to CTR */
583 new_pc
= sv
->save_ctr
; /* Branch target comes from the CTR */
585 if(tp
->ftt_flgs
& ftmtLink
) sv
->save_lr
= (sv
->save_srr0
+ 4) & addrmask
; /* Set the LR to the next instruction if needed */
586 if(!branchtaken(tp
->ftt_bo
, tp
->ftt_bi
, sv
)) new_pc
= (sv
->save_srr0
+ 4) & addrmask
; /* If branch was not taken, set PC to next address */
589 case ftmtCommon
: /* Common, non-in-kernel emulated instruction */
590 sv
->save_instr
[0] = 1; /* We only have one instruction to inject */
591 sv
->save_instr
[1] = tp
->ftt_instr
; /* Set the instruction */
592 sv
->save_hdr
.save_flags
= sv
->save_hdr
.save_flags
| SAVinject
; /* Tell low-level exception return to inject the instruction */
593 uthread
->t_dtrace_step
= 1; /* Let it be known that a trace return is imminent */
594 return 0; /* Go and don't dome back until you are done... */
597 panic("fasttrap_pid_probe: invalid ftt_type = %08X\n", tp
->ftt_type
); /* Huh, wha happened? */
605 * If there were no return probes when we first found the tracepoint,
606 * we should feel no obligation to honor any return probes that were
607 * subsequently enabled -- they'll just have to wait until the next
610 sv
->save_srr0
= new_pc
; /* Set the new PC */
611 if (tp
->ftt_retids
!= NULL
) fasttrap_return_common(sv
, pc
, pid
, new_pc
);
618 fasttrap_return_probe(ppc_saved_state_t
*sv
)
623 proc_t
*p
= current_proc();
627 * Treat a child created by a call to vfork(2) as if it were its
628 * parent. We know that there's only one thread of control in such a
632 * APPLE NOTE: Terry says: "You need to hold the process locks (currently: kernel funnel) for this traversal"
633 * How do we assert this?
635 while (p
->p_lflag
& P_LINVFORK
) {
639 pc
= sv
->save_srr0
; /* Get the PC of the probed instruction */
640 npc
= pc
+ 4; /* Get next PC */
641 if(!(sv
->save_srr1
& 0x8000000000000000ULL
)) npc
&= 0x00000000FFFFFFFF; /* Wrap new PC if running 32-bit */
642 fasttrap_return_common(sv
, pc
, p
->p_pid
, npc
);
648 fasttrap_pid_getarg(void *arg
, dtrace_id_t id
, void *parg
, int argno
,
651 #pragma unused(arg, id, parg, aframes)
652 return (fasttrap_anarg((ppc_saved_state_t
*)find_user_regs(current_thread()), 1, argno
));
656 fasttrap_usdt_getarg(void *arg
, dtrace_id_t id
, void *parg
, int argno
,
659 #pragma unused(arg, id, parg, aframes)
660 return (fasttrap_anarg((ppc_saved_state_t
*)find_user_regs(current_thread()), 0, argno
));
664 static int32_t branchtaken(int32_t bo
, int32_t bi
, ppc_saved_state_t
*sv
) {
665 int32_t bcond
, czero
, crmatch
;
668 if((bo
& 0x14) == 0x14) return 1; /* If this is a branch always, exit with true... */
670 czero
= 0; /* Assume that we have not just decremented the CTR to 0 */
672 if(!(bo
& 4)) { /* Skip the next bit if we do NOT muck with the CTR */
673 ctr
= sv
->save_ctr
= sv
->save_ctr
- 1; /* Decrement the CTR */
674 if(!(sv
->save_srr1
& 0x8000000000000000ULL
)) ctr
&= 0x00000000FFFFFFFF; /* Only look at the bottom 32 bits if 32-bit mode */
675 czero
= (ctr
== 0); /* Remember if we just hit zero */
678 bcond
= (bo
>> 3); /* If 1, branch if CR flag is 1. If 0, branch if 0 */
679 crmatch
= bo
>> 4; /* If bo[0] is set, do not check CR flag */
680 crmatch
= crmatch
| (((sv
->save_cr
>> (31 - bi
)) ^ bcond
) ^ 1); /* Low bit is now set if CR flag matches or CR is not checked. Other bits are trash. */
682 // dbgTrace(0x77777777, bo, bi, sv->save_cr, ((czero | crmatch) & 1)); /* (TRACE/DEBUG) */
684 return ((czero
| crmatch
) & 1); /* Return 1 if branch taken, 0 if not... */
687 static int32_t dtrace_decode_ppc(uint32_t inst
) {
689 int32_t curdcd
, lastmask
, newmask
, spr
, bit
, bito
, word
;
693 curdcd
= inst
>> 26; /* Isolate major op code to start decode */
694 lastmask
= 99; /* Always force a new xop at the start */
696 while(1) { /* Loop until we find instruction or fail */
697 dcd
= &insts
[curdcd
]; /* Point to the current decode table entry */
698 if(dcd
->dcdFlgs
& dcdJump
) { /* Should we jump to a new spot in the decode table? */
699 curdcd
= dcd
->dcdMatch
; /* Jump */
703 newmask
= dcd
->dcdFlgs
& dcdMask
; /* Isolate the mask index */
704 if(lastmask
!= newmask
) { /* Are we changing masks? */
705 if(!newmask
) break; /* If the mask is 0, we match everything and succeed... (note: lastmask can never be 0) */
706 xop
= inst
& masktab
[newmask
]; /* Clear all extra bits to make match */
707 lastmask
= newmask
; /* Remember */
710 if(xop
== dcd
->dcdMatch
) break; /* We found our guy! */
712 if(!(dcd
->dcdFlgs
& dcdStep
)) { /* No stepping, we failed */
713 dcd
= &dcdfail
; /* Point to a failure entry */
714 break; /* Leave... */
717 curdcd
= curdcd
+ 1; /* Step to the next decode entry */
720 if(dcd
->dcdType
!= diSPR
) return (int32_t)(dcd
->dcdType
); /* Return what we found */
722 spr
= (inst
>> (31 - 20)) & 0x3FF; /* Get the source */
723 spr
= ((spr
<< 5) & 0x3E0) | ((spr
>> 5) & 0x1F); /* Flip to right order */
725 word
= spr
>> 5; /* Get word index into table */
726 bito
= spr
& 0x1F; /* Get bit offset into entry */
727 bit
= 0x80000000 >> bito
; /* Position bit for a test */
729 if(!(sprtbl
[word
] & bit
)) return (diINV
); /* Bogus SPR so whole instruction is invalid... */
731 if(spr
& 0x10) return (diPRV
); /* This is a priviliged SPR so instruction is priviliged... */
732 return (diCMN
); /* Just a common SPR so instruction is the same... */