- if (dtrace_probe_lookup(fbt_id, modname, name, NULL) != 0)
- continue;
-
- for (j = 0, instr = (machine_inst_t *)sym[i].n_value, theInstr = 0;
- (j < 4) && ((uintptr_t)instr >= instrLow) && (instrHigh > (uintptr_t)(instr + 2));
- j++) {
- theInstr = instr[0];
- if (theInstr == FBT_PUSHL_EBP || theInstr == FBT_RET || theInstr == FBT_RET_IMM16)
- break;
-
- if ((size = dtrace_instr_size(instr)) <= 0)
- break;
-
- instr += size;
- }
-
- if (theInstr != FBT_PUSHL_EBP)
- continue;
-
- i1 = instr[1];
- i2 = instr[2];
-
- limit = (machine_inst_t *)instrHigh;
-
- if ((i1 == FBT_MOVL_ESP_EBP0_V0 && i2 == FBT_MOVL_ESP_EBP1_V0) ||
- (i1 == FBT_MOVL_ESP_EBP0_V1 && i2 == FBT_MOVL_ESP_EBP1_V1)) {
- instr += 1; /* Advance to the movl %esp,%ebp */
- theInstr = i1;
- } else {
- /*
- * Sometimes, the compiler will schedule an intervening instruction
- * in the function prologue. Example:
- *
- * _mach_vm_read:
- * 000006d8 pushl %ebp
- * 000006d9 movl $0x00000004,%edx
- * 000006de movl %esp,%ebp
- *
- * Try the next instruction, to see if it is a movl %esp,%ebp
- */
-
- instr += 1; /* Advance past the pushl %ebp */
- if ((size = dtrace_instr_size(instr)) <= 0)
- continue;
-
- instr += size;
-
- if ((instr + 1) >= limit)
- continue;
-
- i1 = instr[0];
- i2 = instr[1];
-
- if (!(i1 == FBT_MOVL_ESP_EBP0_V0 && i2 == FBT_MOVL_ESP_EBP1_V0) &&
- !(i1 == FBT_MOVL_ESP_EBP0_V1 && i2 == FBT_MOVL_ESP_EBP1_V1))
- continue;
-
- /* instr already points at the movl %esp,%ebp */
- theInstr = i1;
- }
-
- fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
- strlcpy( (char *)&(fbt->fbtp_name), name, MAX_FBTP_NAME_CHARS );
- fbt->fbtp_id = dtrace_probe_create(fbt_id, modname, name, FBT_ENTRY, FBT_AFRAMES_ENTRY, fbt);
- fbt->fbtp_patchpoint = instr;
- fbt->fbtp_ctl = ctl;
- fbt->fbtp_loadcnt = ctl->mod_loadcnt;
- fbt->fbtp_rval = DTRACE_INVOP_MOVL_ESP_EBP;
- fbt->fbtp_savedval = theInstr;
- fbt->fbtp_patchval = FBT_PATCHVAL;
-
- fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
- fbt->fbtp_symndx = i;
- fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
-
- retfbt = NULL;
-again:
- if (instr >= limit)
- continue;
-
- /*
- * If this disassembly fails, then we've likely walked off into
- * a jump table or some other unsuitable area. Bail out of the
- * disassembly now.
- */
- if ((size = dtrace_instr_size(instr)) <= 0)
- continue;
-
- /*
- * We (desperately) want to avoid erroneously instrumenting a
- * jump table, especially given that our markers are pretty
- * short: two bytes on x86, and just one byte on amd64. To
- * determine if we're looking at a true instruction sequence
- * or an inline jump table that happens to contain the same
- * byte sequences, we resort to some heuristic sleeze: we
- * treat this instruction as being contained within a pointer,
- * and see if that pointer points to within the body of the
- * function. If it does, we refuse to instrument it.
- */
- for (j = 0; j < sizeof (uintptr_t); j++) {
- uintptr_t check = (uintptr_t)instr - j;
- uint8_t *ptr;
-
- if (check < sym[i].n_value)
- break;
-
- if (check + sizeof (uintptr_t) > (uintptr_t)limit)
- continue;
-
- ptr = *(uint8_t **)check;
-
- if (ptr >= (uint8_t *)sym[i].n_value && ptr < limit) {
- instr += size;
- goto again;
- }
- }
-
- /*
- * OK, it's an instruction.
- */
- theInstr = instr[0];
-
- /* Walked onto the start of the next routine? If so, bail out of this function. */
- if (theInstr == FBT_PUSHL_EBP)
- continue;
-
- if (!(size == 1 && (theInstr == FBT_POPL_EBP || theInstr == FBT_LEAVE))) {
- instr += size;
- goto again;
- }
-
- /*
- * Found the popl %ebp; or leave.
- */
- machine_inst_t *patch_instr = instr;
-
- /*
- * Scan forward for a "ret", or "jmp".
- */
- instr += size;
- if (instr >= limit)
- continue;
-
- size = dtrace_instr_size(instr);
- if (size <= 0) /* Failed instruction decode? */
- continue;
-
- theInstr = instr[0];
-
- if (!(size == FBT_RET_LEN && (theInstr == FBT_RET)) &&
- !(size == FBT_RET_IMM16_LEN && (theInstr == FBT_RET_IMM16)) &&
- !(size == FBT_JMP_SHORT_REL_LEN && (theInstr == FBT_JMP_SHORT_REL)) &&
- !(size == FBT_JMP_NEAR_REL_LEN && (theInstr == FBT_JMP_NEAR_REL)) &&
- !(size == FBT_JMP_FAR_ABS_LEN && (theInstr == FBT_JMP_FAR_ABS)))
- continue;
-
- /*
- * popl %ebp; ret; or leave; ret; or leave; jmp tailCalledFun; -- We have a winner!
- */
- fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
- strlcpy( (char *)&(fbt->fbtp_name), name, MAX_FBTP_NAME_CHARS );
-
- if (retfbt == NULL) {
- fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
- name, FBT_RETURN, FBT_AFRAMES_RETURN, fbt);
- } else {
- retfbt->fbtp_next = fbt;
- fbt->fbtp_id = retfbt->fbtp_id;
- }
-
- retfbt = fbt;
- fbt->fbtp_patchpoint = patch_instr;
- fbt->fbtp_ctl = ctl;
- fbt->fbtp_loadcnt = ctl->mod_loadcnt;
-
- if (*patch_instr == FBT_POPL_EBP) {
- fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
- } else {
- ASSERT(*patch_instr == FBT_LEAVE);
- fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
- }
- fbt->fbtp_roffset =
- (uintptr_t)(patch_instr - (uint8_t *)sym[i].n_value);
-
- fbt->fbtp_savedval = *patch_instr;
- fbt->fbtp_patchval = FBT_PATCHVAL;
- fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(patch_instr)];
- fbt->fbtp_symndx = i;
- fbt_probetab[FBT_ADDR2NDX(patch_instr)] = fbt;
-
- instr += size;
- goto again;
- }
-}
-#elif defined(__x86_64__)
-int
-fbt_invop(uintptr_t addr, uintptr_t *state, uintptr_t rval)
-{
- fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
-
- for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
- if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
-
- if (fbt->fbtp_roffset == 0) {
- x86_saved_state64_t *regs = (x86_saved_state64_t *)state;
-
- CPU->cpu_dtrace_caller = *(uintptr_t *)(((uintptr_t)(regs->isf.rsp))+sizeof(uint64_t)); // 8(%rsp)
- /* 64-bit ABI, arguments passed in registers. */
- dtrace_probe(fbt->fbtp_id, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->r8);
- CPU->cpu_dtrace_caller = 0;
- } else {
-
- dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset, rval, 0, 0, 0);
- CPU->cpu_dtrace_caller = 0;
- }
-
- return (fbt->fbtp_rval);
- }
- }
-
- return (0);
-}
-
/* True when the trap originated in user mode: low two bits of the saved
 * CS selector are the privilege level (CPL != 0 => not kernel). */
#define IS_USER_TRAP(regs) (regs && (((regs)->isf.cs & 3) != 0))
/* x86 trap vector 6, invalid opcode (#UD) -- raised by the patched probe byte. */
#define T_INVALID_OPCODE 6
#define FBT_EXCEPTION_CODE T_INVALID_OPCODE
/* Pseudo trap number written back into the saved state after emulation
 * ("Avoid call to i386_astintr()" per its use below) -- NOTE(review):
 * exact trap_from_kernel() handling of 255 not visible here; confirm. */
#define T_PREEMPT 255
-
-kern_return_t
-fbt_perfCallback(
- int trapno,
- x86_saved_state_t *tagged_regs,
- __unused int unused1,
- __unused int unused2)
-{
- kern_return_t retval = KERN_FAILURE;
- x86_saved_state64_t *saved_state = saved_state64(tagged_regs);
-
- if (FBT_EXCEPTION_CODE == trapno && !IS_USER_TRAP(saved_state)) {
- boolean_t oldlevel;
- uint64_t rsp_probe, *rbp, r12, fp, delta = 0;
- uint32_t *pDst;
- int emul;
-
- oldlevel = ml_set_interrupts_enabled(FALSE);
-
- /* Calculate where the stack pointer was when the probe instruction "fired." */
- rsp_probe = saved_state->isf.rsp; /* Easy, x86_64 establishes this value in idt64.s */
-
- emul = dtrace_invop( saved_state->isf.rip, (uintptr_t *)saved_state, saved_state->rax );
- __asm__ volatile(".globl _dtrace_invop_callsite");
- __asm__ volatile("_dtrace_invop_callsite:");
-
- switch (emul) {
- case DTRACE_INVOP_NOP:
- saved_state->isf.rip += DTRACE_INVOP_NOP_SKIP; /* Skip over the patched NOP (planted by sdt). */
- retval = KERN_SUCCESS;
- break;
-
- case DTRACE_INVOP_MOV_RSP_RBP:
- saved_state->rbp = rsp_probe; /* Emulate patched mov %rsp,%rbp */
- saved_state->isf.rip += DTRACE_INVOP_MOV_RSP_RBP_SKIP; /* Skip over the bytes of the patched mov %rsp,%rbp */
- retval = KERN_SUCCESS;
- break;
-
- case DTRACE_INVOP_POP_RBP:
- case DTRACE_INVOP_LEAVE:
-/*
- * Emulate first micro-op of patched leave: mov %rbp,%rsp
- * fp points just below the return address slot for target's ret
- * and at the slot holding the frame pointer saved by the target's prologue.
- */
- fp = saved_state->rbp;
-/* Emulate second micro-op of patched leave: patched pop %rbp
- * savearea rbp is set for the frame of the caller to target
- * The *live* %rsp will be adjusted below for pop increment(s)
- */
- saved_state->rbp = *(uint64_t *)fp;
-/* Skip over the patched leave */
- saved_state->isf.rip += DTRACE_INVOP_LEAVE_SKIP;
-/*
- * Lift the stack to account for the emulated leave
- * Account for words local in this frame
- * (in "case DTRACE_INVOP_POPL_EBP:" this is zero.)
- */
- delta = ((uint32_t *)fp) - ((uint32_t *)rsp_probe); /* delta is a *word* increment */
-/* Account for popping off the rbp (just accomplished by the emulation
- * above...)
- */
- delta += 2;
- saved_state->isf.rsp += (delta << 2);
-
-/* XXX Fragile in the extreme.
- * This is sensitive to trap_from_kernel()'s internals.
- */
- rbp = (uint64_t *)__builtin_frame_address(0);
- rbp = (uint64_t *)*rbp;
- r12 = *(rbp - 4);
-
-/* Shift contents of stack */
- for (pDst = (uint32_t *)fp;
- pDst > (((uint32_t *)r12));
- pDst--)
- *pDst = pDst[-delta];
-
-/* Track the stack lift in "saved_state". */
- saved_state = (x86_saved_state64_t *) (((uintptr_t)saved_state) + (delta << 2));
-
-/* Now adjust the value of %r12 in our caller (kernel_trap)'s frame */
- *(rbp - 4) = r12 + (delta << 2);
-
- retval = KERN_SUCCESS;
- break;
-
- default:
- retval = KERN_FAILURE;
- break;
- }
- saved_state->isf.trapno = T_PREEMPT; /* Avoid call to i386_astintr()! */
-
- ml_set_interrupts_enabled(oldlevel);