/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/* #pragma ident	"@(#)fbt.c	1.15	05/09/19 SMI" */
#define _KERNEL /* Solaris vs. Darwin */

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
extern struct mach_header _mh_execute_header; /* the kernel's mach header */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_glue.h>
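
/*
 * Emulation return codes and instruction-skip byte counts handed back by
 * fbt_invop(), followed by the i386 opcode bytes that FBT matches in function
 * prologues and epilogues when choosing patchpoints. FBT_PATCHVAL is the byte
 * written over a patchpoint to force the invalid-opcode trap.
 */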
#define DTRACE_INVOP_NOP_SKIP		1
#define DTRACE_INVOP_MOVL_ESP_EBP	10
#define DTRACE_INVOP_MOVL_ESP_EBP_SKIP	2
#define DTRACE_INVOP_LEAVE_SKIP	1
#define FBT_PUSHL_EBP		0x55
#define FBT_MOVL_ESP_EBP0_V0	0x8b
#define FBT_MOVL_ESP_EBP1_V0	0xec
#define FBT_MOVL_ESP_EBP0_V1	0x89
#define FBT_MOVL_ESP_EBP1_V1	0xe5
#define FBT_REX_RSP_RBP		0x48

#define FBT_POPL_EBP		0x5d
#define FBT_RET			0xc3
#define FBT_RET_IMM16		0xc2
#define FBT_LEAVE		0xc9
#define FBT_JMP_SHORT_REL	0xeb /* Jump short, relative, displacement relative to next instr. */
#define FBT_JMP_NEAR_REL	0xe9 /* Jump near, relative, displacement relative to next instr. */
#define FBT_JMP_FAR_ABS		0xea /* Jump far, absolute, address given in operand */

#define FBT_RET_LEN		1
#define FBT_RET_IMM16_LEN	3
#define FBT_JMP_SHORT_REL_LEN	2
#define FBT_JMP_NEAR_REL_LEN	5
#define FBT_JMP_FAR_ABS_LEN	5
#define FBT_PATCHVAL		0xf0
#define FBT_AFRAMES_ENTRY	7
#define FBT_AFRAMES_RETURN	6

#define FBT_ENTRY	"entry"
#define FBT_RETURN	"return"
#define FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
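
/*
 * fbt_probetab is a hash table of fbt_probe_t chains; FBT_ADDR2NDX() turns a
 * patchpoint address (shifted right 4 bits, masked by fbt_probetab_mask) into
 * a bucket index so fbt_invop() can find the probe for a faulting address.
 */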
extern dtrace_provider_id_t	fbt_id;
extern fbt_probe_t		**fbt_probetab;
extern int			fbt_probetab_mask;
/*
 * Critical routines that must not be probed. PR_5221096, PR_5379018.
 */
static const char * critical_blacklist[] =
{
	"cpu_processor_alloc",
	"cpu_processor_free",
	"cpu_signal_handler",
	"cpu_topology_start",
	"handle_pending_TLB_flushes",
	"hw_compare_and_store",
	"machine_idle_cstate",
	"pmap_cpu_high_map_vaddr",
	"pmap_cpu_high_shared_remap",
	"register_cpu_setup_func",
	"unregister_cpu_setup_func"
};
#define CRITICAL_BLACKLIST_COUNT (sizeof(critical_blacklist)/sizeof(critical_blacklist[0]))
/*
 * The transitive closure of entry points that can be reached from probe context.
 * (Apart from routines whose names begin with dtrace_ or dtxnu_.)
 */
static const char * probe_ctx_closure[] =
{
	"absolutetime_to_microtime",
	"clock_get_calendar_nanotime_nowait",
	"get_bsdthread_info",
	"kernel_preempt_check",
	"mach_absolute_time",
	"max_valid_stack_address",
	"ml_at_interrupt_context",
	"ml_phys_write_byte_64",
	"ml_phys_write_half_64",
	"ml_phys_write_word_64",
	"ml_set_interrupts_enabled",
	"pmap_get_mapwindow",
	"pmap_put_mapwindow",
	"sync_iss_to_iks_unconditionally",
};
#define PROBE_CTX_CLOSURE_COUNT (sizeof(probe_ctx_closure)/sizeof(probe_ctx_closure[0]))
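
/*
 * Both tables above are consulted with the bsearch()/_cmp() pair below, so
 * their entries must remain sorted in strcmp() order.
 */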
static int _cmp(const void *a, const void *b)
{
	return strcmp((const char *)a, *(const char **)b);
}
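
/*
 * _cmp() compares a bare string key against an element of an array of string
 * pointers (hence the asymmetric casts); it is the comparator for the local
 * bsearch() that follows.
 */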
static const void * bsearch(
	register const void *key,
	const void *base0,
	size_t nmemb,
	register size_t size,
	register int (*compar)(const void *, const void *)) {

	register const char *base = base0;
	register size_t lim;
	register int cmp;
	register const void *p;

	for (lim = nmemb; lim != 0; lim >>= 1) {
		p = base + (lim >> 1) * size;
		cmp = (*compar)(key, p);
		if (cmp == 0)
			return p;
		if (cmp > 0) {	/* key > p: move right */
			base = (const char *)p + size;
			lim--;
		}		/* else move left */
	}
	return (NULL);
}
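
/*
 * fbt_invop() is reached from the invalid-opcode trap path via dtrace_invop()
 * (see fbt_perfCallback() below). It hashes the faulting address into
 * fbt_probetab, fires the matching entry or return probe, and returns the
 * fbtp_rval code telling the trap handler which patched instruction to emulate.
 */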
int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	uintptr_t stack0 = 0, stack1 = 0, stack2 = 0, stack3 = 0, stack4 = 0;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {

			if (fbt->fbtp_roffset == 0) {
				uintptr_t *stacktop;
				if (CPU_ON_INTR(CPU))
					stacktop = (uintptr_t *)dtrace_get_cpu_int_stack_top();
				else
					stacktop = (uintptr_t *)(dtrace_get_kernel_stack(current_thread()) + KERNEL_STACK_SIZE);

				stack += 1; /* skip over the target's pushl'd %ebp */

				if (stack <= stacktop)
					CPU->cpu_dtrace_caller = *stack++;
				if (stack <= stacktop)
					stack0 = *stack++;
				if (stack <= stacktop)
					stack1 = *stack++;
				if (stack <= stacktop)
					stack2 = *stack++;
				if (stack <= stacktop)
					stack3 = *stack++;
				if (stack <= stacktop)
					stack4 = *stack++;

				dtrace_probe(fbt->fbtp_id, stack0, stack1, stack2, stack3, stack4);
				CPU->cpu_dtrace_caller = 0;
			} else {
				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset, rval, 0, 0, 0);
				CPU->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}
#define IS_USER_TRAP(regs)	(regs && (((regs)->cs & 3) != 0))
#define T_INVALID_OPCODE	6
#define FBT_EXCEPTION_CODE	T_INVALID_OPCODE
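
/*
 * fbt_perfCallback() is called on a kernel-mode invalid-opcode trap
 * (T_INVALID_OPCODE). When the fault was raised by an FBT patchpoint, it
 * invokes dtrace_invop() to fire the probe and then emulates the instruction
 * that FBT_PATCHVAL replaced (nop, movl %esp,%ebp, popl %ebp, or leave)
 * before resuming the interrupted code.
 */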
kern_return_t
fbt_perfCallback(
	int			trapno,
	x86_saved_state_t	*tagged_regs,
	__unused int		unused1,
	__unused int		unused2)
{
	kern_return_t retval = KERN_FAILURE;
	x86_saved_state32_t *saved_state = saved_state32(tagged_regs);
	struct x86_saved_state32_from_kernel *regs = (struct x86_saved_state32_from_kernel *)saved_state;

	if (FBT_EXCEPTION_CODE == trapno && !IS_USER_TRAP(saved_state)) {
		boolean_t oldlevel, cpu_64bit;
		uint32_t esp_probe, *ebp, edi, fp, *pDst, delta = 0;
		int emul;

		cpu_64bit = ml_is64bit();
		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* Calculate where the stack pointer was when the probe instruction "fired." */
		if (cpu_64bit)
			esp_probe = saved_state->uesp; /* Easy, x86_64 establishes this value in idt64.s */
		else
			esp_probe = (uint32_t)&(regs[1]); /* Nasty, infer the location above the save area */

		emul = dtrace_invop( saved_state->eip, (uintptr_t *)esp_probe, saved_state->eax );
		__asm__ volatile(".globl _dtrace_invop_callsite");
		__asm__ volatile("_dtrace_invop_callsite:");

		switch (emul) {
		case DTRACE_INVOP_NOP:
			saved_state->eip += DTRACE_INVOP_NOP_SKIP;	/* Skip over the patched NOP */
			retval = KERN_SUCCESS;
			break;

		case DTRACE_INVOP_MOVL_ESP_EBP:
			saved_state->ebp = esp_probe;			/* Emulate patched movl %esp,%ebp */
			saved_state->eip += DTRACE_INVOP_MOVL_ESP_EBP_SKIP;	/* Skip over the bytes of the patched movl %esp,%ebp */
			retval = KERN_SUCCESS;
			break;

		case DTRACE_INVOP_POPL_EBP:
		case DTRACE_INVOP_LEAVE:
			/*
			 * Emulate first micro-op of patched leave: movl %ebp,%esp
			 * fp points just below the return address slot for target's ret
			 * and at the slot holding the frame pointer saved by the target's prologue.
			 */
			fp = saved_state->ebp;
			/* Emulate second micro-op of patched leave: patched popl %ebp
			 * savearea ebp is set for the frame of the caller to target
			 * The *live* %esp will be adjusted below for pop increment(s)
			 */
			saved_state->ebp = *(uint32_t *)fp;
			/* Skip over the patched leave */
			saved_state->eip += DTRACE_INVOP_LEAVE_SKIP;
			/*
			 * Lift the stack to account for the emulated leave
			 * Account for words local in this frame
			 * (in "case DTRACE_INVOP_POPL_EBP:" this is zero.)
			 */
			delta = ((uint32_t *)fp) - ((uint32_t *)esp_probe);
			/* Account for popping off the ebp (just accomplished by the emulation
			 * above.)
			 */
			delta += 1;

			saved_state->uesp += (delta << 2);

			/* XXX Fragile in the extreme. Obtain the value of %edi that our caller pushed
			 * (on behalf of its caller -- trap_from_kernel()). Ultimately,
			 * trap_from_kernel's stack pointer is restored from this slot.
			 * This is sensitive to the manner in which the compiler preserves %edi,
			 * and trap_from_kernel()'s internals.
			 */
			ebp = (uint32_t *)__builtin_frame_address(0);
			ebp = (uint32_t *)*ebp;
			edi = *(ebp - 1);
			/* Shift contents of stack */
			for (pDst = (uint32_t *)fp;
					pDst > (((uint32_t *)edi));
						pDst--)
				*pDst = pDst[-delta];

			/* Now adjust the value of %edi in our caller (kernel_trap)'s frame */
			*(ebp - 1) = edi + (delta << 2);

			retval = KERN_SUCCESS;
			break;

		default:
			retval = KERN_FAILURE;
			break;
		}

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}
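
/*
 * __fbt_provide_module() walks a module's Mach-O load commands to locate its
 * text segment, linkedit segment and symbol table, then scans each global
 * symbol for the canonical i386 prologue (pushl %ebp; movl %esp,%ebp) and
 * epilogue (popl %ebp or leave, followed by ret or jmp), creating entry and
 * return probes at those patchpoints. Blacklisted or otherwise dangerous
 * symbols are skipped.
 */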
static void
__fbt_provide_module(void *arg, struct modctl *ctl)
{
	struct mach_header *mh;
	struct load_command *cmd;
	struct segment_command *orig_ts = NULL, *orig_le = NULL;
	struct symtab_command *orig_st = NULL;
	struct nlist *sym = NULL;
	char *strings;
	uintptr_t instrLow, instrHigh;
	char *modname;
	unsigned int i, j;

	int gIgnoreFBTBlacklist = 0;
	PE_parse_boot_arg("IgnoreFBTBlacklist", &gIgnoreFBTBlacklist);
	mh = (struct mach_header *)(ctl->address);
	modname = ctl->mod_modname;

	if (0 == ctl->address || 0 == ctl->size) /* Has the linker been jettisoned? */
		return;

	/*
	 * Employees of dtrace and their families are ineligible. Void
	 * where prohibited.
	 */
	if (strcmp(modname, "com.apple.driver.dtrace") == 0)
		return;

	if (strstr(modname, "CHUD") != NULL)
		return;

	if (mh->magic != MH_MAGIC)
		return;
	cmd = (struct load_command *) &mh[1];
	for (i = 0; i < mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT) {
			struct segment_command *orig_sg = (struct segment_command *) cmd;

			if (strcmp(SEG_TEXT, orig_sg->segname) == 0)
				orig_ts = orig_sg;
			else if (strcmp(SEG_LINKEDIT, orig_sg->segname) == 0)
				orig_le = orig_sg;
			else if (strcmp("", orig_sg->segname) == 0)
				orig_ts = orig_sg; /* kexts have a single unnamed segment */
		}
		else if (cmd->cmd == LC_SYMTAB)
			orig_st = (struct symtab_command *) cmd;

		cmd = (struct load_command *) ((caddr_t) cmd + cmd->cmdsize);
	}

	if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
		return;

	sym = (struct nlist *)orig_le->vmaddr;
	strings = ((char *)sym) + orig_st->nsyms * sizeof(struct nlist);

	/* Find extent of the TEXT section */
	instrLow = (uintptr_t)orig_ts->vmaddr;
	instrHigh = (uintptr_t)(orig_ts->vmaddr + orig_ts->vmsize);
	for (i = 0; i < orig_st->nsyms; i++) {
		fbt_probe_t *fbt, *retfbt;
		machine_inst_t *instr, *limit, theInstr, i1, i2;
		uint8_t n_type = sym[i].n_type & (N_TYPE | N_EXT);
		char *name = strings + sym[i].n_un.n_strx;
		int size;

		/* Check that the symbol is a global and that it has a name. */
		if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type))
			continue;

		if (0 == sym[i].n_un.n_strx) /* iff a null, "", name. */
			continue;

		/* Lop off omnipresent leading underscore. */
		if (*name == '_')
			name += 1;
		if (strstr(name, "dtrace_") == name &&
		    strstr(name, "dtrace_safe_") != name) {
			/*
			 * Anything beginning with "dtrace_" may be called
			 * from probe context unless it explicitly indicates
			 * that it won't be called from probe context by
			 * using the prefix "dtrace_safe_".
			 */
			continue;
		}

		if (strstr(name, "dsmos_") == name)
			continue; /* Don't Steal Mac OS X! */

		if (strstr(name, "dtxnu_") == name ||
		    strstr(name, "_dtrace") == name)
			continue; /* Shims in dtrace.c */

		if (strstr(name, "chud") == name)
			continue; /* Professional courtesy. */

		if (strstr(name, "hibernate_") == name)
			continue; /* Let sleeping dogs lie. */

		if (0 == strcmp(name, "ZN9IOService14newTemperatureElPS_") || /* IOService::newTemperature */
		    0 == strcmp(name, "ZN9IOService26temperatureCriticalForZoneEPS_")) /* IOService::temperatureCriticalForZone */
			continue; /* Per the fire code */
		/*
		 * Place no probes (illegal instructions) in the exception handling path!
		 */
		if (0 == strcmp(name, "t_invop") ||
		    0 == strcmp(name, "enter_lohandler") ||
		    0 == strcmp(name, "lo_alltraps") ||
		    0 == strcmp(name, "kernel_trap") ||
		    0 == strcmp(name, "i386_astintr"))
			continue;

		if (0 == strcmp(name, "current_thread") ||
		    0 == strcmp(name, "ast_pending") ||
		    0 == strcmp(name, "fbt_perfCallback") ||
		    0 == strcmp(name, "machine_thread_get_kern_state") ||
		    0 == strcmp(name, "ml_set_interrupts_enabled") ||
		    0 == strcmp(name, "dtrace_invop") ||
		    0 == strcmp(name, "fbt_invop") ||
		    0 == strcmp(name, "sdt_invop") ||
		    0 == strcmp(name, "max_valid_stack_address"))
			continue;
		if (strstr(name, "machine_stack_") == name ||
		    strstr(name, "mapping_") == name ||
		    strstr(name, "hpet_") == name ||
		    0 == strcmp(name, "rdHPET") ||
		    0 == strcmp(name, "HPETInterrupt") ||
		    0 == strcmp(name, "tmrCvt") ||
		    strstr(name, "tsc_") == name ||
		    strstr(name, "pmCPU") == name ||
		    0 == strcmp(name, "Cstate_table_set") ||
		    0 == strcmp(name, "pmHPETInterrupt") ||
		    0 == strcmp(name, "pmKextRegister") ||
		    0 == strcmp(name, "pmSafeMode") ||
		    0 == strcmp(name, "pmUnregister") ||
		    strstr(name, "pms") == name ||
		    0 == strcmp(name, "power_management_init") ||
		    strstr(name, "usimple_") == name ||
		    strstr(name, "rtc_") == name ||
		    strstr(name, "_rtc_") == name ||
		    strstr(name, "rtclock_") == name ||
		    strstr(name, "clock_") == name ||
		    strstr(name, "absolutetime_to_") == name ||
		    0 == strcmp(name, "setPop") ||
		    0 == strcmp(name, "nanoseconds_to_absolutetime") ||
		    0 == strcmp(name, "nanotime_to_absolutetime") ||
		    strstr(name, "etimer_") == name ||
		    strstr(name, "commpage_") == name ||
		    strstr(name, "pmap_") == name ||
		    strstr(name, "ml_") == name ||
		    strstr(name, "PE_") == name ||
		    strstr(name, "lapic_") == name ||
		    strstr(name, "acpi_") == name)
			continue;

		/*
		 * Avoid machine_ routines. PR_5346750.
		 */
		if (strstr(name, "machine_") == name)
			continue;

		if (0 == strcmp(name, "handle_pending_TLB_flushes"))
			continue;
		/*
		 * Place no probes on critical routines. PR_5221096
		 */
		if (!gIgnoreFBTBlacklist &&
		    bsearch( name, critical_blacklist, CRITICAL_BLACKLIST_COUNT, sizeof(name), _cmp ) != NULL)
			continue;

		/*
		 * Place no probes that could be hit in probe context.
		 */
		if (!gIgnoreFBTBlacklist &&
		    bsearch( name, probe_ctx_closure, PROBE_CTX_CLOSURE_COUNT, sizeof(name), _cmp ) != NULL)
			continue;

		/*
		 * Place no probes that could be hit on the way to the debugger.
		 */
		if (strstr(name, "kdp_") == name ||
		    strstr(name, "kdb_") == name ||
		    strstr(name, "kdbg_") == name ||
		    strstr(name, "kdebug_") == name ||
		    0 == strcmp(name, "kernel_debug") ||
		    0 == strcmp(name, "Debugger") ||
		    0 == strcmp(name, "Call_DebuggerC") ||
		    0 == strcmp(name, "lock_debugger") ||
		    0 == strcmp(name, "unlock_debugger") ||
		    0 == strcmp(name, "SysChoked"))
			continue;

		/*
		 * Place no probes that could be hit on the way to a panic.
		 */
		if (NULL != strstr(name, "panic_") ||
		    0 == strcmp(name, "panic") ||
		    0 == strcmp(name, "handleMck") ||
		    0 == strcmp(name, "unresolved_kernel_trap"))
			continue;

		if (dtrace_probe_lookup(fbt_id, modname, name, NULL) != 0)
			continue;
		for (j = 0, instr = (machine_inst_t *)sym[i].n_value, theInstr = 0;
		    (j < 4) && ((uintptr_t)instr >= instrLow) && (instrHigh > (uintptr_t)(instr + 2));
		    j++) {
			theInstr = instr[0];
			if (theInstr == FBT_PUSHL_EBP || theInstr == FBT_RET || theInstr == FBT_RET_IMM16)
				break;

			if ((size = dtrace_instr_size(instr)) <= 0)
				break;

			instr += size;
		}

		if (theInstr != FBT_PUSHL_EBP)
			continue;

		i1 = instr[1];
		i2 = instr[2];

		limit = (machine_inst_t *)instrHigh;

		if ((i1 == FBT_MOVL_ESP_EBP0_V0 && i2 == FBT_MOVL_ESP_EBP1_V0) ||
		    (i1 == FBT_MOVL_ESP_EBP0_V1 && i2 == FBT_MOVL_ESP_EBP1_V1)) {
			instr += 1; /* Advance to the movl %esp,%ebp */
			theInstr = i1;
		} else {
			/*
			 * Sometimes, the compiler will schedule an intervening instruction
			 * in the function prologue. Example:
			 *
			 * 000006d8	pushl	%ebp
			 * 000006d9	movl	$0x00000004,%edx
			 * 000006de	movl	%esp,%ebp
			 *
			 * Try the next instruction, to see if it is a movl %esp,%ebp
			 */
			instr += 1; /* Advance past the pushl %ebp */
			if ((size = dtrace_instr_size(instr)) <= 0)
				continue;

			instr += size;

			if ((instr + 1) >= limit)
				continue;

			i1 = instr[0];
			i2 = instr[1];

			if (!(i1 == FBT_MOVL_ESP_EBP0_V0 && i2 == FBT_MOVL_ESP_EBP1_V0) &&
			    !(i1 == FBT_MOVL_ESP_EBP0_V1 && i2 == FBT_MOVL_ESP_EBP1_V1))
				continue;

			/* instr already points at the movl %esp,%ebp */
			theInstr = i1;
		}
		fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
		strlcpy( (char *)&(fbt->fbtp_name), name, MAX_FBTP_NAME_CHARS );
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname, name, FBT_ENTRY, FBT_AFRAMES_ENTRY, fbt);
		fbt->fbtp_patchpoint = instr;
		fbt->fbtp_ctl = ctl;
		fbt->fbtp_loadcnt = ctl->mod_loadcnt;
		fbt->fbtp_rval = DTRACE_INVOP_MOVL_ESP_EBP;
		fbt->fbtp_savedval = theInstr;
		fbt->fbtp_patchval = FBT_PATCHVAL;

		fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
		fbt->fbtp_symndx = i;
		fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

		retfbt = NULL;
again:
		if (instr >= limit)
			continue;

		/*
		 * If this disassembly fails, then we've likely walked off into
		 * a jump table or some other unsuitable area. Bail out of the
		 * disassembly now.
		 */
		if ((size = dtrace_instr_size(instr)) <= 0)
			continue;

		/*
		 * We (desperately) want to avoid erroneously instrumenting a
		 * jump table, especially given that our markers are pretty
		 * short: two bytes on x86, and just one byte on amd64. To
		 * determine if we're looking at a true instruction sequence
		 * or an inline jump table that happens to contain the same
		 * byte sequences, we resort to some heuristic sleeze: we
		 * treat this instruction as being contained within a pointer,
		 * and see if that pointer points to within the body of the
		 * function. If it does, we refuse to instrument it.
		 */
		for (j = 0; j < sizeof (uintptr_t); j++) {
			uintptr_t check = (uintptr_t)instr - j;
			uint8_t *ptr;

			if (check < sym[i].n_value)
				break;

			if (check + sizeof (uintptr_t) > (uintptr_t)limit)
				continue;

			ptr = *(uint8_t **)check;

			if (ptr >= (uint8_t *)sym[i].n_value && ptr < limit) {
				instr += size;
				goto again;
			}
		}

		/*
		 * OK, it's an instruction.
		 */
		theInstr = *instr;

		/* Walked onto the start of the next routine? If so, bail out of this function. */
		if (theInstr == FBT_PUSHL_EBP)
			continue;

		if (!(size == 1 && (theInstr == FBT_POPL_EBP || theInstr == FBT_LEAVE))) {
			instr += size;
			goto again;
		}
		/*
		 * Found the popl %ebp; or leave.
		 */
		machine_inst_t *patch_instr = instr;

		/*
		 * Scan forward for a "ret", or "jmp".
		 */
		instr += size;
		if (instr >= limit)
			continue;

		size = dtrace_instr_size(instr);
		if (size <= 0) /* Failed instruction decode? */
			continue;

		theInstr = *instr;

		if (!(size == FBT_RET_LEN && (theInstr == FBT_RET)) &&
		    !(size == FBT_RET_IMM16_LEN && (theInstr == FBT_RET_IMM16)) &&
		    !(size == FBT_JMP_SHORT_REL_LEN && (theInstr == FBT_JMP_SHORT_REL)) &&
		    !(size == FBT_JMP_NEAR_REL_LEN && (theInstr == FBT_JMP_NEAR_REL)) &&
		    !(size == FBT_JMP_FAR_ABS_LEN && (theInstr == FBT_JMP_FAR_ABS)))
			continue;
		/*
		 * popl %ebp; ret; or leave; ret; or leave; jmp tailCalledFun; -- We have a winner!
		 */
		fbt = kmem_zalloc(sizeof (fbt_probe_t), KM_SLEEP);
		strlcpy( (char *)&(fbt->fbtp_name), name, MAX_FBTP_NAME_CHARS );

		if (retfbt == NULL) {
			fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
			    name, FBT_RETURN, FBT_AFRAMES_RETURN, fbt);
		} else {
			retfbt->fbtp_next = fbt;
			fbt->fbtp_id = retfbt->fbtp_id;
		}

		retfbt = fbt;
		fbt->fbtp_patchpoint = patch_instr;
		fbt->fbtp_ctl = ctl;
		fbt->fbtp_loadcnt = ctl->mod_loadcnt;

		if (*patch_instr == FBT_POPL_EBP) {
			fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
		} else {
			ASSERT(*patch_instr == FBT_LEAVE);
			fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
		}
		fbt->fbtp_roffset =
		    (uintptr_t)(patch_instr - (uint8_t *)sym[i].n_value);

		fbt->fbtp_savedval = *patch_instr;
		fbt->fbtp_patchval = FBT_PATCHVAL;
		fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(patch_instr)];
		fbt->fbtp_symndx = i;
		fbt_probetab[FBT_ADDR2NDX(patch_instr)] = fbt;

		instr += size;
		goto again;
	}
}
extern struct modctl g_fbt_kernctl;
#undef kmem_alloc /* from its binding to dt_kmem_alloc glue */
#undef kmem_free /* from its binding to dt_kmem_free glue */
#include <vm/vm_kern.h>
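
/*
 * fbt_provide_module() hands the kernel's symbol information (g_fbt_kernctl)
 * to __fbt_provide_module(), then frees that backing memory and zeroes the
 * modctl's address and size.
 */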
void
fbt_provide_module(void *arg, struct modctl *ctl)
{
	__fbt_provide_module(arg, &g_fbt_kernctl);

	kmem_free(kernel_map, (vm_offset_t)g_fbt_kernctl.address, round_page_32(g_fbt_kernctl.size));
	g_fbt_kernctl.address = 0;
	g_fbt_kernctl.size = 0;
}