4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 /* #pragma ident "@(#)fbt.c 1.15 05/09/19 SMI" */
31 #define _KERNEL /* Solaris vs. Darwin */
35 #define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
36 #include <kern/thread.h>
37 #include <mach/thread_status.h>
38 #include <mach/vm_param.h>
39 #include <mach-o/loader.h>
40 #include <mach-o/nlist.h>
41 #include <libkern/kernel_mach_header.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/errno.h>
47 #include <sys/ioctl.h>
49 #include <sys/fcntl.h>
50 #include <miscfs/devfs/devfs.h>
52 #include <sys/dtrace.h>
53 #include <sys/dtrace_impl.h>
56 #include <sys/dtrace_glue.h>
58 #define DTRACE_INVOP_NOP_SKIP 1
59 #define DTRACE_INVOP_MOVL_ESP_EBP 10
60 #define DTRACE_INVOP_MOVL_ESP_EBP_SKIP 2
61 #define DTRACE_INVOP_MOV_RSP_RBP 11
62 #define DTRACE_INVOP_MOV_RSP_RBP_SKIP 3
63 #define DTRACE_INVOP_POP_RBP 12
64 #define DTRACE_INVOP_POP_RBP_SKIP 1
65 #define DTRACE_INVOP_LEAVE_SKIP 1
67 #define FBT_PUSHL_EBP 0x55
68 #define FBT_MOVL_ESP_EBP0_V0 0x8b
69 #define FBT_MOVL_ESP_EBP1_V0 0xec
70 #define FBT_MOVL_ESP_EBP0_V1 0x89
71 #define FBT_MOVL_ESP_EBP1_V1 0xe5
73 #define FBT_PUSH_RBP 0x55
74 #define FBT_REX_RSP_RBP 0x48
75 #define FBT_MOV_RSP_RBP0 0x89
76 #define FBT_MOV_RSP_RBP1 0xe5
77 #define FBT_POP_RBP 0x5d
79 #define FBT_POPL_EBP 0x5d
81 #define FBT_RET_IMM16 0xc2
82 #define FBT_LEAVE 0xc9
83 #define FBT_JMP_SHORT_REL 0xeb /* Jump short, relative, displacement relative to next instr. */
84 #define FBT_JMP_NEAR_REL 0xe9 /* Jump near, relative, displacement relative to next instr. */
85 #define FBT_JMP_FAR_ABS 0xea /* Jump far, absolute, address given in operand */
87 #define FBT_RET_IMM16_LEN 3
88 #define FBT_JMP_SHORT_REL_LEN 2
89 #define FBT_JMP_NEAR_REL_LEN 5
90 #define FBT_JMP_FAR_ABS_LEN 5
92 #define FBT_PATCHVAL 0xf0
93 #define FBT_AFRAMES_ENTRY 7
94 #define FBT_AFRAMES_RETURN 6
96 #define FBT_ENTRY "entry"
97 #define FBT_RETURN "return"
98 #define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
100 extern dtrace_provider_id_t fbt_id
;
101 extern fbt_probe_t
**fbt_probetab
;
102 extern int fbt_probetab_mask
;
104 kern_return_t
fbt_perfCallback(int, x86_saved_state_t
*, __unused
int, __unused
int);
107 * Critical routines that must not be probed. PR_5221096, PR_5379018.
108 * The blacklist must be kept in alphabetic order for purposes of bsearch().
111 static const char * critical_blacklist
[] =
132 "cpu_processor_alloc",
133 "cpu_processor_free",
134 "cpu_signal_handler",
144 "cpu_topology_start_cpu",
147 "handle_pending_TLB_flushes",
148 "hw_compare_and_store",
149 "machine_idle_cstate",
155 "pmap_cpu_high_map_vaddr",
156 "pmap_cpu_high_shared_remap",
158 "register_cpu_setup_func",
159 "unregister_cpu_setup_func",
162 #define CRITICAL_BLACKLIST_COUNT (sizeof(critical_blacklist)/sizeof(critical_blacklist[0]))
165 * The transitive closure of entry points that can be reached from probe context.
166 * (Apart from routines whose names begin with dtrace_).
168 static const char * probe_ctx_closure
[] =
173 "absolutetime_to_microtime",
176 "clock_get_calendar_nanotime_nowait",
191 "get_bsdthread_info",
196 "kernel_preempt_check",
197 "mach_absolute_time",
198 "max_valid_stack_address",
199 "ml_at_interrupt_context",
200 "ml_phys_write_byte_64",
201 "ml_phys_write_half_64",
202 "ml_phys_write_word_64",
203 "ml_set_interrupts_enabled",
208 "pmap_get_mapwindow",
211 "pmap_put_mapwindow",
223 "sync_iss_to_iks_unconditionally",
227 #define PROBE_CTX_CLOSURE_COUNT (sizeof(probe_ctx_closure)/sizeof(probe_ctx_closure[0]))
/*
 * Comparator used with the local bsearch() over the string tables above
 * (critical_blacklist, probe_ctx_closure).  'key' is a plain C string;
 * 'elem' points at a table slot, i.e. it is really a (const char **).
 * Comparing strlen(key)+1 bytes includes the terminating NUL, so only an
 * exact full-string match yields 0.
 */
static int
_cmp(const void *key, const void *elem)
{
	const char *needle = (const char *)key;
	const char * const *entry = (const char * const *)elem;

	return strncmp(needle, *entry, strlen(needle) + 1);
}
/*
 * Local binary search over a sorted array of nmemb elements of the given
 * size, shadowing the libc routine so this file has no dependency on it.
 * The table must be sorted ascending with respect to 'compar' (see the
 * alphabetic-order requirement on critical_blacklist above).
 *
 * Returns a pointer to a matching element, or NULL if 'key' is absent.
 */
static const void *
bsearch(register const void *key, const void *base0, size_t nmemb,
    register size_t size, register int (*compar)(const void *, const void *))
{
	register const char *base = base0;	/* left edge of live window */
	register size_t lim;			/* elements remaining in window */
	register int cmp;
	register const void *p;

	/* Halve the window each pass: probe its midpoint. */
	for (lim = nmemb; lim != 0; lim >>= 1) {
		p = base + (lim >> 1) * size;
		cmp = (*compar)(key, p);
		if (cmp == 0)
			return p;
		if (cmp > 0) {	/* key > p: move right */
			base = (const char *)p + size;
			lim--;	/* midpoint itself is excluded */
		}		/* else move left */
	}
	return (NULL);
}
260 #if defined(__i386__)
262 fbt_invop(uintptr_t addr
, uintptr_t *stack
, uintptr_t rval
)
264 uintptr_t stack0
= 0, stack1
= 0, stack2
= 0, stack3
= 0, stack4
= 0;
265 fbt_probe_t
*fbt
= fbt_probetab
[FBT_ADDR2NDX(addr
)];
267 for (; fbt
!= NULL
; fbt
= fbt
->fbtp_hashnext
) {
268 if ((uintptr_t)fbt
->fbtp_patchpoint
== addr
) {
270 if (fbt
->fbtp_roffset
== 0) {
272 if (CPU_ON_INTR(CPU
))
273 stacktop
= (uintptr_t *)dtrace_get_cpu_int_stack_top();
275 stacktop
= (uintptr_t *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size
);
277 stack
+= 1; /* skip over the target's pushl'd %ebp */
279 if (stack
<= stacktop
)
280 CPU
->cpu_dtrace_caller
= *stack
++;
281 if (stack
<= stacktop
)
283 if (stack
<= stacktop
)
285 if (stack
<= stacktop
)
287 if (stack
<= stacktop
)
289 if (stack
<= stacktop
)
292 /* 32-bit ABI, arguments passed on stack. */
293 dtrace_probe(fbt
->fbtp_id
, stack0
, stack1
, stack2
, stack3
, stack4
);
294 CPU
->cpu_dtrace_caller
= 0;
296 dtrace_probe(fbt
->fbtp_id
, fbt
->fbtp_roffset
, rval
, 0, 0, 0);
297 CPU
->cpu_dtrace_caller
= 0;
300 return (fbt
->fbtp_rval
);
307 #define IS_USER_TRAP(regs) (regs && (((regs)->cs & 3) != 0))
308 #define T_INVALID_OPCODE 6
309 #define FBT_EXCEPTION_CODE T_INVALID_OPCODE
310 #define T_PREEMPT 255
315 x86_saved_state_t
*tagged_regs
,
316 __unused
int unused1
,
317 __unused
int unused2
)
319 kern_return_t retval
= KERN_FAILURE
;
320 x86_saved_state32_t
*saved_state
= saved_state32(tagged_regs
);
321 struct x86_saved_state32_from_kernel
*regs
= (struct x86_saved_state32_from_kernel
*)saved_state
;
323 if (FBT_EXCEPTION_CODE
== trapno
&& !IS_USER_TRAP(saved_state
)) {
324 boolean_t oldlevel
, cpu_64bit
;
325 uint32_t esp_probe
, *ebp
, edi
, fp
, *pDst
, delta
= 0;
328 cpu_64bit
= ml_is64bit();
329 oldlevel
= ml_set_interrupts_enabled(FALSE
);
331 /* Calculate where the stack pointer was when the probe instruction "fired." */
333 esp_probe
= saved_state
->uesp
; /* Easy, x86_64 establishes this value in idt64.s */
335 esp_probe
= (uint32_t)&(regs
[1]); /* Nasty, infer the location above the save area */
338 emul
= dtrace_invop( saved_state
->eip
, (uintptr_t *)esp_probe
, saved_state
->eax
);
339 __asm__
volatile(".globl _dtrace_invop_callsite");
340 __asm__
volatile("_dtrace_invop_callsite:");
343 case DTRACE_INVOP_NOP
:
344 saved_state
->eip
+= DTRACE_INVOP_NOP_SKIP
; /* Skip over the patched NOP (planted by sdt.) */
345 retval
= KERN_SUCCESS
;
348 case DTRACE_INVOP_MOVL_ESP_EBP
:
349 saved_state
->ebp
= esp_probe
; /* Emulate patched movl %esp,%ebp */
350 saved_state
->eip
+= DTRACE_INVOP_MOVL_ESP_EBP_SKIP
; /* Skip over the bytes of the patched movl %esp,%ebp */
351 retval
= KERN_SUCCESS
;
354 case DTRACE_INVOP_POPL_EBP
:
355 case DTRACE_INVOP_LEAVE
:
357 * Emulate first micro-op of patched leave: movl %ebp,%esp
358 * fp points just below the return address slot for target's ret
359 * and at the slot holding the frame pointer saved by the target's prologue.
361 fp
= saved_state
->ebp
;
362 /* Emulate second micro-op of patched leave: patched popl %ebp
363 * savearea ebp is set for the frame of the caller to target
364 * The *live* %esp will be adjusted below for pop increment(s)
366 saved_state
->ebp
= *(uint32_t *)fp
;
367 /* Skip over the patched leave */
368 saved_state
->eip
+= DTRACE_INVOP_LEAVE_SKIP
;
370 * Lift the stack to account for the emulated leave
371 * Account for words local in this frame
372 * (in "case DTRACE_INVOP_POPL_EBP:" this is zero.)
374 delta
= ((uint32_t *)fp
) - ((uint32_t *)esp_probe
);
375 /* Account for popping off the ebp (just accomplished by the emulation
381 saved_state
->uesp
+= (delta
<< 2);
383 /* XXX Fragile in the extreme. Obtain the value of %edi that our caller pushed
384 * (on behalf of its caller -- trap_from_kernel()). Ultimately,
385 * trap_from_kernel's stack pointer is restored from this slot.
386 * This is sensitive to the manner in which the compiler preserves %edi,
387 * and trap_from_kernel()'s internals.
389 ebp
= (uint32_t *)__builtin_frame_address(0);
390 ebp
= (uint32_t *)*ebp
;
392 /* Shift contents of stack */
393 for (pDst
= (uint32_t *)fp
;
394 pDst
> (((uint32_t *)edi
));
396 *pDst
= pDst
[-delta
];
398 /* Track the stack lift in "saved_state". */
399 saved_state
= (x86_saved_state32_t
*) (((uintptr_t)saved_state
) + (delta
<< 2));
401 /* Now adjust the value of %edi in our caller (kernel_trap)'s frame */
402 *(ebp
- 1) = edi
+ (delta
<< 2);
404 retval
= KERN_SUCCESS
;
408 retval
= KERN_FAILURE
;
411 saved_state
->trapno
= T_PREEMPT
; /* Avoid call to i386_astintr()! */
413 ml_set_interrupts_enabled(oldlevel
);
421 __fbt_provide_module(void *arg
, struct modctl
*ctl
)
424 kernel_mach_header_t
*mh
;
425 struct load_command
*cmd
;
426 kernel_segment_command_t
*orig_ts
= NULL
, *orig_le
= NULL
;
427 struct symtab_command
*orig_st
= NULL
;
428 struct nlist
*sym
= NULL
;
430 uintptr_t instrLow
, instrHigh
;
434 int gIgnoreFBTBlacklist
= 0;
435 PE_parse_boot_argn("IgnoreFBTBlacklist", &gIgnoreFBTBlacklist
, sizeof (gIgnoreFBTBlacklist
));
437 mh
= (kernel_mach_header_t
*)(ctl
->address
);
438 modname
= ctl
->mod_modname
;
440 if (0 == ctl
->address
|| 0 == ctl
->size
) /* Has the linker been jettisoned? */
444 * Employees of dtrace and their families are ineligible. Void
448 if (LIT_STRNEQL(modname
, "com.apple.driver.dtrace"))
451 if (strstr(modname
, "CHUD") != NULL
)
454 if (mh
->magic
!= MH_MAGIC
)
457 cmd
= (struct load_command
*) &mh
[1];
458 for (i
= 0; i
< mh
->ncmds
; i
++) {
459 if (cmd
->cmd
== LC_SEGMENT_KERNEL
) {
460 kernel_segment_command_t
*orig_sg
= (kernel_segment_command_t
*) cmd
;
462 if (LIT_STRNEQL(orig_sg
->segname
, SEG_TEXT
))
464 else if (LIT_STRNEQL(orig_sg
->segname
, SEG_LINKEDIT
))
466 else if (LIT_STRNEQL(orig_sg
->segname
, ""))
467 orig_ts
= orig_sg
; /* kexts have a single unnamed segment */
469 else if (cmd
->cmd
== LC_SYMTAB
)
470 orig_st
= (struct symtab_command
*) cmd
;
472 cmd
= (struct load_command
*) ((caddr_t
) cmd
+ cmd
->cmdsize
);
475 if ((orig_ts
== NULL
) || (orig_st
== NULL
) || (orig_le
== NULL
))
478 sym
= (struct nlist
*)(orig_le
->vmaddr
+ orig_st
->symoff
- orig_le
->fileoff
);
479 strings
= (char *)(orig_le
->vmaddr
+ orig_st
->stroff
- orig_le
->fileoff
);
481 /* Find extent of the TEXT section */
482 instrLow
= (uintptr_t)orig_ts
->vmaddr
;
483 instrHigh
= (uintptr_t)(orig_ts
->vmaddr
+ orig_ts
->vmsize
);
485 for (i
= 0; i
< orig_st
->nsyms
; i
++) {
486 fbt_probe_t
*fbt
, *retfbt
;
487 machine_inst_t
*instr
, *limit
, theInstr
, i1
, i2
;
488 uint8_t n_type
= sym
[i
].n_type
& (N_TYPE
| N_EXT
);
489 char *name
= strings
+ sym
[i
].n_un
.n_strx
;
492 /* Check that the symbol is a global and that it has a name. */
493 if (((N_SECT
| N_EXT
) != n_type
&& (N_ABS
| N_EXT
) != n_type
))
496 if (0 == sym
[i
].n_un
.n_strx
) /* iff a null, "", name. */
499 /* Lop off omnipresent leading underscore. */
503 if (LIT_STRNSTART(name
, "dtrace_") && !LIT_STRNSTART(name
, "dtrace_safe_")) {
505 * Anything beginning with "dtrace_" may be called
506 * from probe context unless it explitly indicates
507 * that it won't be called from probe context by
508 * using the prefix "dtrace_safe_".
513 if (LIT_STRNSTART(name
, "dsmos_"))
514 continue; /* Don't Steal Mac OS X! */
516 if (LIT_STRNSTART(name
, "_dtrace"))
517 continue; /* Shims in dtrace.c */
519 if (LIT_STRNSTART(name
, "chud"))
520 continue; /* Professional courtesy. */
522 if (LIT_STRNSTART(name
, "hibernate_"))
523 continue; /* Let sleeping dogs lie. */
525 if (LIT_STRNEQL(name
, "_ZN9IOService14newTemperatureElPS_") || /* IOService::newTemperature */
526 LIT_STRNEQL(name
, "_ZN9IOService26temperatureCriticalForZoneEPS_")) /* IOService::temperatureCriticalForZone */
527 continue; /* Per the fire code */
530 * Place no probes (illegal instructions) in the exception handling path!
532 if (LIT_STRNEQL(name
, "t_invop") ||
533 LIT_STRNEQL(name
, "enter_lohandler") ||
534 LIT_STRNEQL(name
, "lo_alltraps") ||
535 LIT_STRNEQL(name
, "kernel_trap") ||
536 LIT_STRNEQL(name
, "interrupt") ||
537 LIT_STRNEQL(name
, "i386_astintr"))
540 if (LIT_STRNEQL(name
, "current_thread") ||
541 LIT_STRNEQL(name
, "ast_pending") ||
542 LIT_STRNEQL(name
, "fbt_perfCallback") ||
543 LIT_STRNEQL(name
, "machine_thread_get_kern_state") ||
544 LIT_STRNEQL(name
, "get_threadtask") ||
545 LIT_STRNEQL(name
, "ml_set_interrupts_enabled") ||
546 LIT_STRNEQL(name
, "dtrace_invop") ||
547 LIT_STRNEQL(name
, "fbt_invop") ||
548 LIT_STRNEQL(name
, "sdt_invop") ||
549 LIT_STRNEQL(name
, "max_valid_stack_address"))
555 if (LIT_STRNSTART(name
, "machine_stack_") ||
556 LIT_STRNSTART(name
, "mapping_") ||
557 LIT_STRNEQL(name
, "tmrCvt") ||
559 LIT_STRNSTART(name
, "tsc_") ||
561 LIT_STRNSTART(name
, "pmCPU") ||
562 LIT_STRNEQL(name
, "pmKextRegister") ||
563 LIT_STRNEQL(name
, "pmMarkAllCPUsOff") ||
564 LIT_STRNEQL(name
, "pmSafeMode") ||
565 LIT_STRNEQL(name
, "pmTimerSave") ||
566 LIT_STRNEQL(name
, "pmTimerRestore") ||
567 LIT_STRNEQL(name
, "pmUnRegister") ||
568 LIT_STRNSTART(name
, "pms") ||
569 LIT_STRNEQL(name
, "power_management_init") ||
570 LIT_STRNSTART(name
, "usimple_") ||
571 LIT_STRNEQL(name
, "lck_spin_lock") ||
572 LIT_STRNEQL(name
, "lck_spin_unlock") ||
574 LIT_STRNSTART(name
, "rtc_") ||
575 LIT_STRNSTART(name
, "_rtc_") ||
576 LIT_STRNSTART(name
, "rtclock_") ||
577 LIT_STRNSTART(name
, "clock_") ||
578 LIT_STRNSTART(name
, "absolutetime_to_") ||
579 LIT_STRNEQL(name
, "setPop") ||
580 LIT_STRNEQL(name
, "nanoseconds_to_absolutetime") ||
581 LIT_STRNEQL(name
, "nanotime_to_absolutetime") ||
583 LIT_STRNSTART(name
, "etimer_") ||
585 LIT_STRNSTART(name
, "commpage_") ||
586 LIT_STRNSTART(name
, "pmap_") ||
587 LIT_STRNSTART(name
, "ml_") ||
588 LIT_STRNSTART(name
, "PE_") ||
589 LIT_STRNEQL(name
, "kprintf") ||
590 LIT_STRNSTART(name
, "lapic_") ||
591 LIT_STRNSTART(name
, "acpi_"))
595 * Avoid machine_ routines. PR_5346750.
597 if (LIT_STRNSTART(name
, "machine_"))
600 if (LIT_STRNEQL(name
, "handle_pending_TLB_flushes"))
604 * Place no probes on critical routines. PR_5221096
606 if (!gIgnoreFBTBlacklist
&&
607 bsearch( name
, critical_blacklist
, CRITICAL_BLACKLIST_COUNT
, sizeof(name
), _cmp
) != NULL
)
611 * Place no probes that could be hit in probe context.
613 if (!gIgnoreFBTBlacklist
&&
614 bsearch( name
, probe_ctx_closure
, PROBE_CTX_CLOSURE_COUNT
, sizeof(name
), _cmp
) != NULL
)
618 * Place no probes that could be hit on the way to the debugger.
620 if (LIT_STRNSTART(name
, "kdp_") ||
621 LIT_STRNSTART(name
, "kdb_") ||
622 LIT_STRNSTART(name
, "kdbg_") ||
623 LIT_STRNSTART(name
, "kdebug_") ||
624 LIT_STRNEQL(name
, "kernel_debug") ||
625 LIT_STRNEQL(name
, "Debugger") ||
626 LIT_STRNEQL(name
, "Call_DebuggerC") ||
627 LIT_STRNEQL(name
, "lock_debugger") ||
628 LIT_STRNEQL(name
, "unlock_debugger") ||
629 LIT_STRNEQL(name
, "SysChoked"))
633 * Place no probes that could be hit on the way to a panic.
635 if (NULL
!= strstr(name
, "panic_") ||
636 LIT_STRNEQL(name
, "panic") ||
637 LIT_STRNEQL(name
, "handleMck") ||
638 LIT_STRNEQL(name
, "unresolved_kernel_trap"))
641 if (dtrace_probe_lookup(fbt_id
, modname
, name
, NULL
) != 0)
644 for (j
= 0, instr
= (machine_inst_t
*)sym
[i
].n_value
, theInstr
= 0;
645 (j
< 4) && ((uintptr_t)instr
>= instrLow
) && (instrHigh
> (uintptr_t)(instr
+ 2));
648 if (theInstr
== FBT_PUSHL_EBP
|| theInstr
== FBT_RET
|| theInstr
== FBT_RET_IMM16
)
651 if ((size
= dtrace_instr_size(instr
)) <= 0)
657 if (theInstr
!= FBT_PUSHL_EBP
)
663 limit
= (machine_inst_t
*)instrHigh
;
665 if ((i1
== FBT_MOVL_ESP_EBP0_V0
&& i2
== FBT_MOVL_ESP_EBP1_V0
) ||
666 (i1
== FBT_MOVL_ESP_EBP0_V1
&& i2
== FBT_MOVL_ESP_EBP1_V1
)) {
667 instr
+= 1; /* Advance to the movl %esp,%ebp */
671 * Sometimes, the compiler will schedule an intervening instruction
672 * in the function prologue. Example:
675 * 000006d8 pushl %ebp
676 * 000006d9 movl $0x00000004,%edx
677 * 000006de movl %esp,%ebp
679 * Try the next instruction, to see if it is a movl %esp,%ebp
682 instr
+= 1; /* Advance past the pushl %ebp */
683 if ((size
= dtrace_instr_size(instr
)) <= 0)
688 if ((instr
+ 1) >= limit
)
694 if (!(i1
== FBT_MOVL_ESP_EBP0_V0
&& i2
== FBT_MOVL_ESP_EBP1_V0
) &&
695 !(i1
== FBT_MOVL_ESP_EBP0_V1
&& i2
== FBT_MOVL_ESP_EBP1_V1
))
698 /* instr already points at the movl %esp,%ebp */
702 fbt
= kmem_zalloc(sizeof (fbt_probe_t
), KM_SLEEP
);
703 strlcpy( (char *)&(fbt
->fbtp_name
), name
, MAX_FBTP_NAME_CHARS
);
704 fbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
, name
, FBT_ENTRY
, FBT_AFRAMES_ENTRY
, fbt
);
705 fbt
->fbtp_patchpoint
= instr
;
707 fbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
708 fbt
->fbtp_rval
= DTRACE_INVOP_MOVL_ESP_EBP
;
709 fbt
->fbtp_savedval
= theInstr
;
710 fbt
->fbtp_patchval
= FBT_PATCHVAL
;
712 fbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(instr
)];
713 fbt
->fbtp_symndx
= i
;
714 fbt_probetab
[FBT_ADDR2NDX(instr
)] = fbt
;
722 * If this disassembly fails, then we've likely walked off into
723 * a jump table or some other unsuitable area. Bail out of the
726 if ((size
= dtrace_instr_size(instr
)) <= 0)
730 * We (desperately) want to avoid erroneously instrumenting a
731 * jump table, especially given that our markers are pretty
732 * short: two bytes on x86, and just one byte on amd64. To
733 * determine if we're looking at a true instruction sequence
734 * or an inline jump table that happens to contain the same
735 * byte sequences, we resort to some heuristic sleeze: we
736 * treat this instruction as being contained within a pointer,
737 * and see if that pointer points to within the body of the
738 * function. If it does, we refuse to instrument it.
740 for (j
= 0; j
< sizeof (uintptr_t); j
++) {
741 uintptr_t check
= (uintptr_t)instr
- j
;
744 if (check
< sym
[i
].n_value
)
747 if (check
+ sizeof (uintptr_t) > (uintptr_t)limit
)
750 ptr
= *(uint8_t **)check
;
752 if (ptr
>= (uint8_t *)sym
[i
].n_value
&& ptr
< limit
) {
759 * OK, it's an instruction.
763 /* Walked onto the start of the next routine? If so, bail out of this function. */
764 if (theInstr
== FBT_PUSHL_EBP
)
767 if (!(size
== 1 && (theInstr
== FBT_POPL_EBP
|| theInstr
== FBT_LEAVE
))) {
773 * Found the popl %ebp; or leave.
775 machine_inst_t
*patch_instr
= instr
;
778 * Scan forward for a "ret", or "jmp".
784 size
= dtrace_instr_size(instr
);
785 if (size
<= 0) /* Failed instruction decode? */
790 if (!(size
== FBT_RET_LEN
&& (theInstr
== FBT_RET
)) &&
791 !(size
== FBT_RET_IMM16_LEN
&& (theInstr
== FBT_RET_IMM16
)) &&
792 !(size
== FBT_JMP_SHORT_REL_LEN
&& (theInstr
== FBT_JMP_SHORT_REL
)) &&
793 !(size
== FBT_JMP_NEAR_REL_LEN
&& (theInstr
== FBT_JMP_NEAR_REL
)) &&
794 !(size
== FBT_JMP_FAR_ABS_LEN
&& (theInstr
== FBT_JMP_FAR_ABS
)))
798 * popl %ebp; ret; or leave; ret; or leave; jmp tailCalledFun; -- We have a winner!
800 fbt
= kmem_zalloc(sizeof (fbt_probe_t
), KM_SLEEP
);
801 strlcpy( (char *)&(fbt
->fbtp_name
), name
, MAX_FBTP_NAME_CHARS
);
803 if (retfbt
== NULL
) {
804 fbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
,
805 name
, FBT_RETURN
, FBT_AFRAMES_RETURN
, fbt
);
807 retfbt
->fbtp_next
= fbt
;
808 fbt
->fbtp_id
= retfbt
->fbtp_id
;
812 fbt
->fbtp_patchpoint
= patch_instr
;
814 fbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
816 if (*patch_instr
== FBT_POPL_EBP
) {
817 fbt
->fbtp_rval
= DTRACE_INVOP_POPL_EBP
;
819 ASSERT(*patch_instr
== FBT_LEAVE
);
820 fbt
->fbtp_rval
= DTRACE_INVOP_LEAVE
;
823 (uintptr_t)(patch_instr
- (uint8_t *)sym
[i
].n_value
);
825 fbt
->fbtp_savedval
= *patch_instr
;
826 fbt
->fbtp_patchval
= FBT_PATCHVAL
;
827 fbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(patch_instr
)];
828 fbt
->fbtp_symndx
= i
;
829 fbt_probetab
[FBT_ADDR2NDX(patch_instr
)] = fbt
;
835 #elif defined(__x86_64__)
837 fbt_invop(uintptr_t addr
, uintptr_t *state
, uintptr_t rval
)
839 fbt_probe_t
*fbt
= fbt_probetab
[FBT_ADDR2NDX(addr
)];
841 for (; fbt
!= NULL
; fbt
= fbt
->fbtp_hashnext
) {
842 if ((uintptr_t)fbt
->fbtp_patchpoint
== addr
) {
844 if (fbt
->fbtp_roffset
== 0) {
845 x86_saved_state64_t
*regs
= (x86_saved_state64_t
*)state
;
847 CPU
->cpu_dtrace_caller
= *(uintptr_t *)(((uintptr_t)(regs
->isf
.rsp
))+sizeof(uint64_t)); // 8(%rsp)
848 /* 64-bit ABI, arguments passed in registers. */
849 dtrace_probe(fbt
->fbtp_id
, regs
->rdi
, regs
->rsi
, regs
->rdx
, regs
->rcx
, regs
->r8
);
850 CPU
->cpu_dtrace_caller
= 0;
853 dtrace_probe(fbt
->fbtp_id
, fbt
->fbtp_roffset
, rval
, 0, 0, 0);
854 CPU
->cpu_dtrace_caller
= 0;
857 return (fbt
->fbtp_rval
);
864 #define IS_USER_TRAP(regs) (regs && (((regs)->isf.cs & 3) != 0))
865 #define T_INVALID_OPCODE 6
866 #define FBT_EXCEPTION_CODE T_INVALID_OPCODE
867 #define T_PREEMPT 255
872 x86_saved_state_t
*tagged_regs
,
873 __unused
int unused1
,
874 __unused
int unused2
)
876 kern_return_t retval
= KERN_FAILURE
;
877 x86_saved_state64_t
*saved_state
= saved_state64(tagged_regs
);
879 if (FBT_EXCEPTION_CODE
== trapno
&& !IS_USER_TRAP(saved_state
)) {
881 uint64_t rsp_probe
, *rbp
, r12
, fp
, delta
= 0;
885 oldlevel
= ml_set_interrupts_enabled(FALSE
);
887 /* Calculate where the stack pointer was when the probe instruction "fired." */
888 rsp_probe
= saved_state
->isf
.rsp
; /* Easy, x86_64 establishes this value in idt64.s */
890 emul
= dtrace_invop( saved_state
->isf
.rip
, (uintptr_t *)saved_state
, saved_state
->rax
);
891 __asm__
volatile(".globl _dtrace_invop_callsite");
892 __asm__
volatile("_dtrace_invop_callsite:");
895 case DTRACE_INVOP_NOP
:
896 saved_state
->isf
.rip
+= DTRACE_INVOP_NOP_SKIP
; /* Skip over the patched NOP (planted by sdt). */
897 retval
= KERN_SUCCESS
;
900 case DTRACE_INVOP_MOV_RSP_RBP
:
901 saved_state
->rbp
= rsp_probe
; /* Emulate patched mov %rsp,%rbp */
902 saved_state
->isf
.rip
+= DTRACE_INVOP_MOV_RSP_RBP_SKIP
; /* Skip over the bytes of the patched mov %rsp,%rbp */
903 retval
= KERN_SUCCESS
;
906 case DTRACE_INVOP_POP_RBP
:
907 case DTRACE_INVOP_LEAVE
:
909 * Emulate first micro-op of patched leave: mov %rbp,%rsp
910 * fp points just below the return address slot for target's ret
911 * and at the slot holding the frame pointer saved by the target's prologue.
913 fp
= saved_state
->rbp
;
914 /* Emulate second micro-op of patched leave: patched pop %rbp
915 * savearea rbp is set for the frame of the caller to target
916 * The *live* %rsp will be adjusted below for pop increment(s)
918 saved_state
->rbp
= *(uint64_t *)fp
;
919 /* Skip over the patched leave */
920 saved_state
->isf
.rip
+= DTRACE_INVOP_LEAVE_SKIP
;
922 * Lift the stack to account for the emulated leave
923 * Account for words local in this frame
924 * (in "case DTRACE_INVOP_POPL_EBP:" this is zero.)
926 delta
= ((uint32_t *)fp
) - ((uint32_t *)rsp_probe
); /* delta is a *word* increment */
927 /* Account for popping off the rbp (just accomplished by the emulation
931 saved_state
->isf
.rsp
+= (delta
<< 2);
933 /* XXX Fragile in the extreme.
934 * This is sensitive to trap_from_kernel()'s internals.
936 rbp
= (uint64_t *)__builtin_frame_address(0);
937 rbp
= (uint64_t *)*rbp
;
940 /* Shift contents of stack */
941 for (pDst
= (uint32_t *)fp
;
942 pDst
> (((uint32_t *)r12
));
944 *pDst
= pDst
[-delta
];
946 /* Track the stack lift in "saved_state". */
947 saved_state
= (x86_saved_state64_t
*) (((uintptr_t)saved_state
) + (delta
<< 2));
949 /* Now adjust the value of %r12 in our caller (kernel_trap)'s frame */
950 *(rbp
- 4) = r12
+ (delta
<< 2);
952 retval
= KERN_SUCCESS
;
956 retval
= KERN_FAILURE
;
959 saved_state
->isf
.trapno
= T_PREEMPT
; /* Avoid call to i386_astintr()! */
961 ml_set_interrupts_enabled(oldlevel
);
969 __fbt_provide_module(void *arg
, struct modctl
*ctl
)
972 kernel_mach_header_t
*mh
;
973 struct load_command
*cmd
;
974 kernel_segment_command_t
*orig_ts
= NULL
, *orig_le
= NULL
;
975 struct symtab_command
*orig_st
= NULL
;
976 struct nlist_64
*sym
= NULL
;
978 uintptr_t instrLow
, instrHigh
;
982 int gIgnoreFBTBlacklist
= 0;
983 PE_parse_boot_argn("IgnoreFBTBlacklist", &gIgnoreFBTBlacklist
, sizeof (gIgnoreFBTBlacklist
));
985 mh
= (kernel_mach_header_t
*)(ctl
->address
);
986 modname
= ctl
->mod_modname
;
988 if (0 == ctl
->address
|| 0 == ctl
->size
) /* Has the linker been jettisoned? */
992 * Employees of dtrace and their families are ineligible. Void
996 if (LIT_STRNEQL(modname
, "com.apple.driver.dtrace"))
999 if (strstr(modname
, "CHUD") != NULL
)
1002 if (mh
->magic
!= MH_MAGIC_64
)
1005 cmd
= (struct load_command
*) &mh
[1];
1006 for (i
= 0; i
< mh
->ncmds
; i
++) {
1007 if (cmd
->cmd
== LC_SEGMENT_KERNEL
) {
1008 kernel_segment_command_t
*orig_sg
= (kernel_segment_command_t
*) cmd
;
1010 if (LIT_STRNEQL(orig_sg
->segname
, SEG_TEXT
))
1012 else if (LIT_STRNEQL(orig_sg
->segname
, SEG_LINKEDIT
))
1014 else if (LIT_STRNEQL(orig_sg
->segname
, ""))
1015 orig_ts
= orig_sg
; /* kexts have a single unnamed segment */
1017 else if (cmd
->cmd
== LC_SYMTAB
)
1018 orig_st
= (struct symtab_command
*) cmd
;
1020 cmd
= (struct load_command
*) ((caddr_t
) cmd
+ cmd
->cmdsize
);
1023 if ((orig_ts
== NULL
) || (orig_st
== NULL
) || (orig_le
== NULL
))
1026 sym
= (struct nlist_64
*)(orig_le
->vmaddr
+ orig_st
->symoff
- orig_le
->fileoff
);
1027 strings
= (char *)(orig_le
->vmaddr
+ orig_st
->stroff
- orig_le
->fileoff
);
1029 /* Find extent of the TEXT section */
1030 instrLow
= (uintptr_t)orig_ts
->vmaddr
;
1031 instrHigh
= (uintptr_t)(orig_ts
->vmaddr
+ orig_ts
->vmsize
);
1033 for (i
= 0; i
< orig_st
->nsyms
; i
++) {
1034 fbt_probe_t
*fbt
, *retfbt
;
1035 machine_inst_t
*instr
, *limit
, theInstr
, i1
, i2
, i3
;
1036 uint8_t n_type
= sym
[i
].n_type
& (N_TYPE
| N_EXT
);
1037 char *name
= strings
+ sym
[i
].n_un
.n_strx
;
1040 /* Check that the symbol is a global and that it has a name. */
1041 if (((N_SECT
| N_EXT
) != n_type
&& (N_ABS
| N_EXT
) != n_type
))
1044 if (0 == sym
[i
].n_un
.n_strx
) /* iff a null, "", name. */
1047 /* Lop off omnipresent leading underscore. */
1051 if (LIT_STRNSTART(name
, "dtrace_") && !LIT_STRNSTART(name
, "dtrace_safe_")) {
1053 * Anything beginning with "dtrace_" may be called
1054 * from probe context unless it explitly indicates
1055 * that it won't be called from probe context by
1056 * using the prefix "dtrace_safe_".
1061 if (LIT_STRNSTART(name
, "fasttrap_") ||
1062 LIT_STRNSTART(name
, "fuword") ||
1063 LIT_STRNSTART(name
, "suword") ||
1064 LIT_STRNEQL(name
, "sprlock") ||
1065 LIT_STRNEQL(name
, "sprunlock") ||
1066 LIT_STRNEQL(name
, "uread") ||
1067 LIT_STRNEQL(name
, "uwrite"))
1068 continue; /* Fasttrap inner-workings. */
1070 if (LIT_STRNSTART(name
, "dsmos_"))
1071 continue; /* Don't Steal Mac OS X! */
1073 if (LIT_STRNSTART(name
, "_dtrace"))
1074 continue; /* Shims in dtrace.c */
1076 if (LIT_STRNSTART(name
, "chud"))
1077 continue; /* Professional courtesy. */
1079 if (LIT_STRNSTART(name
, "hibernate_"))
1080 continue; /* Let sleeping dogs lie. */
1082 if (LIT_STRNEQL(name
, "ZN9IOService14newTemperatureElPS_") || /* IOService::newTemperature */
1083 LIT_STRNEQL(name
, "ZN9IOService26temperatureCriticalForZoneEPS_")) /* IOService::temperatureCriticalForZone */
1084 continue; /* Per the fire code */
1087 * Place no probes (illegal instructions) in the exception handling path!
1089 if (LIT_STRNEQL(name
, "t_invop") ||
1090 LIT_STRNEQL(name
, "enter_lohandler") ||
1091 LIT_STRNEQL(name
, "lo_alltraps") ||
1092 LIT_STRNEQL(name
, "kernel_trap") ||
1093 LIT_STRNEQL(name
, "interrupt") ||
1094 LIT_STRNEQL(name
, "i386_astintr"))
1097 if (LIT_STRNEQL(name
, "current_thread") ||
1098 LIT_STRNEQL(name
, "ast_pending") ||
1099 LIT_STRNEQL(name
, "fbt_perfCallback") ||
1100 LIT_STRNEQL(name
, "machine_thread_get_kern_state") ||
1101 LIT_STRNEQL(name
, "get_threadtask") ||
1102 LIT_STRNEQL(name
, "ml_set_interrupts_enabled") ||
1103 LIT_STRNEQL(name
, "dtrace_invop") ||
1104 LIT_STRNEQL(name
, "fbt_invop") ||
1105 LIT_STRNEQL(name
, "sdt_invop") ||
1106 LIT_STRNEQL(name
, "max_valid_stack_address"))
1112 if (LIT_STRNSTART(name
, "machine_stack_") ||
1113 LIT_STRNSTART(name
, "mapping_") ||
1114 LIT_STRNEQL(name
, "tmrCvt") ||
1116 LIT_STRNSTART(name
, "tsc_") ||
1118 LIT_STRNSTART(name
, "pmCPU") ||
1119 LIT_STRNEQL(name
, "pmKextRegister") ||
1120 LIT_STRNEQL(name
, "pmMarkAllCPUsOff") ||
1121 LIT_STRNEQL(name
, "pmSafeMode") ||
1122 LIT_STRNEQL(name
, "pmTimerSave") ||
1123 LIT_STRNEQL(name
, "pmTimerRestore") ||
1124 LIT_STRNEQL(name
, "pmUnRegister") ||
1125 LIT_STRNSTART(name
, "pms") ||
1126 LIT_STRNEQL(name
, "power_management_init") ||
1127 LIT_STRNSTART(name
, "usimple_") ||
1128 LIT_STRNSTART(name
, "lck_spin_lock") ||
1129 LIT_STRNSTART(name
, "lck_spin_unlock") ||
1131 LIT_STRNSTART(name
, "rtc_") ||
1132 LIT_STRNSTART(name
, "_rtc_") ||
1133 LIT_STRNSTART(name
, "rtclock_") ||
1134 LIT_STRNSTART(name
, "clock_") ||
1135 LIT_STRNSTART(name
, "absolutetime_to_") ||
1136 LIT_STRNEQL(name
, "setPop") ||
1137 LIT_STRNEQL(name
, "nanoseconds_to_absolutetime") ||
1138 LIT_STRNEQL(name
, "nanotime_to_absolutetime") ||
1140 LIT_STRNSTART(name
, "etimer_") ||
1142 LIT_STRNSTART(name
, "commpage_") ||
1143 LIT_STRNSTART(name
, "pmap_") ||
1144 LIT_STRNSTART(name
, "ml_") ||
1145 LIT_STRNSTART(name
, "PE_") ||
1146 LIT_STRNEQL(name
, "kprintf") ||
1147 LIT_STRNSTART(name
, "lapic_") ||
1148 LIT_STRNSTART(name
, "acpi_"))
1152 * Avoid machine_ routines. PR_5346750.
1154 if (LIT_STRNSTART(name
, "machine_"))
1157 if (LIT_STRNEQL(name
, "handle_pending_TLB_flushes"))
1161 * Place no probes on critical routines. PR_5221096
1163 if (!gIgnoreFBTBlacklist
&&
1164 bsearch( name
, critical_blacklist
, CRITICAL_BLACKLIST_COUNT
, sizeof(name
), _cmp
) != NULL
)
1168 * Place no probes that could be hit in probe context.
1170 if (!gIgnoreFBTBlacklist
&&
1171 bsearch( name
, probe_ctx_closure
, PROBE_CTX_CLOSURE_COUNT
, sizeof(name
), _cmp
) != NULL
)
1175 * Place no probes that could be hit on the way to the debugger.
1177 if (LIT_STRNSTART(name
, "kdp_") ||
1178 LIT_STRNSTART(name
, "kdb_") ||
1179 LIT_STRNSTART(name
, "kdbg_") ||
1180 LIT_STRNSTART(name
, "kdebug_") ||
1181 LIT_STRNEQL(name
, "kernel_debug") ||
1182 LIT_STRNEQL(name
, "Debugger") ||
1183 LIT_STRNEQL(name
, "Call_DebuggerC") ||
1184 LIT_STRNEQL(name
, "lock_debugger") ||
1185 LIT_STRNEQL(name
, "unlock_debugger") ||
1186 LIT_STRNEQL(name
, "SysChoked"))
1190 * Place no probes that could be hit on the way to a panic.
1192 if (NULL
!= strstr(name
, "panic_") ||
1193 LIT_STRNEQL(name
, "panic") ||
1194 LIT_STRNEQL(name
, "handleMck") ||
1195 LIT_STRNEQL(name
, "unresolved_kernel_trap"))
1198 if (dtrace_probe_lookup(fbt_id
, modname
, name
, NULL
) != 0)
1201 for (j
= 0, instr
= (machine_inst_t
*)sym
[i
].n_value
, theInstr
= 0;
1202 (j
< 4) && ((uintptr_t)instr
>= instrLow
) && (instrHigh
> (uintptr_t)(instr
+ 2));
1204 theInstr
= instr
[0];
1205 if (theInstr
== FBT_PUSH_RBP
|| theInstr
== FBT_RET
|| theInstr
== FBT_RET_IMM16
)
/*
 * Decode the current instruction's length; a non-positive size means
 * the disassembler failed, so bail.  The entry probe requires the
 * canonical push %rbp prologue.  `limit` caps all further scanning at
 * the end of the text range.  The i1/i2/i3 bytes are then matched
 * against the 3-byte x86_64 sequence REX.W mov %rsp,%rbp; on a match,
 * advance one byte so `instr` points at the mov itself (the patch
 * point for the entry probe).
 */
1208 if ((size
= dtrace_instr_size(instr
)) <= 0)
1214 if (theInstr
!= FBT_PUSH_RBP
)
1221 limit
= (machine_inst_t
*)instrHigh
;
1223 if (i1
== FBT_REX_RSP_RBP
&& i2
== FBT_MOV_RSP_RBP0
&& i3
== FBT_MOV_RSP_RBP1
) {
1224 instr
+= 1; /* Advance to the mov %rsp,%rbp */
/*
 * Fallback when the mov does not immediately follow the push: the
 * compiler may have scheduled one unrelated instruction between
 * push %ebp and movl %esp,%ebp (see the example bytes below).  Step
 * past the push, re-decode, check we are still below `limit`, and then
 * accept either i386 encoding of movl %esp,%ebp (0x8b 0xec or
 * 0x89 0xe5).  If neither matches, this function gets no entry probe.
 */
1232 * Sometimes, the compiler will schedule an intervening instruction
1233 * in the function prologue. Example:
1236 * 000006d8 pushl %ebp
1237 * 000006d9 movl $0x00000004,%edx
1238 * 000006de movl %esp,%ebp
1240 * Try the next instruction, to see if it is a movl %esp,%ebp
1243 instr
+= 1; /* Advance past the pushl %ebp */
1244 if ((size
= dtrace_instr_size(instr
)) <= 0)
1249 if ((instr
+ 1) >= limit
)
1255 if (!(i1
== FBT_MOVL_ESP_EBP0_V0
&& i2
== FBT_MOVL_ESP_EBP1_V0
) &&
1256 !(i1
== FBT_MOVL_ESP_EBP0_V1
&& i2
== FBT_MOVL_ESP_EBP1_V1
))
1259 /* instr already points at the movl %esp,%ebp */
/*
 * Prologue matched: allocate and register the FBT entry probe.
 * The probe records the patch point (the mov instruction), the owning
 * module ctl and its load count (for stale-probe detection on module
 * reload), the original first byte (fbtp_savedval) and the trap byte
 * that will replace it when enabled (FBT_PATCHVAL), and the invop
 * return code telling the trap handler to emulate mov %rsp,%rbp.
 * Finally the probe is pushed onto the fbt_probetab hash chain keyed
 * by patch address, so the trap handler can find it.
 */
1264 fbt
= kmem_zalloc(sizeof (fbt_probe_t
), KM_SLEEP
);
1265 strlcpy( (char *)&(fbt
->fbtp_name
), name
, MAX_FBTP_NAME_CHARS
);
1266 fbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
, name
, FBT_ENTRY
, FBT_AFRAMES_ENTRY
, fbt
);
1267 fbt
->fbtp_patchpoint
= instr
;
1268 fbt
->fbtp_ctl
= ctl
;
1269 fbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
1270 fbt
->fbtp_rval
= DTRACE_INVOP_MOV_RSP_RBP
;
1271 fbt
->fbtp_savedval
= theInstr
;
1272 fbt
->fbtp_patchval
= FBT_PATCHVAL
;
1274 fbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(instr
)];
1275 fbt
->fbtp_symndx
= i
;
1276 fbt_probetab
[FBT_ADDR2NDX(instr
)] = fbt
;
/*
 * Forward disassembly toward the function's return point.  A failed
 * decode (size <= 0) means we likely walked into non-code (e.g. a
 * jump table), so give up on this function.
 *
 * Jump-table heuristic: for each of the sizeof(uintptr_t) byte
 * positions that could embed the current instruction inside a stored
 * pointer, read that candidate pointer (bounds-checked against the
 * symbol start and `limit`) and see whether it points back into this
 * function's body.  If it does, the bytes are treated as data, not
 * instructions, and the candidate return site is rejected.
 */
1284 * If this disassembly fails, then we've likely walked off into
1285 * a jump table or some other unsuitable area. Bail out of the
1288 if ((size
= dtrace_instr_size(instr
)) <= 0)
1292 * We (desperately) want to avoid erroneously instrumenting a
1293 * jump table, especially given that our markers are pretty
1294 * short: two bytes on x86, and just one byte on amd64. To
1295 * determine if we're looking at a true instruction sequence
1296 * or an inline jump table that happens to contain the same
1297 * byte sequences, we resort to some heuristic sleeze: we
1298 * treat this instruction as being contained within a pointer,
1299 * and see if that pointer points to within the body of the
1300 * function. If it does, we refuse to instrument it.
1302 for (j
= 0; j
< sizeof (uintptr_t); j
++) {
1303 uintptr_t check
= (uintptr_t)instr
- j
;
1306 if (check
< sym
[i
].n_value
)
1309 if (check
+ sizeof (uintptr_t) > (uintptr_t)limit
)
1312 ptr
= *(uint8_t **)check
;
1314 if (ptr
>= (uint8_t *)sym
[i
].n_value
&& ptr
< limit
) {
/*
 * The bytes passed the heuristic, so treat them as an instruction.
 * Seeing push %rbp here means we have run into the next function's
 * prologue -- stop.  A return candidate is a one-byte pop %rbp or
 * leave; remember it as patch_instr, then scan forward and require the
 * very next instruction to be a ret, ret imm16, or one of the three
 * jmp forms (short-relative, near-relative, far-absolute -- a tail
 * call also ends the function).  Each form is matched by both opcode
 * and decoded length to avoid false positives.
 */
1321 * OK, it's an instruction.
1323 theInstr
= instr
[0];
1325 /* Walked onto the start of the next routine? If so, bail out of this function. */
1326 if (theInstr
== FBT_PUSH_RBP
)
1329 if (!(size
== 1 && (theInstr
== FBT_POP_RBP
|| theInstr
== FBT_LEAVE
))) {
1335 * Found the pop %rbp; or leave.
1337 machine_inst_t
*patch_instr
= instr
;
1340 * Scan forward for a "ret", or "jmp".
1346 size
= dtrace_instr_size(instr
);
1347 if (size
<= 0) /* Failed instruction decode? */
1350 theInstr
= instr
[0];
1352 if (!(size
== FBT_RET_LEN
&& (theInstr
== FBT_RET
)) &&
1353 !(size
== FBT_RET_IMM16_LEN
&& (theInstr
== FBT_RET_IMM16
)) &&
1354 !(size
== FBT_JMP_SHORT_REL_LEN
&& (theInstr
== FBT_JMP_SHORT_REL
)) &&
1355 !(size
== FBT_JMP_NEAR_REL_LEN
&& (theInstr
== FBT_JMP_NEAR_REL
)) &&
1356 !(size
== FBT_JMP_FAR_ABS_LEN
&& (theInstr
== FBT_JMP_FAR_ABS
)))
/*
 * Valid return sequence found: allocate and register the FBT return
 * probe at patch_instr (the pop %rbp or leave).  The first return site
 * of a function creates a new probe id; subsequent sites are chained
 * via fbtp_next and share the first site's id (one logical "return"
 * probe with multiple patch points).  fbtp_rval selects which
 * instruction the trap handler must emulate (pop %rbp vs leave).
 * NOTE(review): the lone expression at original line 1385 computes the
 * return offset within the function; its left-hand side (presumably an
 * fbtp_roffset assignment) was dropped by the extraction -- confirm
 * against the original file.  As with the entry probe, the probe is
 * finally hashed into fbt_probetab by patch address.
 */
1360 * pop %rbp; ret; or leave; ret; or leave; jmp tailCalledFun; -- We have a winner!
1362 fbt
= kmem_zalloc(sizeof (fbt_probe_t
), KM_SLEEP
);
1363 strlcpy( (char *)&(fbt
->fbtp_name
), name
, MAX_FBTP_NAME_CHARS
);
1365 if (retfbt
== NULL
) {
1366 fbt
->fbtp_id
= dtrace_probe_create(fbt_id
, modname
,
1367 name
, FBT_RETURN
, FBT_AFRAMES_RETURN
, fbt
);
1369 retfbt
->fbtp_next
= fbt
;
1370 fbt
->fbtp_id
= retfbt
->fbtp_id
;
1374 fbt
->fbtp_patchpoint
= patch_instr
;
1375 fbt
->fbtp_ctl
= ctl
;
1376 fbt
->fbtp_loadcnt
= ctl
->mod_loadcnt
;
1378 if (*patch_instr
== FBT_POP_RBP
) {
1379 fbt
->fbtp_rval
= DTRACE_INVOP_POP_RBP
;
1381 ASSERT(*patch_instr
== FBT_LEAVE
);
1382 fbt
->fbtp_rval
= DTRACE_INVOP_LEAVE
;
1385 (uintptr_t)(patch_instr
- (uint8_t *)sym
[i
].n_value
);
1387 fbt
->fbtp_savedval
= *patch_instr
;
1388 fbt
->fbtp_patchval
= FBT_PATCHVAL
;
1389 fbt
->fbtp_hashnext
= fbt_probetab
[FBT_ADDR2NDX(patch_instr
)];
1390 fbt
->fbtp_symndx
= i
;
1391 fbt_probetab
[FBT_ADDR2NDX(patch_instr
)] = fbt
;
1401 extern struct modctl g_fbt_kernctl
;
1402 #undef kmem_alloc /* from its binding to dt_kmem_alloc glue */
1403 #undef kmem_free /* from its binding to dt_kmem_free glue */
1404 #include <vm/vm_kern.h>
/*
 * Public provider entry point: delegate probe discovery to the
 * internal walker using the global g_fbt_kernctl module descriptor,
 * then release the kernel symbol buffer it referenced.  The address is
 * NULL-checked before kmem_free, and address/size are zeroed so the
 * buffer cannot be freed or walked twice.
 * NOTE(review): the return-type line and closing brace were dropped by
 * the extraction; the visible body contains no return statement.
 */
1408 fbt_provide_module(void *arg
, struct modctl
*ctl
)
1411 __fbt_provide_module(arg
, &g_fbt_kernctl
);
1413 if ( (vm_offset_t
)g_fbt_kernctl
.address
!= (vm_offset_t
)NULL
)
1414 kmem_free(kernel_map
, (vm_offset_t
)g_fbt_kernctl
.address
, round_page(g_fbt_kernctl
.size
));
1415 g_fbt_kernctl
.address
= 0;
1416 g_fbt_kernctl
.size
= 0;