/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* #pragma ident	"@(#)fbt.c	1.18	07/01/10 SMI" */

#define _KERNEL /* Solaris vs. Darwin */
#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>
#include <pexpert/pexpert.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_glue.h>
#include <san/kasan.h>
/* #include <machine/trap.h> */
struct savearea_t; /* Used anonymously */
#if defined(__arm__) || defined(__arm64__)
typedef kern_return_t (*perfCallback)(int, struct savearea_t *, __unused int, __unused int);
extern perfCallback tempDTraceTrapHook;
extern kern_return_t fbt_perfCallback(int, struct savearea_t *, __unused int, __unused int);
#elif defined(__x86_64__)
typedef kern_return_t (*perfCallback)(int, struct savearea_t *, uintptr_t *, __unused int);
extern perfCallback tempDTraceTrapHook;
extern kern_return_t fbt_perfCallback(int, struct savearea_t *, uintptr_t *, __unused int);
#else
#error Unknown architecture
#endif
#define	FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
#define	FBT_PROBETAB_SIZE	0x8000		/* 32k entries -- 128K total */
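
/*
 * Illustrative example of the hash above (the address is hypothetical): with
 * the default table size of 0x8000 entries, fbt_probetab_mask is 0x7fff, so a
 * patchpoint address is indexed by discarding its low 4 bits and masking, e.g.
 *
 *	ndx = ((0xffffff80211a3c40ULL >> 4) & 0x7fff);	// == 0x23c4
 *
 * Probes that collide on an index are chained through fbtp_hashnext (see
 * fbt_destroy below).
 */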
static dev_info_t		*fbt_devi;
static int			fbt_probetab_size;
dtrace_provider_id_t		fbt_id;
fbt_probe_t			**fbt_probetab;
int				fbt_probetab_mask;
static int			fbt_verbose = 0;

int ignore_fbt_blacklist = 0;

extern int dtrace_kernel_symbol_mode;

void fbt_init( void );
/*
 * Critical routines that must not be probed. PR_5221096, PR_5379018.
 * The blacklist must be kept in alphabetic order for purposes of bsearch().
 */
static const char * critical_blacklist[] =
{
	"_ZN9IOService14newTemperatureElPS_",		/* IOService::newTemperature */
	"_ZN9IOService26temperatureCriticalForZoneEPS_", /* IOService::temperatureCriticalForZone */
	"_ZNK6OSData14getBytesNoCopyEv",		/* OSData::getBytesNoCopy, IOHibernateSystemWake path */
	"_disable_preemption",
	"cpu_processor_alloc",
	"cpu_processor_free",
	"cpu_signal_handler",
	"cpu_topology_start_cpu",
	"handle_pending_TLB_flushes",
	"hw_compare_and_store",
	"machine_idle_cstate",
	"machine_thread_get_kern_state",
	"nanoseconds_to_absolutetime",
	"nanotime_to_absolutetime",
	"pmap_cpu_high_map_vaddr",
	"pmap_cpu_high_shared_remap",
	"power_management_init",
	"preemption_underflow_panic",
	"register_cpu_setup_func",
	"unregister_cpu_setup_func",
};

#define CRITICAL_BLACKLIST_COUNT (sizeof(critical_blacklist)/sizeof(critical_blacklist[0]))
/*
 * The transitive closure of entry points that can be reached from probe context.
 * (Apart from routines whose names begin with dtrace_).
 */
static const char * probe_ctx_closure[] =
{
	"absolutetime_to_microtime",
	"clock_get_calendar_nanotime_nowait",
	"drain_write_buffer",
	"get_bsdthread_info",
	"invalidate_mmu_icache",
	"kernel_preempt_check",
	"mach_absolute_time",
	"max_valid_stack_address",
	"ml_at_interrupt_context",
	"ml_phys_write_byte_64",
	"ml_phys_write_half_64",
	"ml_phys_write_word_64",
	"ml_set_interrupts_enabled",
	"mt_cur_thread_cycles",
	"mt_cur_thread_instrs",
	"mt_fixed_counts_internal",
	"mt_mtc_update_count",
	"pmap_get_mapwindow",
	"pmap_put_mapwindow",
	"sync_iss_to_iks_unconditionally",
};

#define PROBE_CTX_CLOSURE_COUNT (sizeof(probe_ctx_closure)/sizeof(probe_ctx_closure[0]))
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
static int _cmp(const void *a, const void *b)
{
	return strncmp((const char *)a, *(const char **)b, strlen((const char *)a) + 1);
}
#pragma clang diagnostic pop
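
/*
 * Note the asymmetry in _cmp above: the key is the raw symbol name
 * (const char *), while each element of the sorted blacklist arrays is a
 * pointer to a string (const char **), hence the extra dereference. A lookup
 * therefore takes the shape used in fbt_excluded() below, e.g.
 *
 *	if (bsearch(name, critical_blacklist, CRITICAL_BLACKLIST_COUNT,
 *	    sizeof(name), _cmp) != NULL)
 *		return TRUE;
 *
 * where sizeof(name) is the size of a char pointer, i.e. the array's element size.
 */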
int
fbt_module_excluded(struct modctl* ctl)
{
	ASSERT(!MOD_FBT_DONE(ctl));

	if (ctl->mod_address == 0 || ctl->mod_size == 0) {
		return TRUE;
	}

	if (ctl->mod_loaded == 0) {
		return TRUE;
	}

	/*
	 * If the user sets this, trust they know what they are doing.
	 */
	if (ignore_fbt_blacklist)
		return FALSE;

	/*
	 * These drivers control low level functions that when traced
	 * cause problems often in the sleep/wake paths as well as
	 * critical debug and panic paths.
	 * If somebody really wants to drill in on one of these kexts, then
	 * they can override blacklisting using the boot-arg above.
	 */
#ifdef __x86_64__
	if (strstr(ctl->mod_modname, "AppleACPIEC") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleACPIPlatform") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleRTC") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "IOACPIFamily") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleIntelCPUPowerManagement") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleProfile") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleIntelProfile") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleEFI") != NULL)
		return TRUE;

#elif __arm__ || __arm64__
	if (LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPlatform") ||
	    LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPL192VIC") ||
	    LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleInterruptController"))
		return TRUE;
#endif

	return FALSE;
}
/*
 * FBT probe name validation
 */
int
fbt_excluded(const char* name)
{
	/*
	 * If the user set this, trust they know what they are doing.
	 */
	if (ignore_fbt_blacklist)
		return FALSE;

	if (LIT_STRNSTART(name, "dtrace_") && !LIT_STRNSTART(name, "dtrace_safe_")) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return TRUE;
	}

	/*
	 * Place no probes on critical routines (5221096)
	 */
	if (bsearch( name, critical_blacklist, CRITICAL_BLACKLIST_COUNT, sizeof(name), _cmp ) != NULL)
		return TRUE;

	/*
	 * Place no probes that could be hit in probe context.
	 */
	if (bsearch( name, probe_ctx_closure, PROBE_CTX_CLOSURE_COUNT, sizeof(name), _cmp ) != NULL) {
		return TRUE;
	}

	/*
	 * Place no probes that could be hit in probe context.
	 * In the interests of safety, some of these may be overly cautious.
	 * Also exclude very low-level "firmware" class calls.
	 */
	if (LIT_STRNSTART(name, "cpu_") ||		/* Coarse */
	    LIT_STRNSTART(name, "platform_") ||		/* Coarse */
	    LIT_STRNSTART(name, "machine_") ||		/* Coarse */
	    LIT_STRNSTART(name, "ml_") ||		/* Coarse */
	    LIT_STRNSTART(name, "PE_") ||		/* Coarse */
	    LIT_STRNSTART(name, "rtc_") ||		/* Coarse */
	    LIT_STRNSTART(name, "_rtc_") ||
	    LIT_STRNSTART(name, "rtclock_") ||
	    LIT_STRNSTART(name, "clock_") ||
	    LIT_STRNSTART(name, "bcopy") ||
	    LIT_STRNSTART(name, "pmap_") ||
	    LIT_STRNSTART(name, "hw_") ||		/* Coarse */
	    LIT_STRNSTART(name, "lapic_") ||		/* Coarse */
	    LIT_STRNSTART(name, "OSAdd") ||
	    LIT_STRNSTART(name, "OSBit") ||
	    LIT_STRNSTART(name, "OSDecrement") ||
	    LIT_STRNSTART(name, "OSIncrement") ||
	    LIT_STRNSTART(name, "OSCompareAndSwap") ||
	    LIT_STRNSTART(name, "etimer_") ||
	    LIT_STRNSTART(name, "dtxnu_kern_") ||
	    LIT_STRNSTART(name, "flush_mmu_tlb_"))
		return TRUE;

	/*
	 * Fasttrap inner-workings we can't instrument
	 */
	if (LIT_STRNSTART(name, "fasttrap_") ||
	    LIT_STRNSTART(name, "fuword") ||
	    LIT_STRNSTART(name, "suword"))
		return TRUE;

	if (LIT_STRNSTART(name, "_dtrace"))
		return TRUE; /* Shims in dtrace.c */

	if (LIT_STRNSTART(name, "hibernate_"))
		return TRUE;

	/*
	 * Place no probes in the exception handling path
	 */
#if __arm__ || __arm64__
	if (LIT_STRNSTART(name, "fleh_") ||
	    LIT_STRNSTART(name, "sleh_") ||
	    LIT_STRNSTART(name, "timer_state_event") ||
	    LIT_STRNEQL(name, "get_vfp_enabled"))
		return TRUE;

	if (LIT_STRNSTART(name, "_ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass") ||
	    LIT_STRNSTART(name, "_ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass") ||
	    LIT_STRNSTART(name, "_ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase"))
		return TRUE;
#endif

	if (LIT_STRNSTART(name, "machine_") ||
	    LIT_STRNSTART(name, "mapping_") ||
	    LIT_STRNSTART(name, "tsc_") ||
	    LIT_STRNSTART(name, "pmCPU") ||
	    LIT_STRNSTART(name, "pms") ||
	    LIT_STRNSTART(name, "usimple_") ||
	    LIT_STRNSTART(name, "lck_spin_lock") ||
	    LIT_STRNSTART(name, "lck_spin_unlock") ||
	    LIT_STRNSTART(name, "absolutetime_to_") ||
	    LIT_STRNSTART(name, "commpage_") ||
	    LIT_STRNSTART(name, "ml_") ||
	    LIT_STRNSTART(name, "PE_") ||
	    LIT_STRNSTART(name, "act_machine") ||
	    LIT_STRNSTART(name, "acpi_") ||
	    LIT_STRNSTART(name, "pal_")) {
		return TRUE;
	}

	// Don't Steal Mac OS X
	if (LIT_STRNSTART(name, "dsmos_"))
		return TRUE;

	/*
	 * Place no probes that could be hit on the way to the debugger.
	 */
	if (LIT_STRNSTART(name, "kdp_") ||
	    LIT_STRNSTART(name, "kdb_") ||
	    LIT_STRNSTART(name, "debug_")) {
		return TRUE;
	}

	if (LIT_STRNSTART(name, "kasan") ||
	    LIT_STRNSTART(name, "__kasan") ||
	    LIT_STRNSTART(name, "__asan")) {
		return TRUE;
	}

	/*
	 * Place no probes that could be hit on the way to a panic.
	 */
	if (NULL != strstr(name, "panic_"))
		return TRUE;

	return FALSE;
}
void
fbt_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg, *next, *hash, *last;
	int ndx;

	do {
		/*
		 * Now we need to remove this probe from the fbt_probetab.
		 */
		ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
		last = NULL;
		hash = fbt_probetab[ndx];

		while (hash != fbt) {
			ASSERT(hash != NULL);
			last = hash;
			hash = hash->fbtp_hashnext;
		}

		if (last != NULL) {
			last->fbtp_hashnext = fbt->fbtp_hashnext;
		} else {
			fbt_probetab[ndx] = fbt->fbtp_hashnext;
		}

		next = fbt->fbtp_next;
		kmem_free(fbt, sizeof (fbt_probe_t));

		fbt = next;
	} while (fbt != NULL);
}
int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		if (!ctl->mod_loaded) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s unloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		/*
		 * Now check that our modctl has the expected load count. If it
		 * doesn't, this module must have been unloaded and reloaded -- and
		 * we're not going to touch it.
		 */
		if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s reloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_enable is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		if (fbt->fbtp_currentval != fbt->fbtp_patchval) {
			/* Since dtrace probes can call into KASan and vice versa, things can get
			 * very slow if we have a lot of probes. This call will disable the KASan
			 * fakestack after a threshold of probes is reached. */
			kasan_fakestack_suspend();

			(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint,
			    sizeof(fbt->fbtp_patchval));

			/*
			 * Make the patched instruction visible via a data + instruction
			 * cache flush for the platforms that need it
			 */
			flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
			invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

			fbt->fbtp_currentval = fbt->fbtp_patchval;

			ctl->mod_nenabled++;
		}
	}

	dtrace_membar_consumer();

	return (0);
}
void
fbt_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		if (fbt->fbtp_currentval != fbt->fbtp_savedval) {
			(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint,
			    sizeof(fbt->fbtp_savedval));

			/*
			 * Make the patched instruction visible via a data + instruction
			 * cache flush for the platforms that need it
			 */
			flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
			invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

			fbt->fbtp_currentval = fbt->fbtp_savedval;
			ASSERT(ctl->mod_nenabled > 0);
			ctl->mod_nenabled--;

			kasan_fakestack_resume();
		}
	}

	dtrace_membar_consumer();
}
void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_savedval));

		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush for the platforms that need it
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0);

		fbt->fbtp_currentval = fbt->fbtp_savedval;
	}

	dtrace_membar_consumer();
}
void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_patchval));

		/*
		 * Make the patched instruction visible via a data + instruction cache flush.
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

		fbt->fbtp_currentval = fbt->fbtp_patchval;
	}

	dtrace_membar_consumer();
}
/*
 * APPLE NOTE: fbt_getargdesc not implemented
 */
#if !defined(__APPLE__)
/*ARGSUSED*/
static void
fbt_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
{
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = fbt->fbtp_ctl;
	struct module *mp = ctl->mod_mp;
	ctf_file_t *fp = NULL, *pfp;
	ctf_funcinfo_t f;
	int error;
	ctf_id_t argv[32], type;
	int argc = sizeof (argv) / sizeof (ctf_id_t);
	const char *parent;

	if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
		goto err;

	if (fbt->fbtp_roffset != 0 && desc->dtargd_ndx == 0) {
		(void) strlcpy(desc->dtargd_native, "int",
		    sizeof(desc->dtargd_native));
		return;
	}

	if ((fp = ctf_modopen(mp, &error)) == NULL) {
		/*
		 * We have no CTF information for this module -- and therefore
		 * no args[] information.
		 */
		goto err;
	}

	/*
	 * If we have a parent container, we must manually import it.
	 */
	if ((parent = ctf_parent_name(fp)) != NULL) {
		struct modctl *mp = &modules;
		struct modctl *mod = NULL;

		/*
		 * We must iterate over all modules to find the module that
		 * is our parent.
		 */
		do {
			if (strcmp(mp->mod_modname, parent) == 0) {
				mod = mp;
				break;
			}
		} while ((mp = mp->mod_next) != &modules);

		if (mod == NULL)
			goto err;

		if ((pfp = ctf_modopen(mod->mod_mp, &error)) == NULL) {
			goto err;
		}

		if (ctf_import(fp, pfp) != 0) {
			ctf_close(pfp);
			goto err;
		}

		ctf_close(pfp);
	}

	if (ctf_func_info(fp, fbt->fbtp_symndx, &f) == CTF_ERR)
		goto err;

	if (fbt->fbtp_roffset != 0) {
		if (desc->dtargd_ndx > 1)
			goto err;

		ASSERT(desc->dtargd_ndx == 1);
		type = f.ctc_return;
	} else {
		if (desc->dtargd_ndx + 1 > f.ctc_argc)
			goto err;

		if (ctf_func_args(fp, fbt->fbtp_symndx, argc, argv) == CTF_ERR)
			goto err;

		type = argv[desc->dtargd_ndx];
	}

	if (ctf_type_name(fp, type, desc->dtargd_native,
	    DTRACE_ARGTYPELEN) != NULL) {
		ctf_close(fp);
		return;
	}
err:
	if (fp != NULL)
		ctf_close(fp);

	desc->dtargd_ndx = DTRACE_ARGNONE;
}
#endif /* __APPLE__ */
void
fbt_provide_module_user_syms(struct modctl *ctl)
{
	unsigned int i;
	char *modname = ctl->mod_modname;

	dtrace_module_symbols_t* module_symbols = ctl->mod_user_symbols;
	if (module_symbols) {
		for (i = 0; i < module_symbols->dtmodsyms_count; i++) {
			/*
			 * symbol->dtsym_addr (the symbol address) passed in from
			 * user space, is already slid for both kexts and kernel.
			 */
			dtrace_symbol_t* symbol = &module_symbols->dtmodsyms_symbols[i];

			char* name = symbol->dtsym_name;

			/* Lop off omnipresent leading underscore. */
			if (*name == '_')
				name += 1;

			/*
			 * We're only blacklisting functions in the kernel for now.
			 */
			if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name))
				continue;

			/*
			 * Ignore symbols with a null address
			 */
			if (!symbol->dtsym_addr)
				continue;

			fbt_provide_probe(ctl, (uintptr_t)symbol->dtsym_addr,
			    (uintptr_t)(symbol->dtsym_addr + symbol->dtsym_size),
			    modname, name, (machine_inst_t*)(uintptr_t)symbol->dtsym_addr);
		}
	}
}
void
fbt_provide_module(void *arg, struct modctl *ctl)
{
#pragma unused(arg)
	ASSERT(dtrace_kernel_symbol_mode != DTRACE_KERNEL_SYMBOLS_NEVER);
	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);

	// Update the "ignore blacklist" bit
	if (ignore_fbt_blacklist)
		ctl->mod_flags |= MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES;

	if (MOD_FBT_DONE(ctl))
		return;

	if (fbt_module_excluded(ctl)) {
		ctl->mod_flags |= MODCTL_FBT_INVALID;
		return;
	}

	if (MOD_HAS_KERNEL_SYMBOLS(ctl)) {
		fbt_provide_module_kernel_syms(ctl);
		ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED;
		return;
	}

	if (MOD_HAS_USERSPACE_SYMBOLS(ctl)) {
		fbt_provide_module_user_syms(ctl);
		ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_PRIVATE_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_PRIVATE_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED;
		return;
	}
}
static dtrace_pattr_t fbt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
};

static dtrace_pops_t fbt_pops = {
	NULL,			/* dtps_provide */
	fbt_provide_module,	/* dtps_provide_module */
	fbt_enable,		/* dtps_enable */
	fbt_disable,		/* dtps_disable */
	fbt_suspend,		/* dtps_suspend */
	fbt_resume,		/* dtps_resume */
	NULL,			/* APPLE NOTE: fbt_getargdesc not implemented */
	NULL,			/* dtps_getargval */
	NULL,			/* dtps_usermode */
	fbt_destroy		/* dtps_destroy */
};
static void
fbt_cleanup(dev_info_t *devi)
{
	dtrace_invop_remove(fbt_invop);
	ddi_remove_minor_node(devi, NULL);
	kmem_free(fbt_probetab, fbt_probetab_size * sizeof (fbt_probe_t *));
	fbt_probetab = NULL;
	fbt_probetab_mask = 0;
	fbt_probetab_size = 0;
}
static int
fbt_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (fbt_probetab_size == 0)
		fbt_probetab_size = FBT_PROBETAB_SIZE;

	fbt_probetab_mask = fbt_probetab_size - 1;
	fbt_probetab =
	    kmem_zalloc(fbt_probetab_size * sizeof (fbt_probe_t *), KM_SLEEP);

	dtrace_invop_add(fbt_invop);

	if (ddi_create_minor_node(devi, "fbt", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_KERNEL, NULL,
	    &fbt_pops, NULL, &fbt_id) != 0) {
		fbt_cleanup(devi);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	fbt_devi = devi;

	return (DDI_SUCCESS);
}
static d_open_t _fbt_open;

static int
_fbt_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}

#define FBT_MAJOR -24 /* let the kernel pick the device number */
SYSCTL_DECL(_kern_dtrace);

static int
sysctl_dtrace_ignore_fbt_blacklist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int err;
	int value = *(int*)arg1;

	err = sysctl_io_number(req, value, sizeof(value), &value, NULL);
	if (err)
		return (err);

	if (req->newptr) {
		if (!(value == 0 || value == 1))
			return (ERANGE);

		/*
		 * We do not allow setting the blacklist back to on, as we have no way
		 * of knowing if those unsafe probes are still used.
		 *
		 * If we are using kernel symbols, we also do not allow any change,
		 * since the symbols are jettison'd after the first pass.
		 *
		 * We do not need to take any locks here because those symbol modes
		 * are permanent and do not change after boot.
		 */
		if (value != 1 || dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
		    dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL)
			return (EPERM);

		ignore_fbt_blacklist = 1;
	}

	return (0);
}
SYSCTL_PROC(_kern_dtrace, OID_AUTO, ignore_fbt_blacklist,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ignore_fbt_blacklist, 0,
    sysctl_dtrace_ignore_fbt_blacklist, "I", "fbt provider ignore blacklist");
/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fbt_cdevsw =
{
	_fbt_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};

static int fbt_inited = 0;
#undef kmem_alloc /* from its binding to dt_kmem_alloc glue */
#undef kmem_free /* from its binding to dt_kmem_free glue */
#include <vm/vm_kern.h>

void
fbt_init( void )
{
	if (0 == fbt_inited)
	{
		int majdevno = cdevsw_add(FBT_MAJOR, &fbt_cdevsw);

		if (majdevno < 0) {
			printf("fbt_init: failed to allocate a major number!\n");
			return;
		}

		PE_parse_boot_argn("IgnoreFBTBlacklist", &ignore_fbt_blacklist, sizeof (ignore_fbt_blacklist));

		fbt_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );

		fbt_inited = 1; /* Ensure this initialization occurs just one time. */
	}
	else
		panic("fbt_init: called twice!\n");
}