 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/* #pragma ident "@(#)fbt.c 1.18 07/01/10 SMI" */

#define _KERNEL /* Solaris vs. Darwin */
#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>
#include <pexpert/pexpert.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_glue.h>
/* #include <machine/trap.h> */
struct savearea_t; /* Used anonymously */
#if defined(__arm__) || defined(__arm64__)
typedef kern_return_t (*perfCallback)(int, struct savearea_t *, __unused int, __unused int);
extern perfCallback tempDTraceTrapHook;
extern kern_return_t fbt_perfCallback(int, struct savearea_t *, __unused int, __unused int);
#elif defined(__x86_64__)
typedef kern_return_t (*perfCallback)(int, struct savearea_t *, uintptr_t *, __unused int);
extern perfCallback tempDTraceTrapHook;
extern kern_return_t fbt_perfCallback(int, struct savearea_t *, uintptr_t *, __unused int);
#else
#error Unknown architecture
#endif
#define FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
#define FBT_PROBETAB_SIZE	0x8000		/* 32k entries -- 128K total */
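
/*
 * FBT_ADDR2NDX hashes a patchpoint address into fbt_probetab: the low 4 bits,
 * which carry little entropy for function entry points, are shifted off and
 * the result is masked with fbt_probetab_mask (the table size minus one, so
 * the table size is assumed to be a power of two). For example, with the
 * default 0x8000-entry table an arbitrary address such as 0xffffff80002a5c30
 * lands in bucket (0xffffff80002a5c30 >> 4) & 0x7fff == 0x25c3.
 */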
static dev_info_t *fbt_devi;
static int fbt_probetab_size;
dtrace_provider_id_t fbt_id;
fbt_probe_t **fbt_probetab;
int fbt_probetab_mask;
static int fbt_verbose = 0;

int ignore_fbt_blacklist = 0;

extern int dtrace_kernel_symbol_mode;

void fbt_init( void );
/*
 * Critical routines that must not be probed. PR_5221096, PR_5379018.
 * The blacklist must be kept in alphabetic order for purposes of bsearch().
 */
static const char * critical_blacklist[] =
{
94 "_ZN9IOService14newTemperatureElPS_", /* IOService::newTemperature */
95 "_ZN9IOService26temperatureCriticalForZoneEPS_", /* IOService::temperatureCriticalForZone */
96 "_ZNK6OSData14getBytesNoCopyEv", /* Data::getBytesNoCopy, IOHibernateSystemWake path */
97 "_disable_preemption",
118 "cpu_processor_alloc",
119 "cpu_processor_free",
120 "cpu_signal_handler",
130 "cpu_topology_start_cpu",
139 "handle_pending_TLB_flushes",
140 "hw_compare_and_store",
146 "machine_idle_cstate",
147 "machine_thread_get_kern_state",
151 "nanoseconds_to_absolutetime",
152 "nanotime_to_absolutetime",
163 "pmap_cpu_high_map_vaddr",
164 "pmap_cpu_high_shared_remap",
166 "power_management_init",
167 "preemption_underflow_panic",
168 "register_cpu_setup_func",
178 "unregister_cpu_setup_func",
};
#define CRITICAL_BLACKLIST_COUNT (sizeof(critical_blacklist)/sizeof(critical_blacklist[0]))
/*
 * The transitive closure of entry points that can be reached from probe context.
 * (Apart from routines whose names begin with dtrace_).
 */
static const char * probe_ctx_closure[] =
{
194 "absolutetime_to_microtime",
201 "clock_get_calendar_nanotime_nowait",
214 "drain_write_buffer",
219 "get_bsdthread_info",
222 "invalidate_mmu_icache",
226 "kernel_preempt_check",
228 "mach_absolute_time",
229 "max_valid_stack_address",
232 "ml_at_interrupt_context",
233 "ml_phys_write_byte_64",
234 "ml_phys_write_half_64",
235 "ml_phys_write_word_64",
236 "ml_set_interrupts_enabled",
240 "mt_cur_thread_cycles",
241 "mt_cur_thread_instrs",
243 "mt_fixed_counts_internal",
244 "mt_mtc_update_count",
251 "pmap_get_mapwindow",
254 "pmap_put_mapwindow",
265 "sync_iss_to_iks_unconditionally",
};
#define PROBE_CTX_CLOSURE_COUNT (sizeof(probe_ctx_closure)/sizeof(probe_ctx_closure[0]))
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
static int _cmp(const void *a, const void *b)
{
	return strncmp((const char *)a, *(const char **)b, strlen((const char *)a) + 1);
}
#pragma clang diagnostic pop
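
/*
 * _cmp is the comparator handed to bsearch() below: the key is the raw probe
 * name, each table element is a const char * (hence the extra indirection on
 * the second argument and the -Wcast-qual suppression above), and the "+ 1"
 * makes strncmp() compare through the terminating NUL, so a key that is
 * merely a prefix of a blacklist entry does not count as a match.
 */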
static int
fbt_module_excluded(struct modctl* ctl)
{
	ASSERT(!MOD_FBT_DONE(ctl));

	if (ctl->mod_address == 0 || ctl->mod_size == 0) {
		return TRUE;
	}

	if (ctl->mod_loaded == 0) {
		return TRUE;
	}

	/*
	 * If the user sets this, trust they know what they are doing.
	 */
	if (ignore_fbt_blacklist)
		return FALSE;

	/*
	 * These drivers control low level functions that when traced
	 * cause problems often in the sleep/wake paths as well as
	 * critical debug and panic paths.
	 * If somebody really wants to drill in on one of these kexts, then
	 * they can override blacklisting using the boot-arg above.
	 */
#ifdef __x86_64__
	if (strstr(ctl->mod_modname, "AppleACPIEC") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleACPIPlatform") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleRTC") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "IOACPIFamily") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleIntelCPUPowerManagement") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleProfile") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleIntelProfile") != NULL)
		return TRUE;

	if (strstr(ctl->mod_modname, "AppleEFI") != NULL)
		return TRUE;

#elif __arm__ || __arm64__
	if (LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPlatform") ||
	    LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPL192VIC") ||
	    LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleInterruptController"))
		return TRUE;
#endif

	return FALSE;
}
/*
 * FBT probe name validation
 */
int
fbt_excluded(const char* name)
{
	/*
	 * If the user set this, trust they know what they are doing.
	 */
	if (ignore_fbt_blacklist)
		return FALSE;

	if (LIT_STRNSTART(name, "dtrace_") && !LIT_STRNSTART(name, "dtrace_safe_")) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return TRUE;
	}

	/*
	 * Place no probes on critical routines (5221096)
	 */
	if (bsearch( name, critical_blacklist, CRITICAL_BLACKLIST_COUNT, sizeof(name), _cmp) != NULL)
		return TRUE;

	/*
	 * Place no probes that could be hit in probe context.
	 */
	if (bsearch( name, probe_ctx_closure, PROBE_CTX_CLOSURE_COUNT, sizeof(name), _cmp) != NULL) {
		return TRUE;
	}
	/*
	 * Place no probes that could be hit in probe context.
	 * In the interests of safety, some of these may be overly cautious.
	 * Also exclude very low-level "firmware" class calls.
	 */
	if (LIT_STRNSTART(name, "cpu_") ||		/* Coarse */
	    LIT_STRNSTART(name, "platform_") ||		/* Coarse */
	    LIT_STRNSTART(name, "machine_") ||		/* Coarse */
	    LIT_STRNSTART(name, "ml_") ||		/* Coarse */
	    LIT_STRNSTART(name, "PE_") ||		/* Coarse */
	    LIT_STRNSTART(name, "rtc_") ||		/* Coarse */
	    LIT_STRNSTART(name, "_rtc_") ||
	    LIT_STRNSTART(name, "rtclock_") ||
	    LIT_STRNSTART(name, "clock_") ||
	    LIT_STRNSTART(name, "bcopy") ||
	    LIT_STRNSTART(name, "pmap_") ||
	    LIT_STRNSTART(name, "hw_") ||		/* Coarse */
	    LIT_STRNSTART(name, "lapic_") ||		/* Coarse */
	    LIT_STRNSTART(name, "OSAdd") ||
	    LIT_STRNSTART(name, "OSBit") ||
	    LIT_STRNSTART(name, "OSDecrement") ||
	    LIT_STRNSTART(name, "OSIncrement") ||
	    LIT_STRNSTART(name, "OSCompareAndSwap") ||
	    LIT_STRNSTART(name, "etimer_") ||
	    LIT_STRNSTART(name, "dtxnu_kern_") ||
	    LIT_STRNSTART(name, "flush_mmu_tlb_"))
		return TRUE;

	/*
	 * Fasttrap inner-workings we can't instrument
	 */
	if (LIT_STRNSTART(name, "fasttrap_") ||
	    LIT_STRNSTART(name, "fuword") ||
	    LIT_STRNSTART(name, "suword"))
		return TRUE;

	if (LIT_STRNSTART(name, "_dtrace"))
		return TRUE; /* Shims in dtrace.c */

	if (LIT_STRNSTART(name, "hibernate_"))
		return TRUE;

	/*
	 * Place no probes in the exception handling path
	 */
#if __arm__ || __arm64__
	if (LIT_STRNSTART(name, "fleh_") ||
	    LIT_STRNSTART(name, "sleh_") ||
	    LIT_STRNSTART(name, "timer_state_event") ||
	    LIT_STRNEQL(name, "get_vfp_enabled"))
		return TRUE;

	if (LIT_STRNSTART(name, "_ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass") ||
	    LIT_STRNSTART(name, "_ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass") ||
	    LIT_STRNSTART(name, "_ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase"))
		return TRUE;
#endif
#ifdef __x86_64__
	if (LIT_STRNSTART(name, "machine_") ||
	    LIT_STRNSTART(name, "mapping_") ||
	    LIT_STRNSTART(name, "tsc_") ||
	    LIT_STRNSTART(name, "pmCPU") ||
	    LIT_STRNSTART(name, "pms") ||
	    LIT_STRNSTART(name, "usimple_") ||
	    LIT_STRNSTART(name, "lck_spin_lock") ||
	    LIT_STRNSTART(name, "lck_spin_unlock") ||
	    LIT_STRNSTART(name, "absolutetime_to_") ||
	    LIT_STRNSTART(name, "commpage_") ||
	    LIT_STRNSTART(name, "ml_") ||
	    LIT_STRNSTART(name, "PE_") ||
	    LIT_STRNSTART(name, "act_machine") ||
	    LIT_STRNSTART(name, "acpi_") ||
	    LIT_STRNSTART(name, "pal_")) {
		return TRUE;
	}

	// Don't Steal Mac OS X
	if (LIT_STRNSTART(name, "dsmos_"))
		return TRUE;
#endif

	/*
	 * Place no probes that could be hit on the way to the debugger.
	 */
	if (LIT_STRNSTART(name, "kdp_") ||
	    LIT_STRNSTART(name, "kdb_") ||
	    LIT_STRNSTART(name, "debug_")) {
		return TRUE;
	}

	/*
	 * Place no probes that could be hit on the way to a panic.
	 */
	if (NULL != strstr(name, "panic_"))
		return TRUE;

	return FALSE;
}
static void
fbt_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg, *next, *hash, *last;
	int ndx;

	do {
		/*
		 * Now we need to remove this probe from the fbt_probetab.
		 */
		ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
		last = NULL;
		hash = fbt_probetab[ndx];

		while (hash != fbt) {
			ASSERT(hash != NULL);
			last = hash;
			hash = hash->fbtp_hashnext;
		}

		if (last != NULL) {
			last->fbtp_hashnext = fbt->fbtp_hashnext;
		} else {
			fbt_probetab[ndx] = fbt->fbtp_hashnext;
		}

		next = fbt->fbtp_next;
		kmem_free(fbt, sizeof (fbt_probe_t));

		fbt = next;
	} while (fbt != NULL);
}
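
/*
 * Note on the two links walked above: fbtp_hashnext chains probes that hash
 * to the same fbt_probetab bucket, while fbtp_next chains the patchpoints
 * that belong to one dtrace probe; fbt_destroy() walks the latter and unhooks
 * each element from the former before freeing it.
 */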
int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		if (!ctl->mod_loaded) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s unloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		/*
		 * Now check that our modctl has the expected load count. If it
		 * doesn't, this module must have been unloaded and reloaded -- and
		 * we're not going to touch it.
		 */
		if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt is failing for probe %s "
				    "(module %s reloaded)",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_enable is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		if (fbt->fbtp_currentval != fbt->fbtp_patchval) {
			(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint,
			    sizeof(fbt->fbtp_patchval));
			/*
			 * Make the patched instruction visible via a data + instruction
			 * cache flush for the platforms that need it
			 */
			flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
			invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

			fbt->fbtp_currentval = fbt->fbtp_patchval;
		}
	}

	dtrace_membar_consumer();

	return (0);
}
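
/*
 * Note on fbt_enable(): dtrace_casptr() installs fbt_perfCallback into
 * tempDTraceTrapHook only when the hook is still NULL, so a trap handler is
 * known to be in place (or the probe is skipped) before the patch instruction
 * is copied over the function entry with ml_nofault_copy(). The D-cache flush
 * and I-cache invalidation that follow ensure the CPU fetches the patched
 * instruction rather than a stale cached copy.
 */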
static void
fbt_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		if (fbt->fbtp_currentval != fbt->fbtp_savedval) {
			(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint,
			    sizeof(fbt->fbtp_savedval));
			/*
			 * Make the patched instruction visible via a data + instruction
			 * cache flush for the platforms that need it
			 */
			flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
			invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

			fbt->fbtp_currentval = fbt->fbtp_savedval;
			ASSERT(ctl->mod_nenabled > 0);
			ctl->mod_nenabled--;
		}
	}

	dtrace_membar_consumer();
}
static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_savedval));

		/*
		 * Make the patched instruction visible via a data + instruction
		 * cache flush for the platforms that need it
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0);

		fbt->fbtp_currentval = fbt->fbtp_savedval;
	}

	dtrace_membar_consumer();
}
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id)
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = NULL;

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		ctl = fbt->fbtp_ctl;

		ASSERT(ctl->mod_nenabled > 0);
		if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
			continue;

		dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback);
		if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) {
			if (fbt_verbose) {
				cmn_err(CE_NOTE, "fbt_resume is failing for probe %s "
				    "in module %s: tempDTraceTrapHook already occupied.",
				    fbt->fbtp_name, ctl->mod_modname);
			}
			continue;
		}

		(void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint,
		    sizeof(fbt->fbtp_patchval));

		/*
		 * Make the patched instruction visible via a data + instruction cache flush.
		 */
		flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);
		invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0);

		fbt->fbtp_currentval = fbt->fbtp_patchval;
	}

	dtrace_membar_consumer();
}
/*
 * APPLE NOTE: fbt_getargdesc not implemented
 */
#if !defined(__APPLE__)
static void
fbt_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
{
	fbt_probe_t *fbt = parg;
	struct modctl *ctl = fbt->fbtp_ctl;
	struct module *mp = ctl->mod_mp;
	ctf_file_t *fp = NULL, *pfp;
	ctf_funcinfo_t f;
	int error;
	ctf_id_t argv[32], type;
	int argc = sizeof (argv) / sizeof (ctf_id_t);
	const char *parent;

	if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt))
		goto err;

	if (fbt->fbtp_roffset != 0 && desc->dtargd_ndx == 0) {
		(void) strlcpy(desc->dtargd_native, "int",
		    sizeof(desc->dtargd_native));
		return;
	}

	if ((fp = ctf_modopen(mp, &error)) == NULL) {
		/*
		 * We have no CTF information for this module -- and therefore
		 * no args[] information.
		 */
		goto err;
	}

	/*
	 * If we have a parent container, we must manually import it.
	 */
	if ((parent = ctf_parent_name(fp)) != NULL) {
		struct modctl *mp = &modules;
		struct modctl *mod = NULL;

		/*
		 * We must iterate over all modules to find the module that
		 * is our parent.
		 */
		do {
			if (strcmp(mp->mod_modname, parent) == 0) {
				mod = mp;
				break;
			}
		} while ((mp = mp->mod_next) != &modules);

		if (mod == NULL)
			goto err;

		if ((pfp = ctf_modopen(mod->mod_mp, &error)) == NULL) {
			goto err;
		}

		if (ctf_import(fp, pfp) != 0) {
			ctf_close(pfp);
			goto err;
		}

		ctf_close(pfp);
	}

	if (ctf_func_info(fp, fbt->fbtp_symndx, &f) == CTF_ERR)
		goto err;

	if (fbt->fbtp_roffset != 0) {
		if (desc->dtargd_ndx > 1)
			goto err;

		ASSERT(desc->dtargd_ndx == 1);
		type = f.ctc_return;
	} else {
		if (desc->dtargd_ndx + 1 > f.ctc_argc)
			goto err;

		if (ctf_func_args(fp, fbt->fbtp_symndx, argc, argv) == CTF_ERR)
			goto err;

		type = argv[desc->dtargd_ndx];
	}

	if (ctf_type_name(fp, type, desc->dtargd_native,
	    DTRACE_ARGTYPELEN) != NULL) {
		ctf_close(fp);
		return;
	}

err:
	if (fp != NULL)
		ctf_close(fp);

	desc->dtargd_ndx = DTRACE_ARGNONE;
}
#endif /* __APPLE__ */
static void
fbt_provide_module_user_syms(struct modctl *ctl)
{
	unsigned int i;
	char *modname = ctl->mod_modname;

	dtrace_module_symbols_t* module_symbols = ctl->mod_user_symbols;
	if (module_symbols) {
		for (i = 0; i < module_symbols->dtmodsyms_count; i++) {
			/*
			 * symbol->dtsym_addr (the symbol address) passed in from
			 * user space, is already slid for both kexts and kernel.
			 */
			dtrace_symbol_t* symbol = &module_symbols->dtmodsyms_symbols[i];

			char* name = symbol->dtsym_name;

			/* Lop off omnipresent leading underscore. */
			if (*name == '_')
				name += 1;

			/*
			 * We're only blacklisting functions in the kernel for now.
			 */
			if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name))
				continue;

			/*
			 * Ignore symbols with a null address
			 */
			if (!symbol->dtsym_addr)
				continue;

			fbt_provide_probe(ctl, (uintptr_t)symbol->dtsym_addr, (uintptr_t)(symbol->dtsym_addr + symbol->dtsym_size), modname, name, (machine_inst_t*)(uintptr_t)symbol->dtsym_addr);
		}
	}
}
void
fbt_provide_module(void *arg, struct modctl *ctl)
{
#pragma unused(arg)
	ASSERT(dtrace_kernel_symbol_mode != DTRACE_KERNEL_SYMBOLS_NEVER);
	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);

	// Update the "ignore blacklist" bit
	if (ignore_fbt_blacklist)
		ctl->mod_flags |= MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES;

	if (MOD_FBT_DONE(ctl))
		return;

	if (fbt_module_excluded(ctl)) {
		ctl->mod_flags |= MODCTL_FBT_INVALID;
		return;
	}

	if (MOD_HAS_KERNEL_SYMBOLS(ctl)) {
		fbt_provide_module_kernel_syms(ctl);
		ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED;
		return;
	}

	if (MOD_HAS_USERSPACE_SYMBOLS(ctl)) {
		fbt_provide_module_user_syms(ctl);
		ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_PRIVATE_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_PRIVATE_PROBES_PROVIDED;
		if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl))
			ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED;
		return;
	}
}
static dtrace_pattr_t fbt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
};

static dtrace_pops_t fbt_pops = {
	NULL,
	fbt_provide_module,
	fbt_enable,
	fbt_disable,
	fbt_suspend,
	fbt_resume,
	NULL, /* APPLE NOTE: fbt_getargdesc not implemented */
	NULL,
	NULL,
	fbt_destroy
};
static void
fbt_cleanup(dev_info_t *devi)
{
	dtrace_invop_remove(fbt_invop);
	ddi_remove_minor_node(devi, NULL);
	kmem_free(fbt_probetab, fbt_probetab_size * sizeof (fbt_probe_t *));
	fbt_probetab = NULL;
	fbt_probetab_mask = 0;
	fbt_probetab_size = 0;
}
static int
fbt_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (fbt_probetab_size == 0)
		fbt_probetab_size = FBT_PROBETAB_SIZE;

	fbt_probetab_mask = fbt_probetab_size - 1;
	fbt_probetab =
	    kmem_zalloc(fbt_probetab_size * sizeof (fbt_probe_t *), KM_SLEEP);

	dtrace_invop_add(fbt_invop);

	if (ddi_create_minor_node(devi, "fbt", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_KERNEL, NULL,
	    &fbt_pops, NULL, &fbt_id) != 0) {
		fbt_cleanup(devi);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	fbt_devi = devi;

	return (DDI_SUCCESS);
}
static d_open_t _fbt_open;

static int
_fbt_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}

#define FBT_MAJOR  -24 /* let the kernel pick the device number */
SYSCTL_DECL(_kern_dtrace);

static int
sysctl_dtrace_ignore_fbt_blacklist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int err;
	int value = *(int*)arg1;

	err = sysctl_io_number(req, value, sizeof(value), &value, NULL);
	if (err)
		return (err);
	if (req->newptr) {
		if (!(value == 0 || value == 1))
			return (ERANGE);

		/*
		 * We do not allow setting the blacklist back to on, as we have no way
		 * of knowing if those unsafe probes are still used.
		 *
		 * If we are using kernel symbols, we also do not allow any change,
		 * since the symbols are jettison'd after the first pass.
		 *
		 * We do not need to take any locks here because those symbol modes
		 * are permanent and do not change after boot.
		 */
		if (value != 1 || dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
		    dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL)
			return (EPERM);

		ignore_fbt_blacklist = 1;
	}

	return (0);
}
, OID_AUTO
, ignore_fbt_blacklist
,
981 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
982 &ignore_fbt_blacklist
, 0,
983 sysctl_dtrace_ignore_fbt_blacklist
, "I", "fbt provider ignore blacklist");
/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fbt_cdevsw =
{
	_fbt_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
= 0;
1009 #undef kmem_alloc /* from its binding to dt_kmem_alloc glue */
1010 #undef kmem_free /* from its binding to dt_kmem_free glue */
1011 #include <vm/vm_kern.h>
1016 if (0 == fbt_inited
)
1018 int majdevno
= cdevsw_add(FBT_MAJOR
, &fbt_cdevsw
);
1021 printf("fbt_init: failed to allocate a major number!\n");
1025 PE_parse_boot_argn("IgnoreFBTBlacklist", &ignore_fbt_blacklist
, sizeof (ignore_fbt_blacklist
));
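
		/*
		 * Usage note (an assumption about typical use, not from the
		 * original source): the IgnoreFBTBlacklist boot-arg is the only
		 * way to start up with the blacklist disabled, for example by
		 * setting
		 *
		 *	boot-args="IgnoreFBTBlacklist=1"
		 *
		 * ahead of time; the sysctl registered above can only relax the
		 * blacklist after boot, and only when the kernel-symbol mode
		 * permits it.
		 */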
		fbt_attach( (dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH );

		fbt_inited = 1; /* Ensure this initialization occurs just one time. */
	}
	else
		panic("fbt_init: called twice!\n");
}