#include <libkern/sysctl.h>
#include <sys/kdebug.h>
+#if MONOTONIC
+#include <kern/monotonic.h>
+#include <machine/monotonic.h>
+#endif /* MONOTONIC */
+
+#include <IOKit/IOPlatformExpert.h>
+
#include <kern/cpu_data.h>
extern uint32_t pmap_find_phys(void *, uint64_t);
extern boolean_t pmap_valid_page(uint32_t);
extern void dtrace_suspend(void);
extern void dtrace_resume(void);
+extern void dtrace_early_init(void);
+extern int dtrace_keep_kernel_symbols(void);
extern void dtrace_init(void);
extern void helper_init(void);
extern void fasttrap_init(void);
extern void dtrace_proc_fork(proc_t*, proc_t*, int);
extern void dtrace_proc_exec(proc_t*);
extern void dtrace_proc_exit(proc_t*);
+
/*
 * DTrace Tunable Variables
 */
int dtrace_destructive_disallow = 0;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t dtrace_difo_maxsize = (256 * 1024);
-dtrace_optval_t dtrace_dof_maxsize = (384 * 1024);
+dtrace_optval_t dtrace_dof_maxsize = (512 * 1024);
dtrace_optval_t dtrace_statvar_maxsize = (16 * 1024);
dtrace_optval_t dtrace_statvar_maxsize_max = (16 * 10 * 1024);
size_t dtrace_actions_max = (16 * 1024);
static dev_info_t *dtrace_devi; /* device info */
static vmem_t *dtrace_arena; /* probe ID arena */
-static taskq_t *dtrace_taskq; /* task queue */
static dtrace_probe_t **dtrace_probes; /* array of all probes */
static int dtrace_nprobes; /* number of probes */
static dtrace_provider_t *dtrace_provider; /* provider list */
static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
static int dtrace_opens; /* number of opens */
static int dtrace_helpers; /* number of helpers */
+static dtrace_hash_t *dtrace_strings;
+static dtrace_hash_t *dtrace_byprov; /* probes hashed by provider */
static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
int dtrace_kernel_symbol_mode; /* See dtrace_impl.h for a description of Darwin's kernel symbol modes. */
static uint32_t dtrace_wake_clients;
-
+static uint8_t dtrace_kerneluuid[16]; /* the 128-bit uuid */
/*
* To save memory, some common memory allocations are given a
*
* ASSERT(MUTEX_HELD(&cpu_lock));
* becomes:
- * lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ * LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
*
*/
static lck_mtx_t dtrace_lock; /* probe state lock */
return (0);
}
-static dtrace_pops_t dtrace_provider_ops = {
- (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
- (void (*)(void *, struct modctl *))dtrace_nullop,
- (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
- (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
- (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
- (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
- NULL,
- NULL,
- NULL,
- (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
+static dtrace_pops_t dtrace_provider_ops = {
+ .dtps_provide = (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
+ .dtps_provide_module = (void (*)(void *, struct modctl *))dtrace_nullop,
+ .dtps_enable = (int (*)(void *, dtrace_id_t, void *))dtrace_nullop,
+ .dtps_disable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
+ .dtps_suspend = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
+ .dtps_resume = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
+ .dtps_getargdesc = NULL,
+ .dtps_getargval = NULL,
+ .dtps_usermode = NULL,
+ .dtps_destroy = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
};
static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
int dtrace_helptrace_enabled = 0;
#endif
+#if defined (__arm64__)
+/*
+ * The ioctl for adding helper DOF is based on the
+ * size of a user_addr_t. We need to recognize both
+ * U32 and U64 as the same action.
+ */
+#define DTRACEHIOC_ADDDOF_U32 _IOW('h', 4, user32_addr_t)
+#define DTRACEHIOC_ADDDOF_U64 _IOW('h', 4, user64_addr_t)
+#endif /* __arm64__ */
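+/*
+ * For example, a 32-bit process issuing ioctl(fd, DTRACEHIOC_ADDDOF, &dh)
+ * encodes a 4-byte user32_addr_t payload, while a 64-bit process encodes
+ * an 8-byte user64_addr_t payload; the ioctl handler below accepts both
+ * command encodings and treats them as a single ADDDOF action.
+ */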
/*
* DTrace Error Hashing
* outside of the implementation. There is no real structure to this cpp
* mishmash -- but is there ever?
*/
-#define DTRACE_HASHSTR(hash, probe) \
- dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
-#define DTRACE_HASHNEXT(hash, probe) \
- (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
+#define DTRACE_GETSTR(hash, elm) \
+ (hash->dth_getstr(elm, hash->dth_stroffs))
-#define DTRACE_HASHPREV(hash, probe) \
- (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
+#define DTRACE_HASHSTR(hash, elm) \
+ dtrace_hash_str(DTRACE_GETSTR(hash, elm))
+
+#define DTRACE_HASHNEXT(hash, elm) \
+ (void**)((uintptr_t)(elm) + (hash)->dth_nextoffs)
+
+#define DTRACE_HASHPREV(hash, elm) \
+ (void**)((uintptr_t)(elm) + (hash)->dth_prevoffs)
#define DTRACE_HASHEQ(hash, lhs, rhs) \
- (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
- *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
+ (strcmp(DTRACE_GETSTR(hash, lhs), \
+ DTRACE_GETSTR(hash, rhs)) == 0)
#define DTRACE_AGGHASHSIZE_SLEW 17
(where) = ((thr + DIF_VARIABLE_MAX) & \
(((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
+#elif defined(__arm__)
+/* FIXME: three function calls!!! */
+#define DTRACE_TLS_THRKEY(where) { \
+ uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
+ uint64_t thr = (uintptr_t)current_thread(); \
+ uint_t pid = (uint_t)dtrace_proc_selfpid(); \
+ ASSERT(intr < (1 << 3)); \
+ (where) = (((thr << 32 | pid) + DIF_VARIABLE_MAX) & \
+ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
+}
+#elif defined (__arm64__)
+/* FIXME: two function calls!! */
+#define DTRACE_TLS_THRKEY(where) { \
+ uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
+ uint64_t thr = (uintptr_t)current_thread(); \
+ ASSERT(intr < (1 << 3)); \
+ (where) = ((thr + DIF_VARIABLE_MAX) & \
+ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
+}
#else
#error Unknown architecture
#endif
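+/*
+ * Illustrative sketch of the key layout produced above: the low 61 bits
+ * carry the (on 32-bit ARM, pid-extended) thread pointer offset by
+ * DIF_VARIABLE_MAX, and the top bits carry the interrupt flag. E.g. for a
+ * probe firing at interrupt context on thread T:
+ *
+ *    key = ((T + DIF_VARIABLE_MAX) & (((uint64_t)1 << 61) - 1))
+ *        | ((uint64_t)1 << 61);
+ */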
if (value <= 0)
return (ERANGE);
+ if (value >= dtrace_copy_maxsize())
+ return (ERANGE);
+
lck_mtx_lock(&dtrace_lock);
dtrace_dof_maxsize = value;
lck_mtx_unlock(&dtrace_lock);
if (error)
return (error);
- if (value != 0 && value != 1)
- return (ERANGE);
+ if (req->newptr) {
+ if (value != 0 && value != 1)
+ return (ERANGE);
- lck_mtx_lock(&dtrace_lock);
- dtrace_provide_private_probes = value;
- lck_mtx_unlock(&dtrace_lock);
+ /*
+ * We do not allow changing this back to zero, as private probes
+ * would still be left registered
+ */
+ if (value != 1)
+ return (EPERM);
+ lck_mtx_lock(&dtrace_lock);
+ dtrace_provide_private_probes = value;
+ lck_mtx_unlock(&dtrace_lock);
+ }
return (0);
}
&dtrace_provide_private_probes, 0,
sysctl_dtrace_provide_private_probes, "I", "provider must provide the private probes");
+/*
+ * kern.dtrace.dof_mode
+ *
+ * Returns the current DOF mode.
+ * This value is read-only.
+ */
+SYSCTL_INT(_kern_dtrace, OID_AUTO, dof_mode, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &dtrace_dof_mode, 0, "dtrace dof mode");
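+
+/*
+ * Example query from user space (read-only; values are the
+ * DTRACE_DOF_MODE_* constants from dtrace_impl.h):
+ *
+ *    $ sysctl kern.dtrace.dof_mode
+ *    kern.dtrace.dof_mode: 1
+ */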
+
/*
* DTrace Probe Context Functions
*
* DTrace subroutines (DIF_SUBR_*) should use this helper to implement
* appropriate memory access protection.
*/
-static int
+int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
dtrace_vstate_t *vstate)
{
* APPLE NOTE: Account for introduction of __dtrace_probe()
*/
int aframes = mstate->dtms_probe->dtpr_aframes + 3;
+ dtrace_vstate_t *vstate = &state->dts_vstate;
dtrace_provider_t *pv;
uint64_t val;
}
else
- val = dtrace_getarg(ndx, aframes);
+ val = dtrace_getarg(ndx, aframes, mstate, vstate);
/*
* This is regrettably required to keep the compiler
case DIF_VAR_ZONENAME:
- {
- /* scratch_size is equal to length('global') + 1 for the null-terminator. */
- char *zname = (char *)mstate->dtms_scratch_ptr;
- size_t scratch_size = 6 + 1;
+ {
+ /* scratch_size is equal to length('global') + 1 for the null-terminator. */
+ char *zname = (char *)mstate->dtms_scratch_ptr;
+ size_t scratch_size = 6 + 1;
if (!dtrace_priv_proc(state))
return (0);
- /* The scratch allocation's lifetime is that of the clause. */
- if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
- DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
- return 0;
- }
+ /* The scratch allocation's lifetime is that of the clause. */
+ if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
+ return 0;
+ }
- mstate->dtms_scratch_ptr += scratch_size;
+ mstate->dtms_scratch_ptr += scratch_size;
- /* The kernel does not provide zonename, it will always return 'global'. */
- strlcpy(zname, "global", scratch_size);
+ /* The kernel does not provide zonename; it will always return 'global'. */
+ strlcpy(zname, "global", scratch_size);
+
+ return ((uint64_t)(uintptr_t)zname);
+ }
- return ((uint64_t)(uintptr_t)zname);
- }
+#if MONOTONIC
+ case DIF_VAR_CPUINSTRS:
+ return mt_cur_cpu_instrs();
+
+ case DIF_VAR_CPUCYCLES:
+ return mt_cur_cpu_cycles();
+
+ case DIF_VAR_VINSTRS:
+ return mt_cur_thread_instrs();
+
+ case DIF_VAR_VCYCLES:
+ return mt_cur_thread_cycles();
+#else /* MONOTONIC */
+ case DIF_VAR_CPUINSTRS: /* FALLTHROUGH */
+ case DIF_VAR_CPUCYCLES: /* FALLTHROUGH */
+ case DIF_VAR_VINSTRS: /* FALLTHROUGH */
+ case DIF_VAR_VCYCLES: /* FALLTHROUGH */
+ return 0;
+#endif /* !MONOTONIC */
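+/*
+ * Illustrative D usage, assuming libdtrace exposes these as the D
+ * variables cpuinstrs/cpucycles/vinstrs/vcycles. The counters are
+ * cumulative, so deltas are what is typically interesting, e.g.:
+ *
+ *    syscall::read:entry  { self->c = vcycles; }
+ *    syscall::read:return /self->c/ { @ = quantize(vcycles - self->c); self->c = 0; }
+ */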
case DIF_VAR_UID:
if (!dtrace_priv_proc_relaxed(state))
char c, target = (char)tupregs[1].dttk_value;
if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
- regs[rd] = NULL;
+ regs[rd] = 0;
break;
}
addr_limit = addr + lim;
*/
regs[rd] = 0;
mstate->dtms_strtok = 0;
- mstate->dtms_strtok_limit = NULL;
+ mstate->dtms_strtok_limit = 0;
break;
}
case DIF_SUBR_LLTOSTR: {
int64_t i = (int64_t)tupregs[0].dttk_value;
- int64_t val = i < 0 ? i * -1 : i;
- uint64_t size = 22; /* enough room for 2^64 in decimal */
+ uint64_t val, digit;
+ uint64_t size = 65; /* enough room for 2^64 in binary */
char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
+ int base = 10;
+
+ if (nargs > 1) {
+ if ((base = tupregs[1].dttk_value) <= 1 ||
+ base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
+ *flags |= CPU_DTRACE_ILLOP;
+ break;
+ }
+ }
+
+ val = (base == 10 && i < 0) ? i * -1 : i;
if (!DTRACE_INSCRATCH(mstate, size)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
break;
}
- for (*end-- = '\0'; val; val /= 10)
- *end-- = '0' + (val % 10);
+ for (*end-- = '\0'; val; val /= base) {
+ if ((digit = val % base) <= '9' - '0') {
+ *end-- = '0' + digit;
+ } else {
+ *end-- = 'a' + (digit - ('9' - '0') - 1);
+ }
+ }
+
+ if (i == 0 && base == 16)
+ *end-- = '0';
- if (i == 0)
+ if (base == 16)
+ *end-- = 'x';
+
+ if (i == 0 || base == 8 || base == 16)
*end-- = '0';
- if (i < 0)
+ if (i < 0 && base == 10)
*end-- = '-';
regs[rd] = (uintptr_t)end + 1;
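+/*
+ * Worked examples of the conversion above:
+ *
+ *    lltostr(255)     => "255"
+ *    lltostr(-255)    => "-255"
+ *    lltostr(255, 16) => "0xff"    (base 16 gets "0x"; no sign)
+ *    lltostr(8, 8)    => "010"     (base 8 gets a leading '0')
+ */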
if (pred != NULL) {
dtrace_difo_t *dp = pred->dtp_difo;
- int rval;
+ uint64_t rval;
rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
return (hval);
}
+static const char*
+dtrace_strkey_probe_provider(void *elm, uintptr_t offs)
+{
+#pragma unused(offs)
+ dtrace_probe_t *probe = (dtrace_probe_t*)elm;
+ return probe->dtpr_provider->dtpv_name;
+}
+
+static const char*
+dtrace_strkey_offset(void *elm, uintptr_t offs)
+{
+ return ((char *)((uintptr_t)(elm) + offs));
+}
+
+static const char*
+dtrace_strkey_deref_offset(void *elm, uintptr_t offs)
+{
+ return *((char **)((uintptr_t)(elm) + offs));
+}
+
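+/*
+ * These three extractors let one dtrace_hash_t implementation key
+ * different element layouts: dtrace_strkey_probe_provider keys a probe by
+ * its provider's name (dtrace_byprov), dtrace_strkey_deref_offset by an
+ * embedded char * (dtrace_bymod/byfunc/byname), and dtrace_strkey_offset
+ * by an inline string (dtrace_strings), e.g. as in dtrace_attach() below:
+ *
+ *    dtrace_byprov = dtrace_hash_create(dtrace_strkey_probe_provider, 0,
+ *        offsetof(dtrace_probe_t, dtpr_nextprov),
+ *        offsetof(dtrace_probe_t, dtpr_prevprov));
+ */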
static dtrace_hash_t *
-dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
+dtrace_hash_create(dtrace_strkey_f func, uintptr_t arg, uintptr_t nextoffs, uintptr_t prevoffs)
{
dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
- hash->dth_stroffs = stroffs;
+ hash->dth_getstr = func;
+ hash->dth_stroffs = arg;
hash->dth_nextoffs = nextoffs;
hash->dth_prevoffs = prevoffs;
for (i = 0; i < size; i++) {
for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
- dtrace_probe_t *probe = bucket->dthb_chain;
+ void *elm = bucket->dthb_chain;
- ASSERT(probe != NULL);
- ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
+ ASSERT(elm != NULL);
+ ndx = DTRACE_HASHSTR(hash, elm) & new_mask;
next = bucket->dthb_next;
bucket->dthb_next = new_tab[ndx];
}
static void
-dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
+dtrace_hash_add(dtrace_hash_t *hash, void *new)
{
int hashval = DTRACE_HASHSTR(hash, new);
int ndx = hashval & hash->dth_mask;
dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
- dtrace_probe_t **nextp, **prevp;
+ void **nextp, **prevp;
for (; bucket != NULL; bucket = bucket->dthb_next) {
if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
bucket->dthb_len++;
}
-static dtrace_probe_t *
-dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
+static void *
+dtrace_hash_lookup_string(dtrace_hash_t *hash, const char *str)
{
- int hashval = DTRACE_HASHSTR(hash, template);
+ int hashval = dtrace_hash_str(str);
int ndx = hashval & hash->dth_mask;
dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
for (; bucket != NULL; bucket = bucket->dthb_next) {
- if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
+ if (strcmp(str, DTRACE_GETSTR(hash, bucket->dthb_chain)) == 0)
return (bucket->dthb_chain);
}
return (NULL);
}
+static dtrace_probe_t *
+dtrace_hash_lookup(dtrace_hash_t *hash, void *template)
+{
+ return dtrace_hash_lookup_string(hash, DTRACE_GETSTR(hash, template));
+}
+
static int
-dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
+dtrace_hash_collisions(dtrace_hash_t *hash, void *template)
{
int hashval = DTRACE_HASHSTR(hash, template);
int ndx = hashval & hash->dth_mask;
}
static void
-dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
+dtrace_hash_remove(dtrace_hash_t *hash, void *elm)
{
- int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
+ int ndx = DTRACE_HASHSTR(hash, elm) & hash->dth_mask;
dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
- dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
- dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
+ void **prevp = DTRACE_HASHPREV(hash, elm);
+ void **nextp = DTRACE_HASHNEXT(hash, elm);
/*
- * Find the bucket that we're removing this probe from.
+ * Find the bucket that we're removing this elm from.
*/
for (; bucket != NULL; bucket = bucket->dthb_next) {
- if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
+ if (DTRACE_HASHEQ(hash, bucket->dthb_chain, elm))
break;
}
if (*prevp == NULL) {
if (*nextp == NULL) {
/*
- * The removed probe was the only probe on this
+ * The removed element was the only element on this
* bucket; we need to remove the bucket.
*/
dtrace_hashbucket_t *b = hash->dth_tab[ndx];
- ASSERT(bucket->dthb_chain == probe);
+ ASSERT(bucket->dthb_chain == elm);
ASSERT(b != NULL);
if (b == bucket) {
}
/*
- * Return a duplicate copy of a string. If the specified string is NULL,
- * this function returns a zero-length string.
- * APPLE NOTE: Darwin employs size bounded string operation.
+ * Returns a dtrace-managed copy of a string, and will
+ * deduplicate copies of the same string.
+ * If the specified string is NULL, returns an empty string.
*/
static char *
-dtrace_strdup(const char *str)
+dtrace_strref(const char *str)
{
+ dtrace_string_t *s = NULL;
size_t bufsize = (str != NULL ? strlen(str) : 0) + 1;
- char *new = kmem_zalloc(bufsize, KM_SLEEP);
- if (str != NULL)
- (void) strlcpy(new, str, bufsize);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- return (new);
+ if (str == NULL)
+ str = "";
+
+ for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
+ s = *(DTRACE_HASHNEXT(dtrace_strings, s))) {
+ if (strncmp(str, s->dtst_str, bufsize) != 0) {
+ continue;
+ }
+ ASSERT(s->dtst_refcount != UINT32_MAX);
+ s->dtst_refcount++;
+ return s->dtst_str;
+ }
+
+ s = kmem_zalloc(sizeof(dtrace_string_t) + bufsize, KM_SLEEP);
+ s->dtst_refcount = 1;
+ (void) strlcpy(s->dtst_str, str, bufsize);
+
+ dtrace_hash_add(dtrace_strings, s);
+
+ return s->dtst_str;
+}
+
+static void
+dtrace_strunref(const char *str)
+{
+ ASSERT(str != NULL);
+ dtrace_string_t *s = NULL;
+ size_t bufsize = strlen(str) + 1;
+
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
+ s = *(DTRACE_HASHNEXT(dtrace_strings, s))) {
+ if (strncmp(str, s->dtst_str, bufsize) != 0) {
+ continue;
+ }
+ ASSERT(s->dtst_refcount != 0);
+ s->dtst_refcount--;
+ if (s->dtst_refcount == 0) {
+ dtrace_hash_remove(dtrace_strings, s);
+ kmem_free(s, sizeof(dtrace_string_t) + bufsize);
+ }
+ return;
+ }
+ panic("attempt to unref non-existent string %s", str);
}
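+/*
+ * Usage sketch: interned strings are reference counted, so every
+ * dtrace_strref() is balanced by a dtrace_strunref(), both under
+ * dtrace_lock, e.g. over a probe's lifetime:
+ *
+ *    probe->dtpr_mod = dtrace_strref(mod);      (creation)
+ *    ...
+ *    dtrace_strunref(probe->dtpr_mod);          (removal)
+ */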
#define DTRACE_ISALPHA(c) \
if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
- priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC;
+ priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER;
}
else {
priv = DTRACE_PRIV_ALL;
}
+ *uidp = 0;
+ *zoneidp = 0;
} else {
*uidp = crgetuid(cr);
*zoneidp = crgetzoneid(cr);
dtrace_match_string(const char *s, const char *p, int depth)
{
#pragma unused(depth) /* __APPLE__ */
+ return (s != NULL && s == p);
+}
- /* APPLE NOTE: Darwin employs size bounded string operation. */
- return (s != NULL && strncmp(s, p, strlen(s) + 1) == 0);
+/*ARGSUSED*/
+static int
+dtrace_match_module(const char *s, const char *p, int depth)
+{
+#pragma unused(depth) /* __APPLE__ */
+ size_t len;
+ if (s == NULL || p == NULL)
+ return (0);
+
+ len = strlen(p);
+
+ if (strncmp(p, s, len) != 0)
+ return (0);
+
+ if (s[len] == '.' || s[len] == '\0')
+ return (1);
+
+ return (0);
}
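+/*
+ * Example (hypothetical names): with dtrace_match_module(), the pattern
+ * "foo.bar" matches module names "foo.bar" and "foo.bar.baz", but not
+ * "foo.barbaz" -- the prefix must end exactly at a '.' or at the NUL.
+ */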
/*ARGSUSED*/
dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *, void *), void *arg1, void *arg2)
{
- dtrace_probe_t template, *probe;
+ dtrace_probe_t *probe;
+ dtrace_provider_t prov_template = {
+ .dtpv_name = (char *)(uintptr_t)pkp->dtpk_prov
+ };
+
+ dtrace_probe_t template = {
+ .dtpr_provider = &prov_template,
+ .dtpr_mod = (char *)(uintptr_t)pkp->dtpk_mod,
+ .dtpr_func = (char *)(uintptr_t)pkp->dtpk_func,
+ .dtpr_name = (char *)(uintptr_t)pkp->dtpk_name
+ };
+
dtrace_hash_t *hash = NULL;
int len, rc, best = INT_MAX, nmatched = 0;
dtrace_id_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
/*
* If the probe ID is specified in the key, just lookup by ID and
return (nmatched);
}
- template.dtpr_mod = (char *)(uintptr_t)pkp->dtpk_mod;
- template.dtpr_func = (char *)(uintptr_t)pkp->dtpk_func;
- template.dtpr_name = (char *)(uintptr_t)pkp->dtpk_name;
-
/*
- * We want to find the most distinct of the module name, function
- * name, and name. So for each one that is not a glob pattern or
- * empty string, we perform a lookup in the corresponding hash and
- * use the hash table with the fewest collisions to do our search.
+ * We want to find the most distinct of the provider name, module name,
+ * function name, and name. So for each one that is not a glob
+ * pattern or empty string, we perform a lookup in the corresponding
+ * hash and use the hash table with the fewest collisions to do our
+ * search.
*/
+ if (pkp->dtpk_pmatch == &dtrace_match_string &&
+ (len = dtrace_hash_collisions(dtrace_byprov, &template)) < best) {
+ best = len;
+ hash = dtrace_byprov;
+ }
+
if (pkp->dtpk_mmatch == &dtrace_match_string &&
(len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
best = len;
return (&dtrace_match_string);
}
+static dtrace_probekey_f *
+dtrace_probekey_module_func(const char *p)
+{
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ dtrace_probekey_f *f = dtrace_probekey_func(p);
+ if (f == &dtrace_match_string) {
+ dtrace_probe_t template = {
+ .dtpr_mod = (char *)(uintptr_t)p,
+ };
+ if (dtrace_hash_lookup(dtrace_bymod, &template) == NULL) {
+ return (&dtrace_match_module);
+ }
+ return (&dtrace_match_string);
+ }
+ return f;
+}
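+/*
+ * i.e., an exact module name in a probe description falls back to
+ * '.'-boundary prefix matching only when no probe module with that exact
+ * name is hashed, so (hypothetically) "com.example.driver" can still
+ * match probes from "com.example.driver.Foo" when no module is literally
+ * named "com.example.driver".
+ */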
+
/*
* Build a probe comparison key for use with dtrace_match_probe() from the
* given probe description. By convention, a null key only matches anchored
static void
dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
{
- pkp->dtpk_prov = pdp->dtpd_provider;
+
+ pkp->dtpk_prov = dtrace_strref(pdp->dtpd_provider);
pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
- pkp->dtpk_mod = pdp->dtpd_mod;
- pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
+ pkp->dtpk_mod = dtrace_strref(pdp->dtpd_mod);
+ pkp->dtpk_mmatch = dtrace_probekey_module_func(pdp->dtpd_mod);
- pkp->dtpk_func = pdp->dtpd_func;
+ pkp->dtpk_func = dtrace_strref(pdp->dtpd_func);
pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
- pkp->dtpk_name = pdp->dtpd_name;
+ pkp->dtpk_name = dtrace_strref(pdp->dtpd_name);
pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
pkp->dtpk_id = pdp->dtpd_id;
pkp->dtpk_fmatch = &dtrace_match_nonzero;
}
+static void
+dtrace_probekey_release(dtrace_probekey_t *pkp)
+{
+ dtrace_strunref(pkp->dtpk_prov);
+ dtrace_strunref(pkp->dtpk_mod);
+ dtrace_strunref(pkp->dtpk_func);
+ dtrace_strunref(pkp->dtpk_name);
+}
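+/*
+ * Lifecycle sketch: every dtrace_probekey() takes string references that
+ * must be dropped with dtrace_probekey_release() once matching is done,
+ * as in dtrace_probe_lookup() and DTRACEIOC_PROBEMATCH below:
+ *
+ *    dtrace_probekey(&desc, &pkey);
+ *    ... dtrace_match(&pkey, ...) ...
+ *    dtrace_probekey_release(&pkey);
+ */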
+
static int
dtrace_cond_provider_match(dtrace_probedesc_t *desc, void *data)
{
provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
- /* APPLE NOTE: Darwin employs size bounded string operation. */
- {
- size_t bufsize = strlen(name) + 1;
- provider->dtpv_name = kmem_alloc(bufsize, KM_SLEEP);
- (void) strlcpy(provider->dtpv_name, name, bufsize);
- }
-
provider->dtpv_attr = *pap;
provider->dtpv_priv.dtpp_flags = priv;
if (cr != NULL) {
*idp = (dtrace_provider_id_t)provider;
if (pops == &dtrace_provider_ops) {
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+
+ provider->dtpv_name = dtrace_strref(name);
+
ASSERT(dtrace_anon.dta_enabling == NULL);
/*
lck_mtx_lock(&dtrace_provider_lock);
lck_mtx_lock(&dtrace_lock);
+ provider->dtpv_name = dtrace_strref(name);
+
/*
* If there is at least one provider registered, we'll add this
* provider after the first provider.
{
dtrace_provider_t *old = (dtrace_provider_t *)id;
dtrace_provider_t *prev = NULL;
- int i, self = 0;
- dtrace_probe_t *probe, *first = NULL;
+ int self = 0;
+ dtrace_probe_t *probe, *first = NULL, *next = NULL;
+ dtrace_probe_t template = {
+ .dtpr_provider = old
+ };
if (old->dtpv_pops.dtps_enable ==
(int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
*/
ASSERT(old == dtrace_provider);
ASSERT(dtrace_devi != NULL);
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
self = 1;
if (dtrace_provider->dtpv_next != NULL) {
* All of the probes for this provider are disabled; we can safely
* remove all of them from their hash chains and from the probe array.
*/
- for (i = 0; i < dtrace_nprobes && old->dtpv_probe_count!=0; i++) {
- if ((probe = dtrace_probes[i]) == NULL)
- continue;
-
+ for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
+ probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
if (probe->dtpr_provider != old)
continue;
- dtrace_probes[i] = NULL;
+ dtrace_probes[probe->dtpr_id - 1] = NULL;
old->dtpv_probe_count--;
dtrace_hash_remove(dtrace_bymod, probe);
first = probe;
probe->dtpr_nextmod = NULL;
} else {
+ /*
+ * Use nextmod as the chain of probes to remove
+ */
probe->dtpr_nextmod = first;
first = probe;
}
}
+ for (probe = first; probe != NULL; probe = next) {
+ next = probe->dtpr_nextmod;
+ dtrace_hash_remove(dtrace_byprov, probe);
+ }
+
/*
* The provider's probes have been removed from the hash chains and
* from the probe array. Now issue a dtrace_sync() to be sure that
*/
dtrace_sync();
- for (probe = first; probe != NULL; probe = first) {
- first = probe->dtpr_nextmod;
+ for (probe = first; probe != NULL; probe = next) {
+ next = probe->dtpr_nextmod;
old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
probe->dtpr_arg);
- kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
- kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
- kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
+ dtrace_strunref(probe->dtpr_mod);
+ dtrace_strunref(probe->dtpr_func);
+ dtrace_strunref(probe->dtpr_name);
vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
zfree(dtrace_probe_t_zone, probe);
}
prev->dtpv_next = old->dtpv_next;
}
+ dtrace_strunref(old->dtpv_name);
+
if (!self) {
lck_mtx_unlock(&dtrace_lock);
lck_mtx_unlock(&mod_lock);
lck_mtx_unlock(&dtrace_provider_lock);
}
- kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
kmem_free(old, sizeof (dtrace_provider_t));
return (0);
dtrace_condense(dtrace_provider_id_t id)
{
dtrace_provider_t *prov = (dtrace_provider_t *)id;
- int i;
- dtrace_probe_t *probe;
+ dtrace_probe_t *probe, *first = NULL;
+ dtrace_probe_t template = {
+ .dtpr_provider = prov
+ };
/*
* Make sure this isn't the dtrace provider itself.
/*
* Attempt to destroy the probes associated with this provider.
*/
- for (i = 0; i < dtrace_nprobes; i++) {
- if ((probe = dtrace_probes[i]) == NULL)
- continue;
+ for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
+ probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
if (probe->dtpr_provider != prov)
continue;
if (probe->dtpr_ecb != NULL)
continue;
- dtrace_probes[i] = NULL;
+ dtrace_probes[probe->dtpr_id - 1] = NULL;
prov->dtpv_probe_count--;
dtrace_hash_remove(dtrace_bymod, probe);
dtrace_hash_remove(dtrace_byfunc, probe);
dtrace_hash_remove(dtrace_byname, probe);
- prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
+ prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
probe->dtpr_arg);
- kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
- kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
- kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
+ dtrace_strunref(probe->dtpr_mod);
+ dtrace_strunref(probe->dtpr_func);
+ dtrace_strunref(probe->dtpr_name);
+ if (first == NULL) {
+ first = probe;
+ probe->dtpr_nextmod = NULL;
+ } else {
+ /*
+ * Use nextmod as the chain of probes to remove
+ */
+ probe->dtpr_nextmod = first;
+ first = probe;
+ }
+ }
+
+ for (probe = first; probe != NULL; probe = first) {
+ first = probe->dtpr_nextmod;
+ dtrace_hash_remove(dtrace_byprov, probe);
+ vmem_free(dtrace_arena, (void *)((uintptr_t)probe->dtpr_id), 1);
zfree(dtrace_probe_t_zone, probe);
- vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
}
lck_mtx_unlock(&dtrace_lock);
dtrace_id_t id;
if (provider == dtrace_provider) {
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
} else {
lck_mtx_lock(&dtrace_lock);
}
probe->dtpr_id = id;
probe->dtpr_gen = dtrace_probegen++;
- probe->dtpr_mod = dtrace_strdup(mod);
- probe->dtpr_func = dtrace_strdup(func);
- probe->dtpr_name = dtrace_strdup(name);
+ probe->dtpr_mod = dtrace_strref(mod);
+ probe->dtpr_func = dtrace_strref(func);
+ probe->dtpr_name = dtrace_strref(name);
probe->dtpr_arg = arg;
probe->dtpr_aframes = aframes;
probe->dtpr_provider = provider;
+ dtrace_hash_add(dtrace_byprov, probe);
dtrace_hash_add(dtrace_bymod, probe);
dtrace_hash_add(dtrace_byfunc, probe);
dtrace_hash_add(dtrace_byname, probe);
static dtrace_probe_t *
dtrace_probe_lookup_id(dtrace_id_t id)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_id_t)dtrace_nprobes)
return (NULL);
dtrace_id_t id;
int match;
- pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
+ lck_mtx_lock(&dtrace_lock);
+
+ pkey.dtpk_prov = dtrace_strref(((dtrace_provider_t *)prid)->dtpv_name);
pkey.dtpk_pmatch = &dtrace_match_string;
- pkey.dtpk_mod = mod;
+ pkey.dtpk_mod = dtrace_strref(mod);
pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
- pkey.dtpk_func = func;
+ pkey.dtpk_func = dtrace_strref(func);
pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
- pkey.dtpk_name = name;
+ pkey.dtpk_name = dtrace_strref(name);
pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
pkey.dtpk_id = DTRACE_IDNONE;
- lck_mtx_lock(&dtrace_lock);
match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
dtrace_probe_lookup_match, &id, NULL);
+
+ dtrace_probekey_release(&pkey);
+
lck_mtx_unlock(&dtrace_lock);
ASSERT(match == 1 || match == 0);
struct modctl *ctl;
int all = 0;
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
if (prv == NULL) {
all = 1;
uint32_t priv;
uid_t uid;
zoneid_t zoneid;
+ int err;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dtrace_ecb_create_cache = NULL;
dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
&priv, &uid, &zoneid);
- return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
- enab, ep));
+ err = dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, enab, ep);
+
+ dtrace_probekey_release(&pkey);
+
+ return err;
}
/*
dof_hdr_t *dof = (dof_hdr_t *)daddr;
uint32_t i;
- lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
dof_hdr_t *dof = (dof_hdr_t *)daddr;
uint32_t i;
- lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
meta->dtm_mops = *mops;
-
- /* APPLE NOTE: Darwin employs size bounded string operation. */
- {
- size_t bufsize = strlen(name) + 1;
- meta->dtm_name = kmem_alloc(bufsize, KM_SLEEP);
- (void) strlcpy(meta->dtm_name, name, bufsize);
- }
-
meta->dtm_arg = arg;
lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_unlock(&dtrace_meta_lock);
cmn_err(CE_WARN, "failed to register meta-register %s: "
"user-land meta-provider exists", name);
- kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
kmem_free(meta, sizeof (dtrace_meta_t));
return (EINVAL);
}
+ meta->dtm_name = dtrace_strref(name);
+
dtrace_meta_pid = meta;
*idp = (dtrace_meta_provider_id_t)meta;
*pp = NULL;
+ dtrace_strunref(old->dtm_name);
+
lck_mtx_unlock(&dtrace_lock);
lck_mtx_unlock(&dtrace_meta_lock);
- kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
kmem_free(old, sizeof (dtrace_meta_t));
return (0);
{
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dp->dtdo_refcnt++;
ASSERT(dp->dtdo_refcnt != 0);
int oldsvars, osz, nsz, otlocals, ntlocals;
uint_t i, id;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
{
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_refcnt != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
{
dtrace_predicate_t *pred;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_refcnt != 0);
pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
static void
dtrace_predicate_hold(dtrace_predicate_t *pred)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
dtrace_difo_t *dp = pred->dtp_difo;
#pragma unused(dp) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
dtrace_ecb_t *ecb;
dtrace_epid_t epid;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
ecb->dte_predicate = NULL;
{
dtrace_probe_t *probe = ecb->dte_probe;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_next == NULL);
if (probe == NULL) {
dtrace_optval_t nframes=0, strsize;
uint64_t arg = desc->dtad_arg;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
if (DTRACEACT_ISAGG(desc->dtad_kind)) {
dtrace_ecb_t *pecb, *prev = NULL;
dtrace_probe_t *probe = ecb->dte_probe;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (probe == NULL) {
/*
dtrace_predicate_t *pred;
dtrace_epid_t epid = ecb->dte_epid;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_next == NULL);
ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
dtrace_provider_t *prov;
dtrace_ecbdesc_t *desc = enab->dten_current;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(state != NULL);
ecb = dtrace_ecb_add(state, probe);
dtrace_ecb_t *ecb;
#pragma unused(ecb) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_epid_t)state->dts_necbs)
return (NULL);
dtrace_aggregation_t *agg;
#pragma unused(agg) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_aggid_t)state->dts_naggregations)
return (NULL);
dtrace_buffer_t *buf;
size_t size_before_alloc = dtrace_buffer_memory_inuse;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (size > (size_t)dtrace_nonroot_maxsize &&
!PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
ASSERT(buf->dtb_xamot == NULL);
-
/* DTrace, please do not eat all the memory. */
if (dtrace_buffer_canalloc(size) == B_FALSE)
goto err;
dtrace_buffer_polish(dtrace_buffer_t *buf)
{
ASSERT(buf->dtb_flags & DTRACEBUF_RING);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
return;
dtrace_ecbdesc_t *ep;
dtrace_vstate_t *vstate = enab->dten_vstate;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < enab->dten_ndesc; i++) {
dtrace_actdesc_t *act, *next;
{
dtrace_state_t *state;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
ASSERT(enab->dten_vstate != NULL);
dtrace_enabling_t *new, *enab;
int found = 0, err = ENOENT;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
{
dtrace_enabling_t *enab, *next;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
/*
* Iterate over all retained enablings, destroy the enablings retained
int i = 0;
int total_matched = 0, matched = 0;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < enab->dten_ndesc; i++) {
dtrace_ecbdesc_t *ep = enab->dten_desc[i];
dtrace_probedesc_t desc;
dtrace_genid_t gen;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
if (prv == NULL) {
all = 1;
roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- dof = dt_kmem_zalloc_aligned(len, 8, KM_SLEEP);
+ dof = kmem_zalloc_aligned(len, 8, KM_SLEEP);
dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
{
dof_hdr_t hdr, *dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
/*
* First, we're going to copyin() the sizeof (dof_hdr_t).
return (NULL);
}
- dof = dt_kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
+ dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
if (copyin(uarg, dof, hdr.dofh_loadsz) != 0 ||
dof->dofh_loadsz != hdr.dofh_loadsz) {
- dt_kmem_free_aligned(dof, hdr.dofh_loadsz);
+ kmem_free_aligned(dof, hdr.dofh_loadsz);
*errp = EFAULT;
return (NULL);
}
{
dof_hdr_t hdr, *dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
/*
* First, we're going to copyin() the sizeof (dof_hdr_t).
return (NULL);
}
- dof = dt_kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
+ dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
if (uread(p, dof, hdr.dofh_loadsz, uarg) != KERN_SUCCESS) {
- dt_kmem_free_aligned(dof, hdr.dofh_loadsz);
+ kmem_free_aligned(dof, hdr.dofh_loadsz);
*errp = EFAULT;
return (NULL);
}
return (dof);
}
+static void
+dtrace_dof_destroy(dof_hdr_t *dof)
+{
+ kmem_free_aligned(dof, dof->dofh_loadsz);
+}
+
static dof_hdr_t *
dtrace_dof_property(const char *name)
{
- uchar_t *buf;
- uint64_t loadsz;
- unsigned int len, i;
+ unsigned int len = 0;
dof_hdr_t *dof;
- /*
- * Unfortunately, array of values in .conf files are always (and
- * only) interpreted to be integer arrays. We must read our DOF
- * as an integer array, and then squeeze it into a byte array.
- */
- if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
- name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
- return (NULL);
+ if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
+ return NULL;
+ }
+
+ if (!PEReadNVRAMProperty(name, NULL, &len)) {
+ return NULL;
+ }
- for (i = 0; i < len; i++)
- buf[i] = (uchar_t)(((int *)buf)[i]);
+ dof = kmem_alloc_aligned(len, 8, KM_SLEEP);
+
+ if (!PEReadNVRAMProperty(name, dof, &len)) {
+ dtrace_dof_destroy(dof);
+ dtrace_dof_error(NULL, "unreadable DOF");
+ return NULL;
+ }
if (len < sizeof (dof_hdr_t)) {
- ddi_prop_free(buf);
+ dtrace_dof_destroy(dof);
dtrace_dof_error(NULL, "truncated header");
return (NULL);
}
- if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
- ddi_prop_free(buf);
+ if (len < dof->dofh_loadsz) {
+ dtrace_dof_destroy(dof);
dtrace_dof_error(NULL, "truncated DOF");
return (NULL);
}
- if (loadsz >= (uint64_t)dtrace_dof_maxsize) {
- ddi_prop_free(buf);
- dtrace_dof_error(NULL, "oversized DOF");
+ if (len != dof->dofh_loadsz) {
+ dtrace_dof_destroy(dof);
+ dtrace_dof_error(NULL, "invalid DOF size");
return (NULL);
}
- dof = dt_kmem_alloc_aligned(loadsz, 8, KM_SLEEP);
- bcopy(buf, dof, loadsz);
- ddi_prop_free(buf);
+ if (dof->dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
+ dtrace_dof_destroy(dof);
+ dtrace_dof_error(NULL, "oversized DOF");
+ return (NULL);
+ }
return (dof);
}
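+/*
+ * The names looked up here follow the "dof-data-<n>" convention used by
+ * dtrace_anon_property() below, so anonymous DOF can be staged in NVRAM,
+ * e.g. (sketch):
+ *
+ *    nvram dof-data-0=<DOF image bytes>
+ */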
-static void
-dtrace_dof_destroy(dof_hdr_t *dof)
-{
- dt_kmem_free_aligned(dof, dof->dofh_loadsz);
-}
-
/*
* Return the dof_sec_t pointer corresponding to a given section index. If the
* index is not valid, dtrace_dof_error() is called and NULL is returned. If
dtrace_enabling_t *enab;
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
/*
return (-1);
}
- if (dof->dofh_secsize == 0) {
- dtrace_dof_error(dof, "zero section header size");
+ if (dof->dofh_secsize < sizeof(dof_sec_t)) {
+ dtrace_dof_error(dof, "invalid section header size");
return (-1);
}
dtrace_dynvar_t *dvar, *next, *start;
size_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
bzero(dstate, sizeof (dtrace_dstate_t));
static void
dtrace_dstate_fini(dtrace_dstate_t *dstate)
{
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
if (dstate->dtds_base == NULL)
return;
dtrace_optval_t *opt;
int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
/* Cause restart */
*new_state = NULL;
major = ddi_driver_major(dtrace_devi);
}
- state->dts_dev = makedevice(major, minor);
+ state->dts_dev = makedev(major, minor);
if (devp != NULL)
*devp = state->dts_dev;
* the normal checks are bypassed.
*/
#if defined(__APPLE__)
+ if (cr != NULL) {
+ kauth_cred_ref(cr);
+ state->dts_cred.dcr_cred = cr;
+ }
if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
/*
size_t limit = buf->dtb_size;
int flags = 0, rval;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(which < DTRACEOPT_MAX);
ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
(state == dtrace_anon.dta_state &&
{
dtrace_icookie_t cookie;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
state->dts_activity != DTRACE_ACTIVITY_DRAINING)
dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
dtrace_optval_t val)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
return (EBUSY);
int nspec = state->dts_nspeculations;
uint32_t match;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
/*
* First, retract any retained enablings for this state.
* Release the credential hold we took in dtrace_state_create().
*/
if (state->dts_cred.dcr_cred != NULL)
- crfree(state->dts_cred.dcr_cred);
+ kauth_cred_unref(&state->dts_cred.dcr_cred);
/*
* Now we can safely disable and destroy any enabled probes. Because
/*
* DTrace Anonymous Enabling Functions
*/
+
+int
+dtrace_keep_kernel_symbols(void)
+{
+ if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
+ return 0;
+ }
+
+ if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL)
+ return 1;
+
+ return 0;
+}
+
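+/*
+ * Example: booting with dtrace_kernel_symbol_mode=3 (assuming that is the
+ * DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL value; see dtrace_impl.h)
+ * makes this return 1 on unrestricted systems, so in-kernel symbol data
+ * is kept resident rather than discarded in favor of userspace symbols.
+ */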
static dtrace_state_t *
dtrace_anon_grab(void)
{
dtrace_state_t *state;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if ((state = dtrace_anon.dta_state) == NULL) {
ASSERT(dtrace_anon.dta_enabling == NULL);
dof_hdr_t *dof;
char c[32]; /* enough for "dof-data-" + digits */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; ; i++) {
(void) snprintf(c, sizeof (c), "dof-data-%d", i);
break;
}
+#ifdef illumos
/*
* We want to create anonymous state, so we need to transition
* the kernel debugger to indicate that DTrace is active. If
dtrace_dof_destroy(dof);
break;
}
+#endif
/*
* If we haven't allocated an anonymous state, we'll do so now.
dtrace_vstate_t *vstate;
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (help == NULL || gen > help->dthps_generation)
return (EINVAL);
/*
* If we have a meta provider, remove this helper provider.
*/
- lck_mtx_lock(&dtrace_meta_lock);
if (dtrace_meta_pid != NULL) {
ASSERT(dtrace_deferred_pid == NULL);
dtrace_helper_provider_remove(&prov->dthp_prov,
p);
}
- lck_mtx_unlock(&dtrace_meta_lock);
dtrace_helper_provider_destroy(prov);
dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
dof_helper_t *dofhp)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
if (!dtrace_attached() || dtrace_meta_pid == NULL) {
p);
}
}
-
- lck_mtx_unlock(&dtrace_meta_lock);
}
static int
dtrace_helper_provider_t *hprov, **tmp_provs;
uint_t tmp_maxprovs, i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
help = p->p_dtrace_helpers;
ASSERT(help != NULL);
int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
uintptr_t daddr = (uintptr_t)dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if ((help = p->p_dtrace_helpers) == NULL)
help = dtrace_helpers_create(p);
* Any existing helpers force non-lazy behavior.
*/
if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
- lck_mtx_lock(&p->p_dtrace_sprlock);
+ dtrace_sprlock(p);
dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
unsigned int existing_dofs_count = (existing_dofs) ? existing_dofs->dofiod_count : 0;
#endif /* DEBUG */
unlock:
- lck_mtx_unlock(&p->p_dtrace_sprlock);
+ dtrace_sprunlock(p);
} else {
rval = EACCES;
}
* Any existing helpers force non-lazy behavior.
*/
if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
- lck_mtx_lock(&p->p_dtrace_sprlock);
+ dtrace_sprlock(p);
dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
#endif
}
-
- lck_mtx_unlock(&p->p_dtrace_sprlock);
- } else {
+ dtrace_sprunlock(p);
+ } else {
rval = EACCES;
}
dtrace_lazy_dofs_destroy(proc_t *p)
{
lck_rw_lock_shared(&dtrace_dof_mode_lock);
- lck_mtx_lock(&p->p_dtrace_sprlock);
+ dtrace_sprlock(p);
ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
p->p_dtrace_lazy_dofs = NULL;
- lck_mtx_unlock(&p->p_dtrace_sprlock);
+ dtrace_sprunlock(p);
lck_rw_unlock_shared(&dtrace_dof_mode_lock);
if (lazy_dofs) {
* fault in the dof. We could fix this by holding locks longer,
* but the errors are benign.
*/
- lck_mtx_lock(&p->p_dtrace_sprlock);
+ dtrace_sprlock(p);
ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
p->p_dtrace_lazy_dofs = NULL;
- lck_mtx_unlock(&p->p_dtrace_sprlock);
-
+ dtrace_sprunlock(p);
+ lck_mtx_lock(&dtrace_meta_lock);
/*
* Process each dof_helper_t
*/
lck_mtx_unlock(&dtrace_lock);
}
}
-
+ lck_mtx_unlock(&dtrace_meta_lock);
kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
+ } else {
+ lck_mtx_unlock(&dtrace_meta_lock);
}
}
static int
dtrace_lazy_dofs_duplicate(proc_t *parent, proc_t *child)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_assert(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_assert(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
lck_rw_lock_shared(&dtrace_dof_mode_lock);
- lck_mtx_lock(&parent->p_dtrace_sprlock);
+ dtrace_sprlock(parent);
/*
* We need to make sure that the transition to lazy dofs -> helpers
bcopy(parent_dofs, child_dofs, parent_dofs_size);
}
- lck_mtx_unlock(&parent->p_dtrace_sprlock);
+ dtrace_sprunlock(parent);
if (child_dofs) {
- lck_mtx_lock(&child->p_dtrace_sprlock);
+ dtrace_sprlock(child);
child->p_dtrace_lazy_dofs = child_dofs;
- lck_mtx_unlock(&child->p_dtrace_sprlock);
+ dtrace_sprunlock(child);
/**
* We process the DOF at this point if the mode is set to
* LAZY_OFF. This can happen if DTrace is still processing the
{
dtrace_helpers_t *help;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(p->p_dtrace_helpers == NULL);
help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
dtrace_vstate_t *vstate;
uint_t i;
+ lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
ASSERT(p->p_dtrace_helpers != NULL);
* Destroy the helper providers.
*/
if (help->dthps_maxprovs > 0) {
- lck_mtx_lock(&dtrace_meta_lock);
if (dtrace_meta_pid != NULL) {
ASSERT(dtrace_deferred_pid == NULL);
lck_mtx_unlock(&dtrace_lock);
}
- lck_mtx_unlock(&dtrace_meta_lock);
for (i = 0; i < help->dthps_nprovs; i++) {
dtrace_helper_provider_destroy(help->dthps_provs[i]);
--dtrace_helpers;
lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&dtrace_meta_lock);
}
static void
uint_t i;
int j, sz, hasprovs = 0;
+ lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
ASSERT(from->p_dtrace_helpers != NULL);
ASSERT(dtrace_helpers > 0);
if (hasprovs)
dtrace_helper_provider_register(to, newhelp, NULL);
+
+ lck_mtx_unlock(&dtrace_meta_lock);
}
/**
* the p_dtrace_sprlock lock. A full sprlock would
* task_suspend the parent.
*/
- lck_mtx_lock(&parent_proc->p_dtrace_sprlock);
+ dtrace_sprlock(parent_proc);
/*
* Remove all DTrace tracepoints from the child process. We
dtrace_fasttrap_fork(parent_proc, child_proc);
}
- lck_mtx_unlock(&parent_proc->p_dtrace_sprlock);
+ dtrace_sprunlock(parent_proc);
/*
* Duplicate any lazy dof(s). This must be done while NOT
struct modctl *nextp, *prevp;
ASSERT(newctl != NULL);
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
// Insert new module at the front of the list,
static modctl_t *
dtrace_modctl_lookup(struct kmod_info * kmod)
{
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
struct modctl * ctl;
dtrace_modctl_remove(struct modctl * ctl)
{
ASSERT(ctl != NULL);
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
modctl_t *prevp, *nextp, *curp;
// Remove stale chain first
ctl->mod_loaded = 1;
ctl->mod_flags = 0;
ctl->mod_user_symbols = NULL;
-
+
/*
* Find the UUID for this module, if it has one
*/
if (ctl->mod_address == g_kernel_kmod_info.address) {
ctl->mod_flags |= MODCTL_IS_MACH_KERNEL;
+ memcpy(dtrace_kerneluuid, ctl->mod_uuid, sizeof(dtrace_kerneluuid));
+ }
+ /*
+ * Static kexts have a UUID that is not used for symbolication, as all their
+ * symbols are in the kernel.
+ */
+ else if ((flag & KMOD_DTRACE_STATIC_KEXT) == KMOD_DTRACE_STATIC_KEXT) {
+ memcpy(ctl->mod_uuid, dtrace_kerneluuid, sizeof(dtrace_kerneluuid));
+ ctl->mod_flags |= MODCTL_IS_STATIC_KEXT;
}
}
dtrace_modctl_add(ctl);
probe->dtpr_provider->dtpv_probe_count--;
next = probe->dtpr_nextmod;
+ dtrace_hash_remove(dtrace_byprov, probe);
dtrace_hash_remove(dtrace_bymod, probe);
dtrace_hash_remove(dtrace_byfunc, probe);
dtrace_hash_remove(dtrace_byname, probe);
prov = probe->dtpr_provider;
prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
probe->dtpr_arg);
- kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
- kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
- kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
+ dtrace_strunref(probe->dtpr_mod);
+ dtrace_strunref(probe->dtpr_func);
+ dtrace_strunref(probe->dtpr_name);
vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
zfree(dtrace_probe_t_zone, probe);
static int
dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
{
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
lck_mtx_lock(&dtrace_lock);
switch (what) {
*/
/*ARGSUSED*/
static int
-dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
+dtrace_attach(dev_info_t *devi)
{
-#pragma unused(cmd) /* __APPLE__ */
dtrace_provider_id_t id;
dtrace_state_t *state = NULL;
dtrace_enabling_t *enab;
lck_mtx_lock(&dtrace_lock);
/* Darwin uses BSD cloning device driver to automagically obtain minor device number. */
-
- ddi_report_dev(devi);
dtrace_devi = devi;
dtrace_modload = dtrace_module_loaded;
register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
- dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
- 1, INT_MAX, 0);
dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
sizeof (dtrace_dstate_percpu_t) * (int)NCPU, DTRACE_STATE_ALIGN,
NULL, NULL, NULL, NULL, NULL, 0);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+
+ dtrace_byprov = dtrace_hash_create(dtrace_strkey_probe_provider,
+ 0, /* unused */
+ offsetof(dtrace_probe_t, dtpr_nextprov),
+ offsetof(dtrace_probe_t, dtpr_prevprov));
- dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
+ dtrace_bymod = dtrace_hash_create(dtrace_strkey_deref_offset,
+ offsetof(dtrace_probe_t, dtpr_mod),
offsetof(dtrace_probe_t, dtpr_nextmod),
offsetof(dtrace_probe_t, dtpr_prevmod));
- dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
+ dtrace_byfunc = dtrace_hash_create(dtrace_strkey_deref_offset,
+ offsetof(dtrace_probe_t, dtpr_func),
offsetof(dtrace_probe_t, dtpr_nextfunc),
offsetof(dtrace_probe_t, dtpr_prevfunc));
- dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
+ dtrace_byname = dtrace_hash_create(dtrace_strkey_deref_offset,
+ offsetof(dtrace_probe_t, dtpr_name),
offsetof(dtrace_probe_t, dtpr_nextname),
offsetof(dtrace_probe_t, dtpr_prevname));
dtrace_provider, NULL, NULL, "END", 0, NULL);
dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
+#elif (defined(__arm__) || defined(__arm64__))
+ dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
+ dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "END", 1, NULL);
+ dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
#else
#error Unknown Architecture
#endif
dtrace_opens++;
dtrace_membar_producer();
+#ifdef illumos
/*
* If the kernel debugger is active (that is, if the kernel debugger
* modified text in some way), we won't allow the open.
lck_mtx_unlock(&cpu_lock);
return (EBUSY);
}
+#endif
rv = dtrace_state_create(devp, cred_p, &state);
lck_mtx_unlock(&cpu_lock);
if (rv != 0 || state == NULL) {
- if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
+ if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
+#ifdef illumos
(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
+#endif
+ }
lck_mtx_unlock(&dtrace_lock);
/* propagate EAGAIN or ERESTART */
return (rv);
* Only relinquish control of the kernel debugger interface when there
* are no consumers and no anonymous enablings.
*/
- if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
+ if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
+#ifdef illumos
(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
-
+#endif
+ }
+
lck_mtx_unlock(&dtrace_lock);
lck_mtx_unlock(&cpu_lock);
return KERN_SUCCESS;
switch (cmd) {
+#if defined (__arm64__)
+ case DTRACEHIOC_ADDDOF_U32:
+ case DTRACEHIOC_ADDDOF_U64:
+#else
case DTRACEHIOC_ADDDOF:
+#endif /* __arm64__*/
{
dof_helper_t *dhp = NULL;
size_t dof_ioctl_data_size;
int multi_dof_claimed = 0;
proc_t* p = current_proc();
+ /*
+ * If this is a restricted process and dtrace is restricted,
+ * do not allow DOFs to be registered
+ */
+ if (dtrace_is_restricted() &&
+ !dtrace_are_restrictions_relaxed() &&
+ !dtrace_can_attach_to_proc(current_proc())) {
+ return (EACCES);
+ }
+
/*
* Read the number of DOF sections being passed in.
*/
dtrace_dof_error(NULL, "failed to copyin dofiod_count");
return (EFAULT);
}
-
+
/*
* Range check the count.
*/
dof_hdr_t *dof = dtrace_dof_copyin(dhp->dofhp_dof, &rval);
if (dof != NULL) {
+ lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
/*
}
lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&dtrace_meta_lock);
}
} while (++i < multi_dof->dofiod_count && rval == 0);
}
* EACCES means non-lazy
*/
if (rval == EACCES) {
+ lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
rval = dtrace_helper_destroygen(p, generation);
lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&dtrace_meta_lock);
}
return (rval);
desc.dtpd_id++;
}
- if (cmd == DTRACEIOC_PROBEMATCH) {
- dtrace_probekey(&desc, &pkey);
- pkey.dtpk_id = DTRACE_IDNONE;
- }
-
dtrace_cred2priv(cr, &priv, &uid, &zoneid);
lck_mtx_lock(&dtrace_lock);
- if (cmd == DTRACEIOC_PROBEMATCH) {
- /* Quiet compiler warning */
+ if (cmd == DTRACEIOC_PROBEMATCH) {
+ dtrace_probekey(&desc, &pkey);
+ pkey.dtpk_id = DTRACE_IDNONE;
+
+ /* Quiet compiler warning */
for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
if ((probe = dtrace_probes[i - 1]) != NULL &&
(m = dtrace_match_probe(probe, &pkey,
lck_mtx_unlock(&dtrace_lock);
return (EINVAL);
}
+ dtrace_probekey_release(&pkey);
} else {
/* Quiet compiler warning */
/*
* We have our snapshot; now copy it out.
*/
- if (copyout(buf->dtb_xamot, (user_addr_t)desc.dtbd_data,
+ if (dtrace_buffer_copyout(buf->dtb_xamot,
+ (user_addr_t)desc.dtbd_data,
buf->dtb_xamot_offset) != 0) {
lck_mtx_unlock(&dtrace_lock);
return (EFAULT);
ctl->mod_flags |= MODCTL_FBT_PROVIDE_PRIVATE_PROBES;
ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
- if (!MOD_SYMBOLS_DONE(ctl)) {
+ if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
dtmul_count++;
rval = EINVAL;
}
* are available, add user syms if the module might use them.
*/
ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
- if (!MOD_SYMBOLS_DONE(ctl)) {
+ if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
UUID* uuid = &uuids_list->dtmul_uuid[dtmul_count];
if (dtmul_count++ < uuids_list->dtmul_count) {
memcpy(uuid, ctl->mod_uuid, sizeof(UUID));
ctl->mod_flags |= MODCTL_FBT_PROVIDE_PRIVATE_PROBES;
ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
- if (MOD_HAS_UUID(ctl) && !MOD_SYMBOLS_DONE(ctl)) {
- if (memcmp(module_symbols->dtmodsyms_uuid, ctl->mod_uuid, sizeof(UUID)) == 0) {
- /* BINGO! */
- ctl->mod_user_symbols = module_symbols;
- break;
- }
+ if (MOD_HAS_UUID(ctl) && !MOD_SYMBOLS_DONE(ctl) && memcmp(module_symbols->dtmodsyms_uuid, ctl->mod_uuid, sizeof(UUID)) == 0) {
+ dtrace_provider_t *prv;
+ ctl->mod_user_symbols = module_symbols;
+
+ /*
+ * We're going to call each providers per-module provide operation
+ * specifying only this module.
+ */
+ for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
+ prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
+ /*
+ * We gave every provider a chance to provide with the user syms; go ahead and clear them.
+ */
+ ctl->mod_user_symbols = NULL; /* MUST reset this to clear HAS_USERSPACE_SYMBOLS */
}
ctl = ctl->mod_next;
}
- if (ctl) {
- dtrace_provider_t *prv;
-
- /*
- * We're going to call each providers per-module provide operation
- * specifying only this module.
- */
- for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
- prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
-
- /*
- * We gave every provider a chance to provide with the user syms, go ahead and clear them
- */
- ctl->mod_user_symbols = NULL; /* MUST reset this to clear HAS_USERSPACE_SYMBOLS */
- }
-
lck_mtx_unlock(&mod_lock);
lck_mtx_unlock(&dtrace_provider_lock);
dtrace_probes = NULL;
dtrace_nprobes = 0;
+ dtrace_hash_destroy(dtrace_strings);
+ dtrace_hash_destroy(dtrace_byprov);
dtrace_hash_destroy(dtrace_bymod);
dtrace_hash_destroy(dtrace_byfunc);
dtrace_hash_destroy(dtrace_byname);
+ dtrace_strings = NULL;
+ dtrace_byprov = NULL;
dtrace_bymod = NULL;
dtrace_byfunc = NULL;
dtrace_byname = NULL;
lck_mtx_unlock(&dtrace_lock);
lck_mtx_unlock(&dtrace_provider_lock);
+#ifdef illumos
/*
* We don't destroy the task queue until after we have dropped our
* locks (taskq_destroy() may block on running tasks). To prevent
*/
taskq_destroy(dtrace_taskq);
dtrace_taskq = NULL;
+#endif
return (DDI_SUCCESS);
}
static int gMajDevNo;
+void dtrace_early_init (void)
+{
+ dtrace_restriction_policy_load();
+
+ /*
+ * See dtrace_impl.h for a description of kernel symbol modes.
+ * The default is to wait for symbols from userspace (lazy symbols).
+ */
+ if (!PE_parse_boot_argn("dtrace_kernel_symbol_mode", &dtrace_kernel_symbol_mode, sizeof (dtrace_kernel_symbol_mode))) {
+ dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
+ }
+}
+
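+/*
+ * Example: the default symbol mode can be overridden at boot, e.g.
+ *
+ *    nvram boot-args="dtrace_kernel_symbol_mode=0"
+ *
+ * (see dtrace_impl.h for the defined modes).
+ */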
void
dtrace_init( void )
{
return;
}
-#if defined(DTRACE_MEMORY_ZONES)
- /*
- * Initialize the dtrace kalloc-emulation zones.
- */
- dtrace_alloc_init();
-#endif /* DTRACE_MEMORY_ZONES */
-
/*
* Allocate the dtrace_probe_t zone
*/
(void)dtrace_abs_to_nano(0LL); /* Force once only call to clock_timebase_info (which can take a lock) */
+ dtrace_strings = dtrace_hash_create(dtrace_strkey_offset,
+ offsetof(dtrace_string_t, dtst_str),
+ offsetof(dtrace_string_t, dtst_next),
+ offsetof(dtrace_string_t, dtst_prev));
+
dtrace_isa_init();
/*
* See dtrace_impl.h for a description of dof modes.
* makes no sense...
*/
if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) {
+#if CONFIG_EMBEDDED
+ /* Disable DOF mode by default for performance reasons */
+ dtrace_dof_mode = DTRACE_DOF_MODE_NEVER;
+#else
dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
+#endif
}
/*
break;
}
- /*
- * See dtrace_impl.h for a description of kernel symbol modes.
- * The default is to wait for symbols from userspace (lazy symbols).
- */
- if (!PE_parse_boot_argn("dtrace_kernel_symbol_mode", &dtrace_kernel_symbol_mode, sizeof (dtrace_kernel_symbol_mode))) {
- dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
- }
-
- dtrace_restriction_policy_load();
-
gDTraceInited = 1;
} else
* run. That way, anonymous DOF enabled under dtrace_attach() is safe
* to go.
*/
- dtrace_attach( (dev_info_t *)(uintptr_t)makedev(gMajDevNo, 0), 0 ); /* Punning a dev_t to a dev_info_t* */
+ dtrace_attach( (dev_info_t *)(uintptr_t)makedev(gMajDevNo, 0)); /* Punning a dev_t to a dev_info_t* */
/*
* Add the mach_kernel to the module list for lazy processing
if (dtrace_module_loaded(&fake_kernel_kmod, 0) != 0) {
printf("dtrace_postinit: Could not register mach_kernel modctl\n");
}
+
+ if (!PE_parse_boot_argn("dtrace_provide_private_probes", &dtrace_provide_private_probes, sizeof (dtrace_provide_private_probes))) {
+ dtrace_provide_private_probes = 0;
+ }
(void)OSKextRegisterKextsWithDTrace();
}