#include <libkern/sysctl.h>
#include <sys/kdebug.h>
+#if MONOTONIC
+#include <kern/monotonic.h>
+#include <machine/monotonic.h>
+#endif /* MONOTONIC */
+
#include <kern/cpu_data.h>
extern uint32_t pmap_find_phys(void *, uint64_t);
extern boolean_t pmap_valid_page(uint32_t);
int dtrace_destructive_disallow = 0;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t dtrace_difo_maxsize = (256 * 1024);
-dtrace_optval_t dtrace_dof_maxsize = (384 * 1024);
+dtrace_optval_t dtrace_dof_maxsize = (512 * 1024);
dtrace_optval_t dtrace_statvar_maxsize = (16 * 1024);
dtrace_optval_t dtrace_statvar_maxsize_max = (16 * 10 * 1024);
size_t dtrace_actions_max = (16 * 1024);
*
* ASSERT(MUTEX_HELD(&cpu_lock));
* becomes:
- * lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ * LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
*
*/
static lck_mtx_t dtrace_lock; /* probe state lock */
int dtrace_helptrace_enabled = 0;
#endif
+#if defined (__arm64__)
+/*
+ * The ioctl number for adding helper DOF encodes the size of a
+ * user_addr_t, so the U32 and U64 variants must both be recognized
+ * as the same action.
+ */
+#define DTRACEHIOC_ADDDOF_U32 _IOW('h', 4, user32_addr_t)
+#define DTRACEHIOC_ADDDOF_U64 _IOW('h', 4, user64_addr_t)
+#endif /* __arm64__ */
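+/*
+ * A sketch of why both spellings are needed: BSD's _IOW(g, n, t) folds
+ * sizeof (t) into the command word (its IOCPARM_LEN field), so
+ * _IOW('h', 4, user32_addr_t) and _IOW('h', 4, user64_addr_t) encode to
+ * different ioctl numbers for what is logically the same request. The
+ * ioctl switch accepts both so that 32-bit and 64-bit callers each
+ * reach the DTRACEHIOC_ADDDOF handler.
+ */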
/*
* DTrace Error Hashing
(where) = ((thr + DIF_VARIABLE_MAX) & \
(((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
+#elif defined(__arm__)
+/* FIXME: three function calls!!! */
+#define DTRACE_TLS_THRKEY(where) { \
+ uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
+ uint64_t thr = (uintptr_t)current_thread(); \
+ uint_t pid = (uint_t)dtrace_proc_selfpid(); \
+ ASSERT(intr < (1 << 3)); \
+ (where) = (((thr << 32 | pid) + DIF_VARIABLE_MAX) & \
+ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
+}
+#elif defined (__arm64__)
+/* FIXME: two function calls!! */
+#define DTRACE_TLS_THRKEY(where) { \
+ uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
+ uint64_t thr = (uintptr_t)current_thread(); \
+ ASSERT(intr < (1 << 3)); \
+ (where) = ((thr + DIF_VARIABLE_MAX) & \
+ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
+}
#else
#error Unknown architecture
#endif
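+/*
+ * Sketch of the key layout all variants above produce:
+ *
+ *	bits 63..61	ml_at_interrupt_context() flag (one bit in practice,
+ *			hence the conservative ASSERT(intr < (1 << 3)))
+ *	bits 60..0	thread identity plus DIF_VARIABLE_MAX, modulo 2^61;
+ *			32-bit arm widens the thread pointer with the pid,
+ *			presumably to avoid cross-task collisions
+ *
+ * Offsetting by DIF_VARIABLE_MAX keeps TLS keys disjoint from ordinary
+ * DIF variable identifiers.
+ */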
if (error)
return (error);
- if (value != 0 && value != 1)
- return (ERANGE);
+ if (req->newptr) {
+ if (value != 0 && value != 1)
+ return (ERANGE);
- lck_mtx_lock(&dtrace_lock);
- dtrace_provide_private_probes = value;
- lck_mtx_unlock(&dtrace_lock);
+ /*
+ * We do not allow changing this back to zero, as private probes
+ * would still be left registered
+ */
+ if (value != 1)
+ return (EPERM);
+ lck_mtx_lock(&dtrace_lock);
+ dtrace_provide_private_probes = value;
+ lck_mtx_unlock(&dtrace_lock);
+ }
return (0);
}
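+/*
+ * The handler above is a one-way switch: a write is range-checked, but
+ * only the 0 -> 1 transition is honored, since providers may already
+ * have registered private probes that could not be torn down if the
+ * value were lowered again. Assuming the usual SYSCTL_PROC registration
+ * for this handler, it would be exercised as:
+ *
+ *	sysctl kern.dtrace.provide_private_probes=1
+ */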
* DTrace subroutines (DIF_SUBR_*) should use this helper to implement
* appropriate memory access protection.
*/
-static int
+int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
dtrace_vstate_t *vstate)
{
* APPLE NOTE: Account for introduction of __dtrace_probe()
*/
int aframes = mstate->dtms_probe->dtpr_aframes + 3;
+ dtrace_vstate_t *vstate = &state->dts_vstate;
dtrace_provider_t *pv;
uint64_t val;
}
else
- val = dtrace_getarg(ndx, aframes);
+ val = dtrace_getarg(ndx, aframes, mstate, vstate);
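+ /*
+ * dtrace_getarg() now receives the DIF machine and variable state,
+ * presumably so the per-architecture implementations can vet any
+ * probe-stack loads through dtrace_canload() (made non-static above)
+ * rather than dereferencing frames unchecked.
+ */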
/*
* This is regrettably required to keep the compiler
case DIF_VAR_ZONENAME:
- {
- /* scratch_size is equal to length('global') + 1 for the null-terminator. */
- char *zname = (char *)mstate->dtms_scratch_ptr;
- size_t scratch_size = 6 + 1;
+ {
+ /* scratch_size is equal to length('global') + 1 for the null-terminator. */
+ char *zname = (char *)mstate->dtms_scratch_ptr;
+ size_t scratch_size = 6 + 1;
if (!dtrace_priv_proc(state))
return (0);
- /* The scratch allocation's lifetime is that of the clause. */
- if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
- DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
- return 0;
- }
+ /* The scratch allocation's lifetime is that of the clause. */
+ if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
+ return 0;
+ }
+
- mstate->dtms_scratch_ptr += scratch_size;
+ mstate->dtms_scratch_ptr += scratch_size;
- /* The kernel does not provide zonename, it will always return 'global'. */
- strlcpy(zname, "global", scratch_size);
+ /* The kernel does not provide zonename; it always returns 'global'. */
+ strlcpy(zname, "global", scratch_size);
- return ((uint64_t)(uintptr_t)zname);
- }
+ return ((uint64_t)(uintptr_t)zname);
+ }
+#if MONOTONIC
+ case DIF_VAR_CPUINSTRS:
+ return mt_cur_cpu_instrs();
+
+ case DIF_VAR_CPUCYCLES:
+ return mt_cur_cpu_cycles();
+
+ case DIF_VAR_VINSTRS:
+ return mt_cur_thread_instrs();
+
+ case DIF_VAR_VCYCLES:
+ return mt_cur_thread_cycles();
+#else /* MONOTONIC */
+ case DIF_VAR_CPUINSTRS: /* FALLTHROUGH */
+ case DIF_VAR_CPUCYCLES: /* FALLTHROUGH */
+ case DIF_VAR_VINSTRS: /* FALLTHROUGH */
+ case DIF_VAR_VCYCLES: /* FALLTHROUGH */
+ return 0;
+#endif /* !MONOTONIC */
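+ /*
+ * These four variables expose the monotonic per-CPU and per-thread
+ * instruction and cycle counters to DIF. On kernels built without
+ * MONOTONIC they read as 0 instead of raising an error, so a script
+ * that references them stays loadable either way.
+ */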
case DIF_VAR_UID:
if (!dtrace_priv_proc_relaxed(state))
char c, target = (char)tupregs[1].dttk_value;
if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
- regs[rd] = NULL;
+ regs[rd] = 0;
break;
}
addr_limit = addr + lim;
*/
regs[rd] = 0;
mstate->dtms_strtok = 0;
- mstate->dtms_strtok_limit = NULL;
+ mstate->dtms_strtok_limit = 0;
break;
}
case DIF_SUBR_LLTOSTR: {
int64_t i = (int64_t)tupregs[0].dttk_value;
- int64_t val = i < 0 ? i * -1 : i;
- uint64_t size = 22; /* enough room for 2^64 in decimal */
+ uint64_t val, digit;
+ uint64_t size = 65; /* enough room for 2^64 in binary */
char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
+ int base = 10;
+
+ if (nargs > 1) {
+ if ((base = tupregs[1].dttk_value) <= 1 ||
+ base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
+ *flags |= CPU_DTRACE_ILLOP;
+ break;
+ }
+ }
+
+ val = (base == 10 && i < 0) ? i * -1 : i;
if (!DTRACE_INSCRATCH(mstate, size)) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
break;
}
- for (*end-- = '\0'; val; val /= 10)
- *end-- = '0' + (val % 10);
+ for (*end-- = '\0'; val; val /= base) {
+ if ((digit = val % base) <= '9' - '0') {
+ *end-- = '0' + digit;
+ } else {
+ *end-- = 'a' + (digit - ('9' - '0') - 1);
+ }
+ }
- if (i == 0)
+ if (i == 0 && base == 16)
*end-- = '0';
- if (i < 0)
+ if (base == 16)
+ *end-- = 'x';
+
+ if (i == 0 || base == 8 || base == 16)
+ *end-- = '0';
+
+ if (i < 0 && base == 10)
*end-- = '-';
regs[rd] = (uintptr_t)end + 1;
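+ /*
+ * Worked examples of the generalized conversion, tracing the loop above:
+ *
+ *	lltostr(255, 16)  digits "ff", then 'x' and '0' prepended -> "0xff"
+ *	lltostr(9, 8)     digits "11", leading '0' prepended      -> "011"
+ *	lltostr(-255)     val = 255 -> "255", '-' prepended       -> "-255"
+ *	lltostr(0)        loop never runs; the i == 0 clause
+ *			  supplies the lone '0'                   -> "0"
+ *
+ * Note that only base 10 folds the sign: in any other base a negative
+ * value prints as its two's-complement bit pattern.
+ */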
if (pred != NULL) {
dtrace_difo_t *dp = pred->dtp_difo;
- int rval;
+ uint64_t rval;
rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
- priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC;
+ priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER;
}
else {
priv = DTRACE_PRIV_ALL;
}
+ *uidp = 0;
+ *zoneidp = 0;
} else {
*uidp = crgetuid(cr);
*zoneidp = crgetzoneid(cr);
int len, rc, best = INT_MAX, nmatched = 0;
dtrace_id_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
/*
* If the probe ID is specified in the key, just lookup by ID and
*idp = (dtrace_provider_id_t)provider;
if (pops == &dtrace_provider_ops) {
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dtrace_anon.dta_enabling == NULL);
/*
*/
ASSERT(old == dtrace_provider);
ASSERT(dtrace_devi != NULL);
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
self = 1;
if (dtrace_provider->dtpv_next != NULL) {
dtrace_id_t id;
if (provider == dtrace_provider) {
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
} else {
lck_mtx_lock(&dtrace_lock);
}
static dtrace_probe_t *
dtrace_probe_lookup_id(dtrace_id_t id)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_id_t)dtrace_nprobes)
return (NULL);
struct modctl *ctl;
int all = 0;
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
if (prv == NULL) {
all = 1;
uid_t uid;
zoneid_t zoneid;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dtrace_ecb_create_cache = NULL;
dof_hdr_t *dof = (dof_hdr_t *)daddr;
uint32_t i;
- lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
dof_hdr_t *dof = (dof_hdr_t *)daddr;
uint32_t i;
- lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < dof->dofh_secnum; i++) {
dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
{
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dp->dtdo_refcnt++;
ASSERT(dp->dtdo_refcnt != 0);
int oldsvars, osz, nsz, otlocals, ntlocals;
uint_t i, id;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
{
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_refcnt != 0);
for (i = 0; i < dp->dtdo_varlen; i++) {
{
dtrace_predicate_t *pred;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp->dtdo_refcnt != 0);
pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
static void
dtrace_predicate_hold(dtrace_predicate_t *pred)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
dtrace_difo_t *dp = pred->dtp_difo;
#pragma unused(dp) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
ASSERT(pred->dtp_refcnt > 0);
dtrace_ecb_t *ecb;
dtrace_epid_t epid;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
ecb->dte_predicate = NULL;
{
dtrace_probe_t *probe = ecb->dte_probe;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_next == NULL);
if (probe == NULL) {
dtrace_optval_t nframes=0, strsize;
uint64_t arg = desc->dtad_arg;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
if (DTRACEACT_ISAGG(desc->dtad_kind)) {
dtrace_ecb_t *pecb, *prev = NULL;
dtrace_probe_t *probe = ecb->dte_probe;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (probe == NULL) {
/*
dtrace_predicate_t *pred;
dtrace_epid_t epid = ecb->dte_epid;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(ecb->dte_next == NULL);
ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
dtrace_provider_t *prov;
dtrace_ecbdesc_t *desc = enab->dten_current;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(state != NULL);
ecb = dtrace_ecb_add(state, probe);
dtrace_ecb_t *ecb;
#pragma unused(ecb) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_epid_t)state->dts_necbs)
return (NULL);
dtrace_aggregation_t *agg;
#pragma unused(agg) /* __APPLE__ */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (id == 0 || id > (dtrace_aggid_t)state->dts_naggregations)
return (NULL);
dtrace_buffer_t *buf;
size_t size_before_alloc = dtrace_buffer_memory_inuse;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (size > (size_t)dtrace_nonroot_maxsize &&
!PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
ASSERT(buf->dtb_xamot == NULL);
-
/* DTrace, please do not eat all the memory. */
if (dtrace_buffer_canalloc(size) == B_FALSE)
goto err;
dtrace_buffer_polish(dtrace_buffer_t *buf)
{
ASSERT(buf->dtb_flags & DTRACEBUF_RING);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
return;
dtrace_ecbdesc_t *ep;
dtrace_vstate_t *vstate = enab->dten_vstate;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < enab->dten_ndesc; i++) {
dtrace_actdesc_t *act, *next;
{
dtrace_state_t *state;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
ASSERT(enab->dten_vstate != NULL);
dtrace_enabling_t *new, *enab;
int found = 0, err = ENOENT;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
{
dtrace_enabling_t *enab, *next;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
/*
* Iterate over all retained enablings, destroy the enablings retained
int i = 0;
int total_matched = 0, matched = 0;
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; i < enab->dten_ndesc; i++) {
dtrace_ecbdesc_t *ep = enab->dten_desc[i];
dtrace_probedesc_t desc;
dtrace_genid_t gen;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
if (prv == NULL) {
all = 1;
roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dof = dt_kmem_zalloc_aligned(len, 8, KM_SLEEP);
dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
{
dof_hdr_t hdr, *dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
/*
* First, we're going to copyin() the sizeof (dof_hdr_t).
{
dof_hdr_t hdr, *dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
/*
* First, we're going to copyin() the sizeof (dof_hdr_t).
dtrace_enabling_t *enab;
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
/*
dtrace_dynvar_t *dvar, *next, *start;
size_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
bzero(dstate, sizeof (dtrace_dstate_t));
static void
dtrace_dstate_fini(dtrace_dstate_t *dstate)
{
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
if (dstate->dtds_base == NULL)
return;
dtrace_optval_t *opt;
int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
/* Cause restart */
*new_state = NULL;
size_t limit = buf->dtb_size;
int flags = 0, rval;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(which < DTRACEOPT_MAX);
ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
(state == dtrace_anon.dta_state &&
{
dtrace_icookie_t cookie;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
state->dts_activity != DTRACE_ACTIVITY_DRAINING)
dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
dtrace_optval_t val)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
return (EBUSY);
int nspec = state->dts_nspeculations;
uint32_t match;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
/*
* First, retract any retained enablings for this state.
{
dtrace_state_t *state;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if ((state = dtrace_anon.dta_state) == NULL) {
ASSERT(dtrace_anon.dta_enabling == NULL);
dof_hdr_t *dof;
char c[32]; /* enough for "dof-data-" + digits */
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
for (i = 0; ; i++) {
(void) snprintf(c, sizeof (c), "dof-data-%d", i);
dtrace_vstate_t *vstate;
uint_t i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if (help == NULL || gen > help->dthps_generation)
return (EINVAL);
dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
dof_helper_t *dofhp)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(&dtrace_meta_lock);
lck_mtx_lock(&dtrace_lock);
dtrace_helper_provider_t *hprov, **tmp_provs;
uint_t tmp_maxprovs, i;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
help = p->p_dtrace_helpers;
ASSERT(help != NULL);
int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
uintptr_t daddr = (uintptr_t)dof;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if ((help = p->p_dtrace_helpers) == NULL)
help = dtrace_helpers_create(p);
static int
dtrace_lazy_dofs_duplicate(proc_t *parent, proc_t *child)
{
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_assert(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_assert(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
lck_rw_lock_shared(&dtrace_dof_mode_lock);
lck_mtx_lock(&parent->p_dtrace_sprlock);
{
dtrace_helpers_t *help;
- lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
ASSERT(p->p_dtrace_helpers == NULL);
help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
struct modctl *nextp, *prevp;
ASSERT(newctl != NULL);
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
// Insert new module at the front of the list,
static modctl_t *
dtrace_modctl_lookup(struct kmod_info * kmod)
{
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
struct modctl * ctl;
dtrace_modctl_remove(struct modctl * ctl)
{
ASSERT(ctl != NULL);
- lck_mtx_assert(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
modctl_t *prevp, *nextp, *curp;
// Remove stale chain first
static int
dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
{
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
lck_mtx_lock(&dtrace_lock);
switch (what) {
register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
sizeof (dtrace_dstate_percpu_t) * (int)NCPU, DTRACE_STATE_ALIGN,
NULL, NULL, NULL, NULL, NULL, 0);
- lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
offsetof(dtrace_probe_t, dtpr_nextmod),
dtrace_provider, NULL, NULL, "END", 0, NULL);
dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
+#elif (defined(__arm__) || defined(__arm64__))
+ dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
+ dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "END", 1, NULL);
+ dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
+ dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
#else
#error Unknown Architecture
#endif
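+ /*
+ * The integer argument to dtrace_probe_create() here is the aframes
+ * count: the number of artificial kernel frames to skip when walking
+ * the stack from the probe site. It differs per architecture with the
+ * depth of the dtrace_probe() call path on each.
+ */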
return KERN_SUCCESS;
switch (cmd) {
+#if defined (__arm64__)
+ case DTRACEHIOC_ADDDOF_U32:
+ case DTRACEHIOC_ADDDOF_U64:
+#else
case DTRACEHIOC_ADDDOF:
+#endif /* __arm64__ */
{
dof_helper_t *dhp = NULL;
size_t dof_ioctl_data_size;
int multi_dof_claimed = 0;
proc_t* p = current_proc();
+ /*
+ * If this is a restricted process and dtrace is restricted,
+ * do not allow DOFs to be registered
+ */
+ if (dtrace_is_restricted() &&
+ !dtrace_are_restrictions_relaxed() &&
+ !dtrace_can_attach_to_proc(current_proc())) {
+ return (EACCES);
+ }
+
/*
* Read the number of DOF sections being passed in.
*/
dtrace_dof_error(NULL, "failed to copyin dofiod_count");
return (EFAULT);
}
-
+
/*
* Range check the count.
*/
/*
* We have our snapshot; now copy it out.
*/
- if (copyout(buf->dtb_xamot, (user_addr_t)desc.dtbd_data,
+ if (dtrace_buffer_copyout(buf->dtb_xamot,
+ (user_addr_t)desc.dtbd_data,
buf->dtb_xamot_offset) != 0) {
lck_mtx_unlock(&dtrace_lock);
return (EFAULT);
* makes no sense...
*/
if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) {
+#if CONFIG_EMBEDDED
+ /* Disable DOF mode by default for performance reasons */
+ dtrace_dof_mode = DTRACE_DOF_MODE_NEVER;
+#else
dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
+#endif
}
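+ /*
+ * The boot-arg still overrides the default on either configuration;
+ * e.g. an embedded kernel booted with dtrace_dof_mode=1 (assuming the
+ * conventional DTRACE_DOF_MODE_LAZY_ON encoding of 1) gets lazy DOF
+ * registration back.
+ */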
/*
if (dtrace_module_loaded(&fake_kernel_kmod, 0) != 0) {
printf("dtrace_postinit: Could not register mach_kernel modctl\n");
}
+
+ if (!PE_parse_boot_argn("dtrace_provide_private_probes", &dtrace_provide_private_probes, sizeof (dtrace_provide_private_probes))) {
+ dtrace_provide_private_probes = 0;
+ }
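+ /*
+ * Like the sysctl of the same name, this is effectively one-way:
+ * boot with dtrace_provide_private_probes=1 to enable private
+ * probes from startup; 0 remains the default.
+ */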
(void)OSKextRegisterKextsWithDTrace();
}