diff --git a/bsd/dev/dtrace/dtrace.c b/bsd/dev/dtrace/dtrace.c
index f34ed4fd023a218df69ef41c135668c31ec5be65..bdbe6a8746acf34b8caf87bd2286425b469227c7 100644
--- a/bsd/dev/dtrace/dtrace.c
+++ b/bsd/dev/dtrace/dtrace.c
@@ -970,6 +970,8 @@ dtrace_priv_proc_common_zone(dtrace_state_t *state)
 
        return (0);
 #else
+#pragma unused(state)
+
        return 1; /* Darwin doesn't do zones. */
 #endif /* __APPLE__ */
 }
@@ -1003,6 +1005,11 @@ dtrace_priv_proc_destructive(dtrace_state_t *state)
 {
        int action = state->dts_cred.dcr_action;
 
+#if defined(__APPLE__)
+       if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+               goto bad;
+#endif /* __APPLE__ */
+
        if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
            dtrace_priv_proc_common_zone(state) == 0)
                goto bad;
@@ -1026,6 +1033,11 @@ bad:
 static int
 dtrace_priv_proc_control(dtrace_state_t *state)
 {
+#if defined(__APPLE__)
+       if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+               goto bad;
+#endif /* __APPLE__ */
+
        if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
                return (1);
 
@@ -1034,6 +1046,9 @@ dtrace_priv_proc_control(dtrace_state_t *state)
            dtrace_priv_proc_common_nocd())
                return (1);
 
+#if defined(__APPLE__)
+bad:
+#endif /* __APPLE__ */
        cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
 
        return (0);
@@ -1042,6 +1057,28 @@ dtrace_priv_proc_control(dtrace_state_t *state)
 static int
 dtrace_priv_proc(dtrace_state_t *state)
 {
+#if defined(__APPLE__)
+       if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+               goto bad;
+#endif /* __APPLE__ */
+
+       if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
+               return (1);
+
+#if defined(__APPLE__)
+bad:
+#endif /* __APPLE__ */
+       cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
+
+       return (0);
+}
+
+#if defined(__APPLE__)
+/* dtrace_priv_proc() omitting the P_LNOATTACH check. For PID and EXECNAME accesses. */
+static int
+dtrace_priv_proc_relaxed(dtrace_state_t *state)
+{
+
        if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
                return (1);
 
@@ -1049,6 +1086,7 @@ dtrace_priv_proc(dtrace_state_t *state)
 
        return (0);
 }
+#endif /* __APPLE__ */
 
 static int
 dtrace_priv_kernel(dtrace_state_t *state)
@@ -1088,7 +1126,7 @@ dtrace_dynvar_clean(dtrace_dstate_t *dstate)
        dtrace_dstate_percpu_t *dcpu;
        int i, work = 0;
 
-       for (i = 0; i < NCPU; i++) {
+       for (i = 0; i < (int)NCPU; i++) {
                dcpu = &dstate->dtds_percpu[i];
 
                ASSERT(dcpu->dtdsc_rinsing == NULL);
@@ -1138,7 +1176,7 @@ dtrace_dynvar_clean(dtrace_dstate_t *dstate)
 
        dtrace_sync();
 
-       for (i = 0; i < NCPU; i++) {
+       for (i = 0; i < (int)NCPU; i++) {
                dcpu = &dstate->dtds_percpu[i];
 
                if (dcpu->dtdsc_rinsing == NULL)
@@ -1483,7 +1521,7 @@ retry:
                                case DTRACE_DSTATE_CLEAN: {
                                        void *sp = &dstate->dtds_state;
 
-                                       if (++cpu >= NCPU)
+                                       if (++cpu >= (int)NCPU)
                                                cpu = 0;
 
                                        if (dcpu->dtdsc_dirty != NULL &&
@@ -1631,6 +1669,7 @@ retry:
 static void
 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
 {
+#pragma unused(arg)
        if (nval < *oval)
                *oval = nval;
 }
@@ -1639,6 +1678,7 @@ dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
 static void
 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
 {
+#pragma unused(arg)
        if (nval > *oval)
                *oval = nval;
 }
@@ -1708,6 +1748,7 @@ dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
 static void
 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
 {
+#pragma unused(arg)
        data[0]++;
        data[1] += nval;
 }
@@ -1716,6 +1757,7 @@ dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
 static void
 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
 {
+#pragma unused(nval,arg)
        *oval = *oval + 1;
 }
 
@@ -1723,6 +1765,7 @@ dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
 static void
 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
 {
+#pragma unused(arg)
        *oval += nval;
 }
 
@@ -1737,6 +1780,7 @@ static void
 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
     intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
 {
+#pragma unused(arg)
        dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
        uint32_t i, ndx, size, fsize;
        uint32_t align = sizeof (uint64_t) - 1;
@@ -2688,7 +2732,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
 
 #else
        case DIF_VAR_PID:
-               if (!dtrace_priv_proc(state))
+               if (!dtrace_priv_proc_relaxed(state))
                        return (0);
 
                /*
@@ -2717,7 +2761,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
                return ((uint64_t)curthread->t_procp->p_ppid);
 #else
        case DIF_VAR_PPID:
-               if (!dtrace_priv_proc(state))
+               if (!dtrace_priv_proc_relaxed(state))
                        return (0);
 
                /*
@@ -2779,7 +2823,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
                    mstate->dtms_scratch_base + mstate->dtms_scratch_size)
                        return 0;
                        
-               if (!dtrace_priv_proc(state))
+               if (!dtrace_priv_proc_relaxed(state))
                        return (0);
 
                mstate->dtms_scratch_ptr += scratch_size;
@@ -3040,7 +3084,12 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
 
                if (subr == DIF_SUBR_COPYIN) {
                        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
                        dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#else
+                       if (dtrace_priv_proc(state))
+                               dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
                        DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
                }
 
@@ -3065,7 +3114,12 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
                }
 
                DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
                dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#else
+               if (dtrace_priv_proc(state))
+                       dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
                DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
                break;
        }
@@ -3090,7 +3144,12 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
                }
 
                DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
                dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
+#else
+               if (dtrace_priv_proc(state))
+                       dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
                DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
 
                ((char *)dest)[size - 1] = '\0';
@@ -3481,7 +3540,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
                 * string -- setting a bit in the map for every character
                 * found in the token string.
                 */
-               for (i = 0; i < sizeof (tokmap); i++)
+               for (i = 0; i < (int)sizeof (tokmap); i++)
                        tokmap[i] = 0;
 
                for (; tokaddr < toklimit; tokaddr++) {
@@ -4527,7 +4586,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
                                size_t sz = v->dtdv_type.dtdt_size;
 
                                sz += sizeof (uint64_t);
-                               ASSERT(svar->dtsv_size == NCPU * sz);
+                               ASSERT(svar->dtsv_size == (int)NCPU * sz);
                                a += CPU->cpu_id * sz;
 
                                if (*(uint8_t *)a == UINT8_MAX) {
@@ -4544,7 +4603,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
                                break;
                        }
 
-                       ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
+                       ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
                        tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
                        regs[rd] = tmp[CPU->cpu_id];
                        break;
@@ -4566,7 +4625,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
                                size_t sz = v->dtdv_type.dtdt_size;
 
                                sz += sizeof (uint64_t);
-                               ASSERT(svar->dtsv_size == NCPU * sz);
+                               ASSERT(svar->dtsv_size == (int)NCPU * sz);
                                a += CPU->cpu_id * sz;
 
                                if (regs[rd] == NULL) {
@@ -4582,7 +4641,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
                                break;
                        }
 
-                       ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
+                       ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
                        tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
                        tmp[CPU->cpu_id] = regs[rd];
                        break;
@@ -5295,6 +5354,31 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
        now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */
 #endif /* __APPLE__ */
 
+#if defined(__APPLE__)
+       /*
+        * A provider may call dtrace_probe_error() in lieu of dtrace_probe() in some circumstances.
+        * See, e.g. fasttrap_isa.c. However the provider has no access to ECB context, so passes
+        * NULL through "arg0" and the probe_id of the overridden probe as arg1. Detect that here
+        * and cons up a viable state (from the probe_id).
+        */
+       if (dtrace_probeid_error == id && NULL == arg0) {
+               dtrace_id_t ftp_id = (dtrace_id_t)arg1;
+               dtrace_probe_t *ftp_probe = dtrace_probes[ftp_id - 1];
+               dtrace_ecb_t *ftp_ecb = ftp_probe->dtpr_ecb;
+
+               if (NULL != ftp_ecb) {
+                       dtrace_state_t *ftp_state = ftp_ecb->dte_state;
+
+                       arg0 = (uint64_t)(uintptr_t)ftp_state;
+                       arg1 = ftp_ecb->dte_epid;
+                       /*
+                        * args[2-4] established by caller.
+                        */
+                       ftp_state->dts_arg_error_illval = -1; /* arg5 */
+               }
+       }
+#endif /* __APPLE__ */
+
        mstate.dtms_probe = probe;
        mstate.dtms_arg[0] = arg0;
        mstate.dtms_arg[1] = arg1;
@@ -5327,7 +5411,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
 #ifdef lint
                uint64_t val = 0;
 #else
-               uint64_t val;
+               uint64_t val = 0;
 #endif
 
                mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
@@ -5368,16 +5452,6 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
                        }
                }
 
-#if defined(__APPLE__)
-        /*
-         * If the thread on which this probe has fired belongs to a process marked P_LNOATTACH
-         * then this enabling is not permitted to observe it. Move along, nothing to see here.
-         */
-        if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
-            continue;
-        }
-#endif /* __APPLE__ */
-
                if (ecb->dte_cond) {
                        /*
                         * If the dte_cond bits indicate that this
@@ -6469,6 +6543,7 @@ dtrace_match_string(const char *s, const char *p, int depth)
 static int
 dtrace_match_nul(const char *s, const char *p, int depth)
 {
+#pragma unused(s,p,depth)
        return (1); /* always match the empty pattern */
 }
 
@@ -6476,6 +6551,7 @@ dtrace_match_nul(const char *s, const char *p, int depth)
 static int
 dtrace_match_nonzero(const char *s, const char *p, int depth)
 {
+#pragma unused(p,depth)
        return (s != NULL && s[0] != '\0');
 }
 
@@ -7230,7 +7306,6 @@ dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
        }
 
        do {
-               kmod_info_t *ktl;
                /*
                 * First, call the blanket provide operation.
                 */
@@ -7256,10 +7331,10 @@ dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
 
                lck_mtx_unlock(&mod_lock);
 #else
-#if 0 /* XXX Workaround for PR_4643546 XXX */
+#if 0 /* FIXME: Workaround for PR_4643546 */
                simple_lock(&kmod_lock);
                
-               ktl = kmod;
+               kmod_info_t *ktl = kmod;
                while (ktl) {
                        prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ktl);
                        ktl = ktl->next;
@@ -8495,10 +8570,10 @@ dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
                        svarp = &vstate->dtvs_locals;
 
                        if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
-                               dsize = NCPU * (v->dtdv_type.dtdt_size +
+                               dsize = (int)NCPU * (v->dtdv_type.dtdt_size +
                                    sizeof (uint64_t));
                        else
-                               dsize = NCPU * sizeof (uint64_t);
+                               dsize = (int)NCPU * sizeof (uint64_t);
 
                        break;
 
@@ -9034,7 +9109,7 @@ dtrace_ecb_resize(dtrace_ecb_t *ecb)
                         */
                        diff = offs + sizeof (dtrace_aggid_t);
 
-                       if (diff = (diff & (sizeof (uint64_t) - 1)))
+                       if ((diff = (diff & (sizeof (uint64_t) - 1))))
                                offs += sizeof (uint64_t) - diff;
 
                        aggbase = offs - sizeof (dtrace_aggid_t);
@@ -9729,12 +9804,12 @@ dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
                 * of creating our own (saving both time and space).
                 */
                dtrace_ecb_t *cached = dtrace_ecb_create_cache;
-               dtrace_action_t *act = cached->dte_action;
+               dtrace_action_t *act_if = cached->dte_action;
 
-               if (act != NULL) {
-                       ASSERT(act->dta_refcnt > 0);
-                       act->dta_refcnt++;
-                       ecb->dte_action = act;
+               if (act_if != NULL) {
+                       ASSERT(act_if->dta_refcnt > 0);
+                       act_if->dta_refcnt++;
+                       ecb->dte_action = act_if;
                        ecb->dte_action_last = cached->dte_action_last;
                        ecb->dte_needed = cached->dte_needed;
                        ecb->dte_size = cached->dte_size;
@@ -9895,7 +9970,7 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
                return (EFBIG);
 
 #if defined(__APPLE__)
-       if (size > (sane_size / 8) / NCPU) /* As in kdbg_set_nkdbufs(), roughly. */
+       if (size > (sane_size / 8) / (int)NCPU) /* As in kdbg_set_nkdbufs(), roughly. */
                return (ENOMEM);
 #endif /* __APPLE__ */
 
@@ -9990,7 +10065,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
        intptr_t offs = buf->dtb_offset, soffs;
        intptr_t woffs;
        caddr_t tomax;
-       size_t total;
+       size_t total_off;
 
        if (buf->dtb_flags & DTRACEBUF_INACTIVE)
                return (-1);
@@ -10034,7 +10109,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
                goto out;
        }
 
-       total = needed + (offs & (align - 1));
+       total_off = needed + (offs & (align - 1));
 
        /*
         * For a ring buffer, life is quite a bit more complicated.  Before
@@ -10043,15 +10118,15 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
         * is required.)
         */
        if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
-           offs + total > buf->dtb_size) {
+           offs + total_off > buf->dtb_size) {
                woffs = buf->dtb_xamot_offset;
 
-               if (offs + total > buf->dtb_size) {
+               if (offs + total_off > buf->dtb_size) {
                        /*
                         * We can't fit in the end of the buffer.  First, a
                         * sanity check that we can fit in the buffer at all.
                         */
-                       if (total > buf->dtb_size) {
+                       if (total_off > buf->dtb_size) {
                                dtrace_buffer_drop(buf);
                                return (-1);
                        }
@@ -10094,7 +10169,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
                         * that the top of the buffer is aligned.
                         */
                        offs = 0;
-                       total = needed;
+                       total_off = needed;
                        buf->dtb_flags |= DTRACEBUF_WRAPPED;
                } else {
                        /*
@@ -10120,7 +10195,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
                        }
                }
 
-               while (offs + total > woffs) {
+               while (offs + total_off > woffs) {
                        dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
                        size_t size;
 
@@ -10160,7 +10235,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
                                if (offs == 0) {
                                        buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
                                        buf->dtb_offset = 0;
-                                       woffs = total;
+                                       woffs = total_off;
 
                                        while (woffs < buf->dtb_size)
                                                tomax[woffs++] = 0;
@@ -10267,7 +10342,7 @@ dtrace_buffer_free(dtrace_buffer_t *bufs)
 {
        int i;
 
-       for (i = 0; i < NCPU; i++) {
+       for (i = 0; i < (int)NCPU; i++) {
                dtrace_buffer_t *buf = &bufs[i];
 
                if (buf->dtb_tomax == NULL) {
@@ -10648,7 +10723,7 @@ static int
 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched)
 {
        dtrace_enabling_t *enab;
-       int matched, total = 0, err;
+       int matched, total_matched = 0, err;
 
        lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
        lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
@@ -10662,11 +10737,11 @@ dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched)
                if ((err = dtrace_enabling_match(enab, &matched)) != 0)
                        return (err);
 
-               total += matched;
+               total_matched += matched;
        }
 
        if (nmatched != NULL)
-               *nmatched = total;
+               *nmatched = total_matched;
 
        return (0);
 }
@@ -10758,6 +10833,7 @@ dtrace_enabling_provide(dtrace_provider_t *prv)
 static void
 dtrace_dof_error(dof_hdr_t *dof, const char *str)
 {
+#pragma unused(dof)
        if (dtrace_err_verbose)
                cmn_err(CE_WARN, "failed to process DOF: %s", str);
 
@@ -11089,7 +11165,7 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
        size_t ttl = 0;
        dof_difohdr_t *dofd;
        uintptr_t daddr = (uintptr_t)dof;
-       size_t max = dtrace_difo_maxsize;
+       size_t max_size = dtrace_difo_maxsize;
        int i, l, n;
 
        static const struct {
@@ -11154,7 +11230,7 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
                    dofd->dofd_links[l])) == NULL)
                        goto err; /* invalid section link */
 
-               if (ttl + subsec->dofs_size > max) {
+               if (ttl + subsec->dofs_size > max_size) {
                        dtrace_dof_error(dof, "exceeds maximum size");
                        goto err;
                }
@@ -11821,7 +11897,7 @@ static
 int
 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
 {
-       size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
+       size_t hashsize, maxper, min_size, chunksize = dstate->dtds_chunksize;
        void *base;
        uintptr_t limit;
        dtrace_dynvar_t *dvar, *next, *start;
@@ -11835,8 +11911,8 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
        if ((dstate->dtds_chunksize = chunksize) == 0)
                dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
 
-       if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
-               size = min;
+       if (size < (min_size = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
+               size = min_size;
 
        if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
                return (ENOMEM);
@@ -11844,7 +11920,7 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
        dstate->dtds_size = size;
        dstate->dtds_base = base;
        dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
-       bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
+       bzero(dstate->dtds_percpu, (int)NCPU * sizeof (dtrace_dstate_percpu_t));
 
        hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
 
@@ -11875,10 +11951,10 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
            ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
        limit = (uintptr_t)base + size;
 
-       maxper = (limit - (uintptr_t)start) / NCPU;
+       maxper = (limit - (uintptr_t)start) / (int)NCPU;
        maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
 
-       for (i = 0; i < NCPU; i++) {
+       for (i = 0; i < (int)NCPU; i++) {
                dstate->dtds_percpu[i].dtdsc_free = dvar = start;
 
                /*
@@ -11888,7 +11964,7 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
                 * whatever is left over.  In either case, we set the limit to
                 * be the limit of the dynamic variable space.
                 */
-               if (maxper == 0 || i == NCPU - 1) {
+               if (maxper == 0 || i == (int)NCPU - 1) {
                        limit = (uintptr_t)base + size;
                        start = NULL;
                } else {
@@ -12005,7 +12081,7 @@ dtrace_state_create(dev_t *devp, cred_t *cr)
        char c[30];
        dtrace_state_t *state;
        dtrace_optval_t *opt;
-       int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
+       int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
 
        lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
        lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
@@ -12244,7 +12320,7 @@ static int
 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
 {
        dtrace_optval_t *opt = state->dts_options, size;
-       processorid_t cpu;
+       processorid_t cpu = 0;
        int flags = 0, rval;
 
        lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
@@ -12364,7 +12440,7 @@ dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
        dtrace_buffer_t *buf;
        cyc_handler_t hdlr;
        cyc_time_t when;
-       int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
+       int rval = 0, i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
        dtrace_icookie_t cookie;
 
        lck_mtx_lock(&cpu_lock);
@@ -12742,7 +12818,7 @@ dtrace_state_destroy(dtrace_state_t *state)
        dtrace_ecb_t *ecb;
        dtrace_vstate_t *vstate = &state->dts_vstate;
        minor_t minor = getminor(state->dts_dev);
-       int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
+       int i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
        dtrace_speculation_t *spec = state->dts_speculations;
        int nspec = state->dts_nspeculations;
        uint32_t match;
@@ -13034,7 +13110,7 @@ dtrace_helper_trace(dtrace_helper_action_t *helper,
                if ((svar = vstate->dtvs_locals[i]) == NULL)
                        continue;
 
-               ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
+               ASSERT(svar->dtsv_size >= (int)NCPU * sizeof (uint64_t));
                ent->dtht_locals[i] =
                    ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
        }
@@ -13047,7 +13123,7 @@ dtrace_helper(int which, dtrace_mstate_t *mstate,
        uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
        uint64_t sarg0 = mstate->dtms_arg[0];
        uint64_t sarg1 = mstate->dtms_arg[1];
-       uint64_t rval;
+       uint64_t rval = 0;
        dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
        dtrace_helper_action_t *helper;
        dtrace_vstate_t *vstate;
@@ -13196,7 +13272,7 @@ dtrace_helper_destroygen(proc_t* p, int gen)
         * given generation number.
         */
        for (;;) {
-               dtrace_helper_provider_t *prov;
+               dtrace_helper_provider_t *prov = NULL;
 
                /*
                 * Look for a helper provider with the right generation. We
@@ -14774,7 +14850,7 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
            1, INT_MAX, 0);
 
        dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
-           sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
+           sizeof (dtrace_dstate_percpu_t) * (int)NCPU, DTRACE_STATE_ALIGN,
            NULL, NULL, NULL, NULL, NULL, 0);
 
        lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
@@ -14836,13 +14912,6 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
            dtrace_provider, NULL, NULL, "END", 0, NULL);
        dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
            dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
-#elif defined(__arm__)
-       dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
-           dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
-       dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
-           dtrace_provider, NULL, NULL, "END", 1, NULL);
-       dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
-           dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
 #else
 #error Unknown Architecture
 #endif /* __APPLE__ */
@@ -15016,6 +15085,7 @@ dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
 static int
 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
 {
+#pragma unused(flag,otyp,cred_p)
        minor_t minor = getminor(dev);
        dtrace_state_t *state;
 
@@ -15235,6 +15305,8 @@ dtrace_ioctl_helper(int cmd, caddr_t arg, int *rv)
 static int
 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 {
+#pragma unused(md)
+
        minor_t minor = getminor(dev);
        dtrace_state_t *state;
        int rval;
@@ -15739,7 +15811,7 @@ dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
                if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
                        return (EFAULT);
 
-               if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
+               if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= (int)NCPU)
                        return (EINVAL);
 
                lck_mtx_lock(&dtrace_lock);
@@ -15905,7 +15977,7 @@ dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
                nerrs = state->dts_errors;
                dstate = &state->dts_vstate.dtvs_dynvars;
 
-               for (i = 0; i < NCPU; i++) {
+               for (i = 0; i < (int)NCPU; i++) {
                        dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
 
                        stat.dtst_dyndrops += dcpu->dtdsc_drops;
@@ -16490,7 +16562,7 @@ dtrace_init( void )
                 * XXX Warn if state is LAZY_OFF? It won't break anything, but
                 * makes no sense...
                 */
-               if (!PE_parse_boot_arg("dtrace_dof_mode", &dtrace_dof_mode)) {
+               if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) {
                        dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
                }