+ /*
+ * Read the number of DOF sections being passed in.
+ */
+ if (copyin(user_address + offsetof(dof_ioctl_data_t, dofiod_count),
+ &dof_count,
+ sizeof(dof_count))) {
+ dtrace_dof_error(NULL, "failed to copyin dofiod_count");
+ return (EFAULT);
+ }
+
+ /*
+ * Range check the count; the cap is an arbitrary sane maximum that
+ * bounds the allocation below.
+ */
+ if (dof_count == 0 || dof_count > 1024) {
+ dtrace_dof_error(NULL, "dofiod_count is not valid");
+ return (EINVAL);
+ }
+
+ /*
+ * Allocate a correctly sized structure and copyin the data.
+ */
+ dof_ioctl_data_size = DOF_IOCTL_DATA_T_SIZE(dof_count);
+ if ((multi_dof = kmem_alloc(dof_ioctl_data_size, KM_SLEEP)) == NULL)
+ return (ENOMEM);
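+
+ /*
+ * A sketch of the sizing macro, assuming the usual layout in which
+ * dof_ioctl_data_t embeds one dof_helper_t and the rest trail it:
+ *
+ * #define DOF_IOCTL_DATA_T_SIZE(count) \
+ * (sizeof(dof_ioctl_data_t) + ((count) - 1) * sizeof(dof_helper_t))
+ *
+ * With dofiod_count capped above, the allocation is bounded.
+ */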
+
+ /* NOTE! From here on, exit via the cleanup label so multi_dof is freed -- no more returns. */
+ if (copyin(user_address, multi_dof, dof_ioctl_data_size) != 0) {
+ dtrace_dof_error(NULL, "failed copyin of dof_ioctl_data_t");
+ rval = EFAULT;
+ goto cleanup;
+ }
+
+ /*
+ * Check that the count didn't change between the first copyin and the
+ * second; a racing thread could otherwise grow dofiod_count after the
+ * buffer was sized, and the loop below would walk past the allocation.
+ */
+ if (multi_dof->dofiod_count != dof_count) {
+ rval = EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * Try to process lazily first. On success the callee sets
+ * multi_dof_claimed, taking ownership of multi_dof; the cleanup
+ * code below must then leave the allocation alone.
+ */
+ rval = dtrace_lazy_dofs_add(p, multi_dof, &multi_dof_claimed);
+
+ /*
+ * EACCES means the process is non-lazy; process the helpers
+ * eagerly here instead.
+ */
+ if (rval == EACCES) {
+ rval = 0;
+ /*
+ * Process each dof_helper_t
+ */
+ i = 0;
+ do {
+ dhp = &multi_dof->dofiod_helpers[i];
+
+ dof_hdr_t *dof = dtrace_dof_copyin(dhp->dofhp_dof, &rval);
+
+ if (dof != NULL) {
+ lck_mtx_lock(&dtrace_lock);
+
+ /*
+ * dtrace_helper_slurp() takes responsibility for the dof --
+ * it may free it now or it may save it and free it later.
+ * On success it returns a generation id, stored into dofhp_dof
+ * here so that the copyout below carries it to user-level.
+ */
+ if ((dhp->dofhp_dof = (uint64_t)dtrace_helper_slurp(p, dof, dhp)) == -1ULL) {
+ rval = EINVAL;
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+ }
+ } while (++i < multi_dof->dofiod_count && rval == 0);
+ }
+
+ /*
+ * We need to copyout the multi_dof struct, because it contains
+ * the generation (unique id) values needed to call DTRACEHIOC_REMOVE
+ *
+ * This could certainly be better optimized.
+ */
+ if (copyout(multi_dof, user_address, dof_ioctl_data_size) != 0) {
+ dtrace_dof_error(NULL, "failed copyout of dof_ioctl_data_t");
+ /* Don't overwrite pre-existing error code */
+ if (rval == 0) rval = EFAULT;
+ }
+
+ cleanup:
+ /*
+ * If we had to allocate struct memory, free it.
+ */
+ if (multi_dof != NULL && !multi_dof_claimed) {
+ kmem_free(multi_dof, dof_ioctl_data_size);
+ }
+
+ return (rval);
+ }
+
+ case DTRACEHIOC_REMOVE: {
+ int generation = *(int*)arg;
+ proc_t *p = current_proc();
+
+ /*
+ * Try lazy first.
+ */
+ int rval = dtrace_lazy_dofs_remove(p, generation);
+
+ /*
+ * EACCES means the process is non-lazy; destroy the generation
+ * directly.
+ */
+ if (rval == EACCES) {
+ lck_mtx_lock(&dtrace_lock);
+ rval = dtrace_helper_destroygen(p, generation);
+ lck_mtx_unlock(&dtrace_lock);
+ }
+
+ return (rval);
+ }
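+
+ /*
+ * A hypothetical user-level sequence tying the two ioctls together
+ * (fd, idata, and the DTRACEHIOC_ADDDOF name are illustrative):
+ *
+ * ioctl(fd, DTRACEHIOC_ADDDOF, &idata);
+ * // each idata.dofiod_helpers[i].dofhp_dof now holds a generation id
+ * int gen = (int)idata.dofiod_helpers[0].dofhp_dof;
+ * ioctl(fd, DTRACEHIOC_REMOVE, &gen);
+ */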
+
+ default:
+ break;
+ }
+
+ return (ENOTTY);
+}
+
+/*ARGSUSED*/
+static int
+dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
+{
+#pragma unused(md)
+ minor_t minor = getminor(dev);
+ dtrace_state_t *state;
+ int rval;
+
+ /* Darwin puts Helper on its own major device. */
+
+ state = ddi_get_soft_state(dtrace_softstate, minor);
+
+ if (state->dts_anon) {
+ ASSERT(dtrace_anon.dta_state == NULL);
+ state = state->dts_anon;
+ }
+
+ switch (cmd) {
+ case DTRACEIOC_PROVIDER: {
+ dtrace_providerdesc_t pvd;
+ dtrace_provider_t *pvp;
+
+ if (copyin(arg, &pvd, sizeof (pvd)) != 0)
+ return (EFAULT);
+
+ pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
+ lck_mtx_lock(&dtrace_provider_lock);
+
+ for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
+ if (strncmp(pvp->dtpv_name, pvd.dtvd_name, DTRACE_PROVNAMELEN) == 0)
+ break;
+ }
+
+ lck_mtx_unlock(&dtrace_provider_lock);
+
+ if (pvp == NULL)
+ return (ESRCH);
+
+ bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
+ bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
+ if (copyout(&pvd, arg, sizeof (pvd)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_EPROBE: {
+ dtrace_eprobedesc_t epdesc;
+ dtrace_ecb_t *ecb;
+ dtrace_action_t *act;
+ void *buf;
+ size_t size;
+ uintptr_t dest;
+ int nrecs;
+
+ if (copyin(arg, &epdesc, sizeof (epdesc)) != 0)
+ return (EFAULT);
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ if (ecb->dte_probe == NULL) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
+ epdesc.dtepd_uarg = ecb->dte_uarg;
+ epdesc.dtepd_size = ecb->dte_size;
+
+ nrecs = epdesc.dtepd_nrecs;
+ epdesc.dtepd_nrecs = 0;
+ for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
+ if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
+ continue;
+
+ epdesc.dtepd_nrecs++;
+ }
+
+ /*
+ * Now that we have the size, we need to allocate a temporary
+ * buffer in which to store the complete description. We need
+ * the temporary buffer to be able to drop dtrace_lock()
+ * across the copyout(), below.
+ */
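+ /*
+ * Note: sizeof (dtrace_eprobedesc_t) already includes the
+ * one-element dtepd_rec tail, so this size is a slight
+ * overestimate; the copyout below uses the exact byte count
+ * actually written (dest - buf).
+ */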
+ size = sizeof (dtrace_eprobedesc_t) +
+ (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
+
+ buf = kmem_alloc(size, KM_SLEEP);
+ dest = (uintptr_t)buf;
+
+ bcopy(&epdesc, (void *)dest, sizeof (epdesc));
+ dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
+
+ for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
+ if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
+ continue;
+
+ if (nrecs-- == 0)
+ break;
+
+ bcopy(&act->dta_rec, (void *)dest,
+ sizeof (dtrace_recdesc_t));
+ dest += sizeof (dtrace_recdesc_t);
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
+ kmem_free(buf, size);
+ return (EFAULT);
+ }
+
+ kmem_free(buf, size);
+ return (0);
+ }
+
+ case DTRACEIOC_AGGDESC: {
+ dtrace_aggdesc_t aggdesc;
+ dtrace_action_t *act;
+ dtrace_aggregation_t *agg;
+ int nrecs;
+ uint32_t offs;
+ dtrace_recdesc_t *lrec;
+ void *buf;
+ size_t size;
+ uintptr_t dest;
+
+ if (copyin(arg, &aggdesc, sizeof (aggdesc)) != 0)
+ return (EFAULT);
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
+
+ nrecs = aggdesc.dtagd_nrecs;
+ aggdesc.dtagd_nrecs = 0;
+
+ offs = agg->dtag_base;
+ lrec = &agg->dtag_action.dta_rec;
+ aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
+
+ for (act = agg->dtag_first; ; act = act->dta_next) {
+ ASSERT(act->dta_intuple ||
+ DTRACEACT_ISAGG(act->dta_kind));
+
+ /*
+ * If this action has a record size of zero, it
+ * denotes an argument to the aggregating action.
+ * Because the presence of this record doesn't (or
+ * shouldn't) affect the way the data is interpreted,
+ * we don't copy it out to save user-level the
+ * confusion of dealing with a zero-length record.
+ */
+ if (act->dta_rec.dtrd_size == 0) {
+ ASSERT(agg->dtag_hasarg);
+ continue;
+ }
+
+ aggdesc.dtagd_nrecs++;
+
+ if (act == &agg->dtag_action)
+ break;
+ }
+
+ /*
+ * Now that we have the size, we need to allocate a temporary
+ * buffer in which to store the complete description. We need
+ * the temporary buffer to be able to drop dtrace_lock()
+ * across the copyout(), below.
+ */
+ size = sizeof (dtrace_aggdesc_t) +
+ (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
+
+ buf = kmem_alloc(size, KM_SLEEP);
+ dest = (uintptr_t)buf;
+
+ bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
+ dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
+
+ for (act = agg->dtag_first; ; act = act->dta_next) {
+ dtrace_recdesc_t rec = act->dta_rec;
+
+ /*
+ * See the comment in the above loop for why we pass
+ * over zero-length records.
+ */
+ if (rec.dtrd_size == 0) {
+ ASSERT(agg->dtag_hasarg);
+ continue;
+ }
+
+ if (nrecs-- == 0)
+ break;
+
+ rec.dtrd_offset -= offs;
+ bcopy(&rec, (void *)dest, sizeof (rec));
+ dest += sizeof (dtrace_recdesc_t);
+
+ if (act == &agg->dtag_action)
+ break;
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
+ kmem_free(buf, size);
+ return (EFAULT);
+ }
+
+ kmem_free(buf, size);
+ return (0);
+ }
+
+ case DTRACEIOC_ENABLE: {
+ dof_hdr_t *dof;
+ dtrace_enabling_t *enab = NULL;
+ dtrace_vstate_t *vstate;
+ int err = 0;
+
+ *rv = 0;
+
+ /*
+ * If a NULL argument has been passed, we take this as our
+ * cue to reevaluate our enablings.
+ */
+ if (arg == 0) {
+ dtrace_enabling_matchall();
+
+ return (0);
+ }
+
+ if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
+ return (rval);
+
+ lck_mtx_lock(&cpu_lock);
+ lck_mtx_lock(&dtrace_lock);
+ vstate = &state->dts_vstate;
+
+ if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
+ lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&cpu_lock);
+ dtrace_dof_destroy(dof);
+ return (EBUSY);
+ }
+
+ if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&cpu_lock);
+ dtrace_dof_destroy(dof);
+ return (EINVAL);
+ }
+
+ if ((rval = dtrace_dof_options(dof, state)) != 0) {
+ dtrace_enabling_destroy(enab);
+ lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&cpu_lock);
+ dtrace_dof_destroy(dof);
+ return (rval);
+ }
+
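+ /*
+ * On a successful match the enabling is retained so it can be
+ * rematched later (e.g. as new probes appear); otherwise it is
+ * destroyed here.
+ */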
+ if ((err = dtrace_enabling_match(enab, rv)) == 0) {
+ err = dtrace_enabling_retain(enab);
+ } else {
+ dtrace_enabling_destroy(enab);
+ }
+
+ lck_mtx_unlock(&cpu_lock);
+ lck_mtx_unlock(&dtrace_lock);
+ dtrace_dof_destroy(dof);
+
+ return (err);
+ }
+
+ case DTRACEIOC_REPLICATE: {
+ dtrace_repldesc_t desc;
+ dtrace_probedesc_t *match = &desc.dtrpd_match;
+ dtrace_probedesc_t *create = &desc.dtrpd_create;
+ int err;
+
+ if (copyin(arg, &desc, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
+ match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
+ match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
+ match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
+
+ create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
+ create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
+ create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
+ create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
+
+ lck_mtx_lock(&dtrace_lock);
+ err = dtrace_enabling_replicate(state, match, create);
+ lck_mtx_unlock(&dtrace_lock);
+
+ return (err);
+ }
+
+ case DTRACEIOC_PROBEMATCH:
+ case DTRACEIOC_PROBES: {
+ dtrace_probe_t *probe = NULL;
+ dtrace_probedesc_t desc;
+ dtrace_probekey_t pkey;
+ dtrace_id_t i;
+ int m = 0;
+ uint32_t priv;
+ uid_t uid;
+ zoneid_t zoneid;
+
+ if (copyin(arg, &desc, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
+ desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
+ desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
+ desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
+
+ /*
+ * Before we attempt to match this probe, we want to give
+ * all providers the opportunity to provide it.
+ */
+ if (desc.dtpd_id == DTRACE_IDNONE) {
+ lck_mtx_lock(&dtrace_provider_lock);
+ dtrace_probe_provide(&desc, NULL);
+ lck_mtx_unlock(&dtrace_provider_lock);
+ desc.dtpd_id++;
+ }
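+
+ /*
+ * In the DTRACE_IDNONE case the increment above starts the
+ * search below at probe id 1, the first valid id.
+ */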
+
+ if (cmd == DTRACEIOC_PROBEMATCH) {
+ dtrace_probekey(&desc, &pkey);
+ pkey.dtpk_id = DTRACE_IDNONE;
+ }
+
+ dtrace_cred2priv(cr, &priv, &uid, &zoneid);
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if (cmd == DTRACEIOC_PROBEMATCH) {
+ /* Quiet compiler warning */
+ for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
+ if ((probe = dtrace_probes[i - 1]) != NULL &&
+ (m = dtrace_match_probe(probe, &pkey,
+ priv, uid, zoneid)) != 0)
+ break;
+ }
+
+ if (m < 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ } else {
+ /* Quiet compiler warning */
+ for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
+ if ((probe = dtrace_probes[i - 1]) != NULL &&
+ dtrace_match_priv(probe, priv, uid, zoneid))
+ break;
+ }
+ }
+
+ if (probe == NULL) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (ESRCH);
+ }
+
+ dtrace_probe_description(probe, &desc);
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (copyout(&desc, arg, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_PROBEARG: {
+ dtrace_argdesc_t desc;
+ dtrace_probe_t *probe;
+ dtrace_provider_t *prov;
+
+ if (copyin(arg, &desc, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ if (desc.dtargd_id == DTRACE_IDNONE)
+ return (EINVAL);
+
+ if (desc.dtargd_ndx == DTRACE_ARGNONE)
+ return (EINVAL);
+
+ lck_mtx_lock(&dtrace_provider_lock);
+ lck_mtx_lock(&mod_lock);
+ lck_mtx_lock(&dtrace_lock);
+
+ /* Quiet compiler warning */
+ if (desc.dtargd_id > (dtrace_id_t)dtrace_nprobes) {
+ lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&mod_lock);
+ lck_mtx_unlock(&dtrace_provider_lock);
+ return (EINVAL);
+ }
+
+ if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
+ lck_mtx_unlock(&dtrace_lock);
+ lck_mtx_unlock(&mod_lock);
+ lck_mtx_unlock(&dtrace_provider_lock);
+ return (EINVAL);
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ prov = probe->dtpr_provider;
+
+ if (prov->dtpv_pops.dtps_getargdesc == NULL) {
+ /*
+ * There isn't any typed information for this probe.
+ * Set the argument number to DTRACE_ARGNONE.
+ */
+ desc.dtargd_ndx = DTRACE_ARGNONE;
+ } else {
+ desc.dtargd_native[0] = '\0';
+ desc.dtargd_xlate[0] = '\0';
+ desc.dtargd_mapping = desc.dtargd_ndx;
+
+ prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
+ probe->dtpr_id, probe->dtpr_arg, &desc);
+ }
+
+ lck_mtx_unlock(&mod_lock);
+ lck_mtx_unlock(&dtrace_provider_lock);
+
+ if (copyout(&desc, arg, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_GO: {
+ processorid_t cpuid;
+ rval = dtrace_state_go(state, &cpuid);
+
+ if (rval != 0)
+ return (rval);
+
+ if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_STOP: {
+ processorid_t cpuid;
+
+ lck_mtx_lock(&dtrace_lock);
+ rval = dtrace_state_stop(state, &cpuid);
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (rval != 0)
+ return (rval);
+
+ if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_DOFGET: {
+ dof_hdr_t hdr, *dof;
+ uint64_t len;
+
+ if (copyin(arg, &hdr, sizeof (hdr)) != 0)
+ return (EFAULT);
+
+ lck_mtx_lock(&dtrace_lock);
+ dof = dtrace_dof_create(state);
+ lck_mtx_unlock(&dtrace_lock);
+
+ len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
+ rval = copyout(dof, arg, len);
+ dtrace_dof_destroy(dof);
+
+ return (rval == 0 ? 0 : EFAULT);
+ }
+
+ case DTRACEIOC_AGGSNAP:
+ case DTRACEIOC_BUFSNAP: {
+ dtrace_bufdesc_t desc;
+ caddr_t cached;
+ dtrace_buffer_t *buf;
+
+ if (copyin(arg, &desc, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ if ((int)desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
+ return (EINVAL);
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if (cmd == DTRACEIOC_BUFSNAP) {
+ buf = &state->dts_buffer[desc.dtbd_cpu];
+ } else {
+ buf = &state->dts_aggbuffer[desc.dtbd_cpu];
+ }
+
+ if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
+ size_t sz = buf->dtb_offset;
+
+ if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EBUSY);
+ }
+
+ /*
+ * If this buffer has already been consumed, we're
+ * going to indicate that there's nothing left here
+ * to consume.
+ */
+ if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
+ lck_mtx_unlock(&dtrace_lock);
+
+ desc.dtbd_size = 0;
+ desc.dtbd_drops = 0;
+ desc.dtbd_errors = 0;
+ desc.dtbd_oldest = 0;
+ sz = sizeof (desc);
+
+ if (copyout(&desc, arg, sz) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ /*
+ * If this is a ring buffer that has wrapped, we want
+ * to copy the whole thing out.
+ */
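+ /*
+ * (dtrace_buffer_polish() presumably zeroes the stale region
+ * between the current offset and the wrapped data, so the full
+ * dtb_size copied out below exposes no leftover bytes.)
+ */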
+ if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
+ dtrace_buffer_polish(buf);
+ sz = buf->dtb_size;
+ }
+
+ if (copyout(buf->dtb_tomax, (user_addr_t)desc.dtbd_data, sz) != 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EFAULT);
+ }
+
+ desc.dtbd_size = sz;
+ desc.dtbd_drops = buf->dtb_drops;
+ desc.dtbd_errors = buf->dtb_errors;
+ desc.dtbd_oldest = buf->dtb_xamot_offset;
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (copyout(&desc, arg, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ buf->dtb_flags |= DTRACEBUF_CONSUMED;
+
+ return (0);
+ }
+
+ if (buf->dtb_tomax == NULL) {
+ ASSERT(buf->dtb_xamot == NULL);
+ lck_mtx_unlock(&dtrace_lock);
+ return (ENOENT);
+ }
+
+ cached = buf->dtb_tomax;
+ ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
+
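+ /*
+ * dtrace_buffer_switch() runs on the target CPU and exchanges the
+ * active (dtb_tomax) and inactive (dtb_xamot) buffers; the check
+ * below detects whether the switch actually took place.
+ */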
+ dtrace_xcall(desc.dtbd_cpu,
+ (dtrace_xcall_t)dtrace_buffer_switch, buf);
+
+ state->dts_errors += buf->dtb_xamot_errors;
+
+ /*
+ * If the buffers did not actually switch, then the cross call
+ * did not take place -- presumably because the given CPU is
+ * not in the ready set. If this is the case, we'll return
+ * ENOENT.
+ */
+ if (buf->dtb_tomax == cached) {
+ ASSERT(buf->dtb_xamot != cached);
+ lck_mtx_unlock(&dtrace_lock);
+ return (ENOENT);
+ }
+
+ ASSERT(cached == buf->dtb_xamot);
+
+ /*
+ * We have our snapshot; now copy it out.
+ */
+ if (copyout(buf->dtb_xamot, (user_addr_t)desc.dtbd_data,
+ buf->dtb_xamot_offset) != 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EFAULT);
+ }
+
+ desc.dtbd_size = buf->dtb_xamot_offset;
+ desc.dtbd_drops = buf->dtb_xamot_drops;
+ desc.dtbd_errors = buf->dtb_xamot_errors;
+ desc.dtbd_oldest = 0;
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ /*
+ * Finally, copy out the buffer description.
+ */
+ if (copyout(&desc, arg, sizeof (desc)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_CONF: {
+ dtrace_conf_t conf;
+
+ bzero(&conf, sizeof (conf));
+ conf.dtc_difversion = DIF_VERSION;
+ conf.dtc_difintregs = DIF_DIR_NREGS;
+ conf.dtc_diftupregs = DIF_DTR_NREGS;
+ conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
+
+ if (copyout(&conf, arg, sizeof (conf)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_STATUS: {
+ dtrace_status_t stat;
+ dtrace_dstate_t *dstate;
+ int i, j;
+ uint64_t nerrs;
+
+ /*
+ * See the comment in dtrace_state_deadman() for the reason for
+ * setting dts_laststatus to INT64_MAX before setting it to the
+ * correct value: briefly parking it at INT64_MAX (ordered by the
+ * producer barrier) keeps the deadman from computing a bogus
+ * interval from a partially updated value.
+ */
+ state->dts_laststatus = INT64_MAX;
+ dtrace_membar_producer();
+ state->dts_laststatus = dtrace_gethrtime();
+
+ bzero(&stat, sizeof (stat));
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (ENOENT);
+ }
+
+ if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
+ stat.dtst_exiting = 1;
+
+ nerrs = state->dts_errors;
+ dstate = &state->dts_vstate.dtvs_dynvars;
+
+ for (i = 0; i < (int)NCPU; i++) {
+ dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
+
+ stat.dtst_dyndrops += dcpu->dtdsc_drops;
+ stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
+ stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
+
+ if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
+ stat.dtst_filled++;
+
+ nerrs += state->dts_buffer[i].dtb_errors;
+
+ for (j = 0; j < state->dts_nspeculations; j++) {
+ dtrace_speculation_t *spec;
+ dtrace_buffer_t *buf;
+
+ spec = &state->dts_speculations[j];
+ buf = &spec->dtsp_buffer[i];
+ stat.dtst_specdrops += buf->dtb_xamot_drops;
+ }
+ }
+
+ stat.dtst_specdrops_busy = state->dts_speculations_busy;
+ stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
+ stat.dtst_stkstroverflows = state->dts_stkstroverflows;
+ stat.dtst_dblerrors = state->dts_dblerrors;
+ stat.dtst_killed =
+ (state->dts_activity == DTRACE_ACTIVITY_KILLED);
+ stat.dtst_errors = nerrs;
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (copyout(&stat, arg, sizeof (stat)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ case DTRACEIOC_FORMAT: {
+ dtrace_fmtdesc_t fmt;
+ char *str;
+ int len;
+
+ if (copyin(arg, &fmt, sizeof (fmt)) != 0)
+ return (EFAULT);
+
+ lck_mtx_lock(&dtrace_lock);
+
+ if (fmt.dtfd_format == 0 ||
+ fmt.dtfd_format > state->dts_nformats) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+
+ /*
+ * Format strings are allocated contiguously and they are
+ * never freed; if a format index is less than the number
+ * of formats, we can assert that the format map is non-NULL
+ * and that the format for the specified index is non-NULL.
+ */
+ ASSERT(state->dts_formats != NULL);
+ str = state->dts_formats[fmt.dtfd_format - 1];
+ ASSERT(str != NULL);
+
+ len = strlen(str) + 1;
+
+ if (len > fmt.dtfd_length) {
+ fmt.dtfd_length = len;
+
+ if (copyout(&fmt, arg, sizeof (fmt)) != 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+ } else {
+ if (copyout(str, (user_addr_t)fmt.dtfd_string, len) != 0) {
+ lck_mtx_unlock(&dtrace_lock);
+ return (EINVAL);
+ }
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+ return (0);
+ }
+
+ default:
+ break;
+ }
+
+ return (ENOTTY);
+}
+#endif /* __APPLE__ */
+
+#if !defined(__APPLE__)
+/*ARGSUSED*/
+static int
+dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ dtrace_state_t *state;
+
+ switch (cmd) {
+ case DDI_DETACH:
+ break;
+
+ case DDI_SUSPEND:
+ return (DDI_SUCCESS);
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ lck_mtx_lock(&cpu_lock);
+ lck_mtx_lock(&dtrace_provider_lock);
+ lck_mtx_lock(&dtrace_lock);