* Use is subject to license terms.
*/
-/*
- * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
- */
-
#include <sys/types.h>
#include <sys/time.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>
+#include <security/mac_framework.h>
+
#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
* never hold the provider lock and creation lock simultaneously
*/
-static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;
static thread_t fasttrap_cleanup_thread;
-static lck_mtx_t fasttrap_cleanup_mtx;
+static LCK_GRP_DECLARE(fasttrap_lck_grp, "fasttrap");
+static LCK_ATTR_DECLARE(fasttrap_lck_attr, 0, 0);
+static LCK_MTX_DECLARE_ATTR(fasttrap_cleanup_mtx,
+ &fasttrap_lck_grp, &fasttrap_lck_attr);
#define FASTTRAP_CLEANUP_PROVIDER 0x1
static fasttrap_hash_t fasttrap_procs;
static uint64_t fasttrap_pid_count; /* pid ref count */
-static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */
+static LCK_MTX_DECLARE_ATTR(fasttrap_count_mtx, /* lock on ref count */
+ &fasttrap_lck_grp, &fasttrap_lck_attr);
#define FASTTRAP_ENABLE_FAIL 1
#define FASTTRAP_ENABLE_PARTIAL 2
* 20k elements allocated, the space saved is substantial.
*/
-struct zone *fasttrap_tracepoint_t_zone;
+ZONE_DECLARE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t",
+ sizeof(fasttrap_tracepoint_t), ZC_NONE);
/*
* APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
"dtrace.fasttrap_probe_t[3]"
};
-/*
- * APPLE NOTE: We have to manage locks explicitly
- */
-lck_grp_t* fasttrap_lck_grp;
-lck_grp_attr_t* fasttrap_lck_grp_attr;
-lck_attr_t* fasttrap_lck_attr;
-
static int
fasttrap_highbit(ulong_t i)
{
return later;
}
-#ifdef FASTTRAP_ASYNC_REMOVE
typedef struct fasttrap_tracepoint_spec {
pid_t fttps_pid;
user_addr_t fttps_pc;
static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
-static lck_mtx_t fasttrap_retired_mtx;
+static LCK_MTX_DECLARE_ATTR(fasttrap_retired_mtx,
+ &fasttrap_lck_grp, &fasttrap_lck_attr);
#define DEFAULT_RETIRED_SIZE 256
s->fttps_pc = tp->ftt_pc;
if (fasttrap_cur_retired == fasttrap_retired_size) {
- fasttrap_retired_size *= 2;
fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
- fasttrap_retired_size *
- sizeof(fasttrap_tracepoint_t*),
+ fasttrap_retired_size * 2 *
+ sizeof(*fasttrap_retired_spec),
KM_SLEEP);
- memcpy(new_retired, fasttrap_retired_spec, sizeof(fasttrap_tracepoint_t*) * fasttrap_retired_size);
- kmem_free(fasttrap_retired_spec, sizeof(fasttrap_tracepoint_t*) * (fasttrap_retired_size / 2));
+ memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
+ kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
+ fasttrap_retired_size *= 2;
fasttrap_retired_spec = new_retired;
}
fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}
-#else
-void fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
-{
- if (tp->ftt_retired)
- return;
-
- fasttrap_tracepoint_remove(p, tp);
-}
-#endif
static void
fasttrap_pid_cleanup_compute_priority(void)
while (1) {
unsigned int later = 0;
- work = atomic_and_32(&fasttrap_cleanup_work, 0);
+ work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
lck_mtx_unlock(&fasttrap_cleanup_mtx);
if (work & FASTTRAP_CLEANUP_PROVIDER) {
later = fasttrap_pid_cleanup_providers();
}
-#ifdef FASTTRAP_ASYNC_REMOVE
if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
fasttrap_tracepoint_cleanup();
}
-#endif
lck_mtx_lock(&fasttrap_cleanup_mtx);
fasttrap_pid_cleanup_compute_priority();
* (if detach fails).
*/
if (later > 0) {
- struct timespec t = {1, 0};
+ struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
}
else
fasttrap_pid_cleanup(uint32_t work)
{
lck_mtx_lock(&fasttrap_cleanup_mtx);
- atomic_or_32(&fasttrap_cleanup_work, work);
+ os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
fasttrap_pid_cleanup_compute_priority();
wakeup(&fasttrap_pid_cleanup_cb);
lck_mtx_unlock(&fasttrap_cleanup_mtx);
}
+/*
+ * Register the intent to run invalid (patched) code pages in the target
+ * process so that installing a fasttrap tracepoint does not get the
+ * process killed by code signing.
+ *
+ * Entered with the proc_lock held (asserted below) and the sprlock held
+ * by the caller.  If the process has CS_KILL/CS_HARD set, both locks are
+ * temporarily dropped around MAC upcalls and re-acquired.
+ *
+ * Returns:
+ *   0      - success; proc_lock held on return.
+ *   EACCES - cs_allow_invalid() refused; proc_lock held on return.
+ *   ESRCH  - the process could not be re-acquired after an upcall; in
+ *            this case NO locks are held and the proc pointer is gone,
+ *            so the caller must not touch p or unlock anything.
+ */
+static int
+fasttrap_setdebug(proc_t *p)
+{
+ LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
+
+ /*
+ * CS_KILL and CS_HARD will cause code-signing to kill the process
+ * when the process text is modified, so register the intent
+ * to allow invalid access beforehand.
+ */
+ if ((p->p_csflags & (CS_KILL|CS_HARD))) {
+ proc_unlock(p);
+ for (int i = 0; i < DTRACE_NCLIENTS; i++) {
+ dtrace_state_t *state = dtrace_state_get(i);
+ if (state == NULL)
+ continue;
+ if (state->dts_cred.dcr_cred == NULL)
+ continue;
+ /*
+ * The get_task call flags whether the process should
+ * be flagged to have the cs_allow_invalid call
+ * succeed. We want the best credential that any dtrace
+ * client has, so try all of them.
+ */
+
+ /*
+ * mac_proc_check_get_task() can trigger upcalls. It's
+ * not safe to hold proc references across upcalls, so
+ * just drop the reference. Given the context, it
+ * should not be possible for the process to actually
+ * disappear.
+ */
+ struct proc_ident pident = proc_ident(p);
+ sprunlock(p);
+ p = PROC_NULL;
+
+ (void) mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident, TASK_FLAVOR_CONTROL);
+
+ /* Re-acquire by pid; the process may have exited meanwhile. */
+ p = sprlock(pident.p_pid);
+ if (p == PROC_NULL) {
+ return (ESRCH);
+ }
+ }
+ /* cs_allow_invalid() returns nonzero on success, 0 on refusal. */
+ int rc = cs_allow_invalid(p);
+ proc_lock(p);
+ if (rc == 0) {
+ return (EACCES);
+ }
+ }
+ return (0);
+}
/*
* This is called from cfork() via dtrace_fasttrap_fork(). The child
printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
return;
}
+
+ proc_lock(cp);
+ if (fasttrap_setdebug(cp) == ESRCH) {
+ printf("fasttrap_fork: failed to re-acquire proc\n");
+ return;
+ }
proc_unlock(cp);
/*
*/
dtrace_ptss_fork(p, cp);
- proc_lock(cp);
sprunlock(cp);
}
* explaining. This method is always called with the proc_lock held.
* We must drop the proc_lock before calling fasttrap_provider_retire
* to avoid a deadlock when it takes the bucket lock.
- *
+ *
* Next, the dtrace_ptss_exec_exit function requires the sprlock
- * be held, but not the proc_lock.
+ * be held, but not the proc_lock.
*
* Finally, we must re-acquire the proc_lock
*/
ASSERT(tp->ftt_ids != NULL);
idp = &tp->ftt_ids;
break;
-
+
case DTFTP_RETURN:
case DTFTP_POST_OFFSETS:
ASSERT(tp->ftt_retids != NULL);
idp = &tp->ftt_retids;
break;
-
+
default:
/* Fix compiler warning... */
idp = NULL;
return(0);
}
+ proc_lock(p);
+ int p_pid = proc_pid(p);
+
+ rc = fasttrap_setdebug(p);
+ switch (rc) {
+ case EACCES:
+ proc_unlock(p);
+ sprunlock(p);
+ cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
+ "Process does not allow invalid code pages\n", p_pid);
+ return (0);
+ case ESRCH:
+ cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
+ "Failed to re-acquire process\n", p_pid);
+ return (0);
+ default:
+ assert(rc == 0);
+ break;
+ }
+
/*
* APPLE NOTE: We do not have an equivalent thread structure to Solaris.
* Solaris uses its ulwp_t struct for scratch space to support the pid provider.
i--;
}
- proc_lock(p);
sprunlock(p);
/*
}
}
- proc_lock(p);
sprunlock(p);
probe->ftp_enabled = 1;
*/
if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
// ASSERT(!(p->p_flag & SVFORK));
- proc_unlock(p);
}
lck_mtx_lock(&provider->ftp_mtx);
whack = provider->ftp_marked = 1;
lck_mtx_unlock(&provider->ftp_mtx);
- proc_lock(p);
sprunlock(p);
} else {
/*
ASSERT(!probe->ftp_enabled);
ASSERT(fasttrap_total >= probe->ftp_ntps);
- atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
- atomic_add_32(&fasttrap_retired, -probe->ftp_ntps);
+ os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
+ os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);
if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
fasttrap_mod_barrier(probe->ftp_gen);
};
static dtrace_pops_t pid_pops = {
- fasttrap_pid_provide,
- NULL,
- fasttrap_pid_enable,
- fasttrap_pid_disable,
- NULL,
- NULL,
- fasttrap_pid_getargdesc,
- fasttrap_pid_getarg,
- NULL,
- fasttrap_pid_destroy
+ .dtps_provide = fasttrap_pid_provide,
+ .dtps_provide_module = NULL,
+ .dtps_enable = fasttrap_pid_enable,
+ .dtps_disable = fasttrap_pid_disable,
+ .dtps_suspend = NULL,
+ .dtps_resume = NULL,
+ .dtps_getargdesc = fasttrap_pid_getargdesc,
+ .dtps_getargval = fasttrap_pid_getarg,
+ .dtps_usermode = NULL,
+ .dtps_destroy = fasttrap_pid_destroy
};
static dtrace_pops_t usdt_pops = {
- fasttrap_pid_provide,
- NULL,
- fasttrap_pid_enable,
- fasttrap_pid_disable,
- NULL,
- NULL,
- fasttrap_pid_getargdesc,
- fasttrap_usdt_getarg,
- NULL,
- fasttrap_pid_destroy
+ .dtps_provide = fasttrap_pid_provide,
+ .dtps_provide_module = NULL,
+ .dtps_enable = fasttrap_pid_enable,
+ .dtps_disable = fasttrap_pid_disable,
+ .dtps_suspend = NULL,
+ .dtps_resume = NULL,
+ .dtps_getargdesc = fasttrap_pid_getargdesc,
+ .dtps_getargval = fasttrap_usdt_getarg,
+ .dtps_usermode = NULL,
+ .dtps_destroy = fasttrap_pid_destroy
};
static fasttrap_proc_t *
lck_mtx_lock(&fprc->ftpc_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
fprc->ftpc_rcount++;
- atomic_add_64(&fprc->ftpc_acount, 1);
+ os_atomic_inc(&fprc->ftpc_acount, relaxed);
ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
lck_mtx_unlock(&fprc->ftpc_mtx);
lck_mtx_lock(&fprc->ftpc_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
fprc->ftpc_rcount++;
- atomic_add_64(&fprc->ftpc_acount, 1);
+ os_atomic_inc(&fprc->ftpc_acount, relaxed);
ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
lck_mtx_unlock(&fprc->ftpc_mtx);
/*
* APPLE NOTE: We have to initialize all locks explicitly
*/
- lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
+ lck_mtx_init(&new_fprc->ftpc_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);
new_fprc->ftpc_next = bucket->ftb_data;
bucket->ftb_data = new_fprc;
* APPLE NOTE: explicit lock management. Not 100% certain we need this, the
* memory is freed even without the destroy. Maybe accounting cleanup?
*/
- lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
+ lck_mtx_destroy(&fprc->ftpc_mtx, &fasttrap_lck_grp);
kmem_free(fprc, sizeof (fasttrap_proc_t));
}
* APPLE NOTE: We have no equivalent to crhold,
* even though there is a cr_ref filed in ucred.
*/
- // lck_mtx_lock(&p->p_crlock;
- crhold(p->p_ucred);
- cred = p->p_ucred;
- // lck_mtx_unlock(&p->p_crlock);
+ cred = kauth_cred_proc_ref(p);
proc_unlock(p);
new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
/*
* APPLE NOTE: locks require explicit init
*/
- lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
- lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
+ lck_mtx_init(&new_fp->ftp_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);
+ lck_mtx_init(&new_fp->ftp_cmtx, &fasttrap_lck_grp, &fasttrap_lck_attr);
ASSERT(new_fp->ftp_proc != NULL);
lck_mtx_lock(&fp->ftp_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
fasttrap_provider_free(new_fp);
- crfree(cred);
+ kauth_cred_unref(&cred);
return (fp);
}
}
&new_fp->ftp_provid) != 0) {
lck_mtx_unlock(&bucket->ftb_mtx);
fasttrap_provider_free(new_fp);
- crfree(cred);
+ kauth_cred_unref(&cred);
return (NULL);
}
lck_mtx_lock(&new_fp->ftp_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
- crfree(cred);
+ kauth_cred_unref(&cred);
+
return (new_fp);
}
* count of active providers on the associated process structure.
*/
if (!provider->ftp_retired) {
- atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
+ os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
ASSERT(provider->ftp_proc->ftpc_acount <
provider->ftp_proc->ftpc_rcount);
}
* APPLE NOTE: explicit lock management. Not 100% certain we need this, the
* memory is freed even without the destroy. Maybe accounting cleanup?
*/
- lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
- lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
+ lck_mtx_destroy(&provider->ftp_mtx, &fasttrap_lck_grp);
+ lck_mtx_destroy(&provider->ftp_cmtx, &fasttrap_lck_grp);
kmem_free(provider, sizeof (fasttrap_provider_t));
proc_lock(p);
p->p_dtrace_probes--;
proc_unlock(p);
-
+
proc_rele(p);
}
* bucket lock therefore protects the integrity of the provider hash
* table.
*/
- atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
+ os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
/*
* Add this provider probes to the retired count and
* make sure we don't add them twice
*/
- atomic_add_32(&fasttrap_retired, fp->ftp_pcount);
+ os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
fp->ftp_pcount = 0;
fp->ftp_retired = 1;
if (p == PROC_NULL)
return (ESRCH);
- /*
- * Set that the process is allowed to run modified code and
- * bail if it is not allowed to
- */
-#if CONFIG_EMBEDDED
- if ((p->p_csflags & (CS_KILL|CS_HARD)) && !cs_allow_invalid(p)) {
- proc_rele(p);
- return (EPERM);
- }
-#endif
if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
provider_name, &pid_attr)) == NULL) {
proc_rele(p);
pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
continue;
- atomic_add_32(&fasttrap_total, 1);
+ os_atomic_inc(&fasttrap_total, relaxed);
if (fasttrap_total > fasttrap_max) {
- atomic_add_32(&fasttrap_total, -1);
+ os_atomic_dec(&fasttrap_total, relaxed);
goto no_mem;
}
provider->ftp_pcount++;
pp->ftp_pid = pdata->ftps_pid;
pp->ftp_ntps = 1;
- tp = zalloc(fasttrap_tracepoint_t_zone);
+ tp = zalloc(fasttrap_tracepoint_t_zone);
bzero(tp, sizeof (fasttrap_tracepoint_t));
tp->ftt_proc = provider->ftp_proc;
} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
pdata->ftps_func, name) == 0) {
- atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
+ os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);
if (fasttrap_total > fasttrap_max) {
- atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
+ os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
goto no_mem;
}
if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
continue;
- atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
+ os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
goto no_mem;
}
provider->ftp_pcount += pdata->ftps_noffs;
* this field is simply initialized to 0 on its way
* into the kernel.
*/
-
+
tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif
pp->ftp_tps[i].fit_tp = tp;
#if 0
/*
- * APPLE NOTE: This is hideously expensive. See note in
+ * APPLE NOTE: This is hideously expensive. See note in
* fasttrap_meta_provide() for why we can get away without
* checking here.
*/
ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
ASSERT(ntps > 0);
- atomic_add_32(&fasttrap_total, ntps);
+ os_atomic_add(&fasttrap_total, ntps, relaxed);
if (fasttrap_total > fasttrap_max) {
- atomic_add_32(&fasttrap_total, -ntps);
+ os_atomic_sub(&fasttrap_total, ntps, relaxed);
lck_mtx_unlock(&provider->ftp_cmtx);
return;
}
* All ARM and ARM64 probes are zero offset. We need to zero out the
* thumb bit because we still support 32bit user processes.
* On 64bit user processes, bit zero won't be set anyway.
- */
+ */
tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
* All ARM and ARM64 probes are zero offset. We need to zero out the
* thumb bit because we still support 32bit user processes.
* On 64bit user processes, bit zero won't be set anyway.
- */
+ */
tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
}
static dtrace_mops_t fasttrap_mops = {
- fasttrap_meta_create_probe,
- fasttrap_meta_provide,
- fasttrap_meta_remove,
- fasttrap_meta_provider_name
+ .dtms_create_probe = fasttrap_meta_create_probe,
+ .dtms_provide_proc = fasttrap_meta_provide,
+ .dtms_remove_proc = fasttrap_meta_remove,
+ .dtms_provider_name = fasttrap_meta_provider_name
};
/*
return utf8_validatestr((unsigned const char*) str, len);
}
+/*
+ * Checks that provided credentials are allowed to debug target process.
+ *
+ * Ownership: consumes the caller's reference on p on EVERY path (via
+ * proc_rele below) — the caller must not proc_rele() p after this call.
+ *
+ * Returns:
+ *   0      - cr may debug p.
+ *   EPERM  - cr is not the superuser.
+ *   EBUSY  - p is marked no-attach (P_LNOATTACH).
+ *   ESRCH  - p exited while its reference was dropped for the MAC check.
+ *   other  - error from mac_proc_check_debug().
+ */
+static int
+fasttrap_check_cred_priv(cred_t *cr, proc_t *p)
+{
+ int err = 0;
+
+ /* Only root can use DTrace. */
+ if (!kauth_cred_issuser(cr)) {
+ err = EPERM;
+ goto out;
+ }
+
+ /* Process is marked as no attach. */
+ if (ISSET(p->p_lflag, P_LNOATTACH)) {
+ err = EBUSY;
+ goto out;
+ }
+
+#if CONFIG_MACF
+ /* Check with MAC framework when enabled. */
+ struct proc_ident cur_ident = proc_ident(current_proc());
+ struct proc_ident p_ident = proc_ident(p);
+
+ /* Do not hold ref to proc here to avoid deadlock. */
+ proc_rele(p);
+ err = mac_proc_check_debug(&cur_ident, cr, &p_ident);
+
+ /*
+ * Re-acquire a reference by identity; on success the fresh
+ * reference is released at the out: label below.
+ */
+ if (proc_find_ident(&p_ident) == PROC_NULL) {
+ err = ESRCH;
+ goto out_no_proc;
+ }
+#endif /* CONFIG_MACF */
+
+out:
+ proc_rele(p);
+
+out_no_proc:
+ return err;
+}
+
/*ARGSUSED*/
static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
ret = ESRCH;
goto err;
}
- // proc_lock(p);
- // FIXME! How is this done on OS X?
- // if ((ret = priv_proc_cred_perm(cr, p, NULL,
- // VREAD | VWRITE)) != 0) {
- // mutex_exit(&p->p_lock);
- // return (ret);
- // }
- // proc_unlock(p);
- proc_rele(p);
+
+ ret = fasttrap_check_cred_priv(cr, p);
+ if (ret != 0) {
+ goto err;
+ }
}
ret = fasttrap_add_probe(probe);
fasttrap_instr_query_t instr;
fasttrap_tracepoint_t *tp;
uint_t index;
- // int ret;
+ int ret;
if (copyin(arg, &instr, sizeof (instr)) != 0)
return (EFAULT);
proc_rele(p);
return (ESRCH);
}
- //proc_lock(p);
- // FIXME! How is this done on OS X?
- // if ((ret = priv_proc_cred_perm(cr, p, NULL,
- // VREAD)) != 0) {
- // mutex_exit(&p->p_lock);
- // return (ret);
- // }
- // proc_unlock(p);
- proc_rele(p);
+
+ ret = fasttrap_check_cred_priv(cr, p);
+ if (ret != 0) {
+ return (ret);
+ }
}
index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
return (EINVAL);
}
-static int
-fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
+static void
+fasttrap_attach(void)
{
ulong_t nent;
-
- switch (cmd) {
- case DDI_ATTACH:
- break;
- case DDI_RESUME:
- return (DDI_SUCCESS);
- default:
- return (DDI_FAILURE);
- }
-
- ddi_report_dev(devi);
- fasttrap_devi = devi;
+ unsigned int i;
/*
* Install our hooks into fork(2), exec(2), and exit(2).
*/
fasttrap_max = (sane_size >> 28) * 100000;
-#if CONFIG_EMBEDDED
-#if defined(__LP64__)
- /*
- * On embedded, the zone map does not grow with the memory size over 1GB
- * (see osfmk/vm/vm_init.c)
- */
- if (fasttrap_max > 400000) {
- fasttrap_max = 400000;
- }
-#endif
-#endif
if (fasttrap_max == 0)
fasttrap_max = 50000;
/*
* Conjure up the tracepoints hashtable...
*/
+#ifdef illumos
nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
"fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
+#else
+ nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
+#endif
if (nent <= 0 || nent > 0x1000000)
nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_tpoints.fth_table != NULL);
- /*
- * APPLE NOTE: explicitly initialize all locks...
- */
- unsigned int i;
- for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
- lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
+ for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
+ lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
+ &fasttrap_lck_attr);
}
/*
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_provs.fth_table != NULL);
- /*
- * APPLE NOTE: explicitly initialize all locks...
- */
- for (i=0; i<fasttrap_provs.fth_nent; i++) {
- lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
+ for (i = 0; i < fasttrap_provs.fth_nent; i++) {
+ lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
+ &fasttrap_lck_attr);
}
/*
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_procs.fth_table != NULL);
- /*
- * APPLE NOTE: explicitly initialize all locks...
- */
- for (i=0; i<fasttrap_procs.fth_nent; i++) {
- lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
+#ifndef illumos
+ for (i = 0; i < fasttrap_procs.fth_nent; i++) {
+ lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
+ &fasttrap_lck_attr);
}
+#endif
(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
&fasttrap_meta_id);
-
- return (DDI_SUCCESS);
}
-static int
+static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
int err, rv = 0;
- user_addr_t uaddrp;
+ user_addr_t uaddrp;
- if (proc_is64bit(p))
- uaddrp = *(user_addr_t *)data;
- else
- uaddrp = (user_addr_t) *(uint32_t *)data;
+ if (proc_is64bit(p)) {
+ uaddrp = *(user_addr_t *)data;
+ } else {
+ uaddrp = (user_addr_t) *(uint32_t *)data;
+ }
err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
} else if (rv != 0) {
ASSERT( (rv & 0xfff00000) == 0 );
return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
- } else
+ } else
return 0;
}
-static int gFasttrapInited = 0;
+static int fasttrap_inited = 0;
#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
-/*
- * A struct describing which functions will get invoked for certain
- * actions.
- */
-
-static struct cdevsw fasttrap_cdevsw =
+static const struct cdevsw fasttrap_cdevsw =
{
- _fasttrap_open, /* open */
- eno_opcl, /* close */
- eno_rdwrt, /* read */
- eno_rdwrt, /* write */
- _fasttrap_ioctl, /* ioctl */
- (stop_fcn_t *)nulldev, /* stop */
- (reset_fcn_t *)nulldev, /* reset */
- NULL, /* tty's */
- eno_select, /* select */
- eno_mmap, /* mmap */
- eno_strat, /* strategy */
- eno_getc, /* getc */
- eno_putc, /* putc */
- 0 /* type */
+ .d_open = _fasttrap_open,
+ .d_close = eno_opcl,
+ .d_read = eno_rdwrt,
+ .d_write = eno_rdwrt,
+ .d_ioctl = _fasttrap_ioctl,
+ .d_stop = (stop_fcn_t *)nulldev,
+ .d_reset = (reset_fcn_t *)nulldev,
+ .d_select = eno_select,
+ .d_mmap = eno_mmap,
+ .d_strategy = eno_strat,
+ .d_reserved_1 = eno_getc,
+ .d_reserved_2 = eno_putc,
};
void fasttrap_init(void);
*
* The reason is to delay allocating the (rather large) resources as late as possible.
*/
- if (0 == gFasttrapInited) {
+ if (!fasttrap_inited) {
int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
if (majdevno < 0) {
return;
}
- /*
- * Allocate the fasttrap_tracepoint_t zone
- */
- fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
- 1024 * sizeof(fasttrap_tracepoint_t),
- sizeof(fasttrap_tracepoint_t),
- "dtrace.fasttrap_tracepoint_t");
-
/*
* fasttrap_probe_t's are variable in size. We use an array of zones to
* cover the most common sizes.
*/
int i;
for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
- size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
- fasttrap_probe_t_zones[i] = zinit(zone_element_size,
- 1024 * zone_element_size,
- zone_element_size,
- fasttrap_probe_t_zone_names[i]);
+ fasttrap_probe_t_zones[i] =
+ zone_create(fasttrap_probe_t_zone_names[i],
+ offsetof(fasttrap_probe_t, ftp_tps[i]), ZC_NONE);
}
-
- /*
- * Create the fasttrap lock group. Must be done before fasttrap_attach()!
- */
- fasttrap_lck_attr = lck_attr_alloc_init();
- fasttrap_lck_grp_attr= lck_grp_attr_alloc_init();
- fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
-
- /*
- * Initialize global locks
- */
- lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
- lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
- if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
- // FIX ME! Do we remove the devfs node here?
- // What kind of error reporting?
- printf("fasttrap_init: Call to fasttrap_attach failed.\n");
- return;
- }
+ fasttrap_attach();
/*
* Start the fasttrap cleanup thread
}
thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");
-#ifdef FASTTRAP_ASYNC_REMOVE
fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
- fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(fasttrap_tracepoint_t*),
+ fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
KM_SLEEP);
- lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
-#endif
- gFasttrapInited = 1;
+ fasttrap_inited = 1;
}
}