*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
- * #pragma ident "@(#)fasttrap.c 1.21 06/06/12 SMI"
+ * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
*/
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>
+#include <sys/utfconv.h>
#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <kern/zalloc.h>
-#define proc_t struct proc
+/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
+#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
+
+__private_extern__
+void
+qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
/*
* User-Land Trap-Based Tracing
*/
static volatile uint64_t fasttrap_mod_gen;
-#if !defined(__APPLE__)
/*
- * When the fasttrap provider is loaded, fasttrap_max is set to either
- * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
- * fasttrap.conf file. Each time a probe is created, fasttrap_total is
+ * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
+ * based on system memory. Each time a probe is created, fasttrap_total is
* incremented by the number of tracepoints that may be associated with that
* probe; fasttrap_total is capped at fasttrap_max.
*/
-#define FASTTRAP_MAX_DEFAULT 2500000
-#endif
static uint32_t fasttrap_max;
static uint32_t fasttrap_total;
static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
-#if defined(__APPLE__)
-static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
+static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
const dtrace_pattr_t *);
-#endif
-static void fasttrap_provider_retire(pid_t, const char *, int);
+static void fasttrap_provider_retire(proc_t*, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);
static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
-#if defined(__APPLE__)
-
/*
- * To save memory, some common memory allocations are given a
- * unique zone. In example, dtrace_probe_t is 72 bytes in size,
+ * APPLE NOTE: To save memory, some common memory allocations are given
+ * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
* which means it would fall into the kalloc.128 bucket. With
* 20k elements allocated, the space saved is substantial.
*/
struct zone *fasttrap_tracepoint_t_zone;
/*
- * fasttrap_probe_t's are variable in size. Some quick profiling has shown
+ * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
* that the sweet spot for reducing memory footprint is covering the first
* three sizes. Everything larger goes into the common pool.
*/
};
/*
- * We have to manage locks explicitly
+ * APPLE NOTE: We have to manage locks explicitly
*/
lck_grp_t* fasttrap_lck_grp;
lck_grp_attr_t* fasttrap_lck_grp_attr;
lck_attr_t* fasttrap_lck_attr;
-#endif
static int
fasttrap_highbit(ulong_t i)
}
/*
- * FIXME - needs implementation
+ * APPLE NOTE: fasttrap_sigtrap not implemented
*/
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)
-#if 0
+#if !defined(__APPLE__)
sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
sqp->sq_info.si_signo = SIGTRAP;
if (t != NULL)
aston(t);
-#endif
+#endif /* __APPLE__ */
printf("fasttrap_sigtrap called with no implementation.\n");
}
lck_mtx_lock(&bucket->ftb_mtx);
for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
if (tp->ftt_pid == ppid &&
- !tp->ftt_proc->ftpc_defunct) {
+ tp->ftt_proc->ftpc_acount != 0) {
fasttrap_tracepoint_remove(cp, tp);
+
+ /*
+ * The count of active providers can only be
+ * decremented (i.e. to zero) during exec,
+ * exit, and removal of a meta provider so it
+ * should be impossible to drop the count
+ * mid-fork.
+ */
+ ASSERT(tp->ftt_proc->ftpc_acount != 0);
}
}
lck_mtx_unlock(&bucket->ftb_mtx);
* We clean up the pid provider for this process here; user-land
* static probes are handled by the meta-provider remove entry point.
*/
- fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
-#if defined(__APPLE__)
+ fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);
+
/*
- * We also need to remove any aliased providers.
+ * APPLE NOTE: We also need to remove any aliased providers.
* XXX optimization: track which provider types are instantiated
* and only retire as needed.
*/
- fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
- fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
-#endif /* __APPLE__ */
+ fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
+ fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);
/*
* This should be called after it is no longer possible for a user
again:
lck_mtx_lock(&bucket->ftb_mtx);
for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
+ /*
+ * Note that it's safe to access the active count on the
+ * associated proc structure because we know that at least one
+ * provider (this one) will still be around throughout this
+ * operation.
+ */
if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
- tp->ftt_proc->ftpc_defunct)
+ tp->ftt_proc->ftpc_acount == 0)
continue;
/*
if (tp->ftt_ids != NULL) {
tmp_probe = tp->ftt_ids->fti_probe;
+ /* LINTED - alignment */
tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
} else {
tmp_probe = tp->ftt_retids->fti_probe;
+ /* LINTED - alignment */
tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
}
ASSERT(fasttrap_pid_count > 0);
fasttrap_pid_count--;
if (fasttrap_pid_count == 0) {
- cpu_t *cur, *cpu = CPU;
+ dtrace_cpu_t *cur, *cpu = CPU;
/*
* APPLE NOTE: This loop seems broken, it touches every CPU
}
/*ARGSUSED*/
-static void
+static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
* provider can't go away while we're in this code path.
*/
if (probe->ftp_prov->ftp_retired)
- return;
+ return(0);
/*
* If we can't find the process, it may be that we're in the context of
* USDT probes. Otherwise, the process is gone so bail.
*/
if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
-#if defined(__APPLE__)
/*
* APPLE NOTE: We should never end up here. The Solaris sprlock()
* does not return process's with SIDL set, but we always return
* the child process.
*/
- return;
-#else
-
- if ((curproc->p_flag & SFORKING) == 0)
- return;
-
- lck_mtx_lock(&pidlock);
- p = prfind(probe->ftp_pid);
-
- /*
- * Confirm that curproc is indeed forking the process in which
- * we're trying to enable probes.
- */
- ASSERT(p != NULL);
- //ASSERT(p->p_parent == curproc);
- ASSERT(p->p_stat == SIDL);
-
- lck_mtx_lock(&p->p_lock);
- lck_mtx_unlock(&pidlock);
-
- sprlock_proc(p);
-#endif
+ return(0);
}
/*
* drop our reference on the trap table entry.
*/
fasttrap_disable_callbacks();
- return;
+ return(0);
}
}
sprunlock(p);
probe->ftp_enabled = 1;
+ return (0);
}
/*ARGSUSED*/
#pragma unused(arg, id)
fasttrap_probe_t *probe = parg;
char *str;
- int i;
+ int i, ndx;
desc->dtargd_native[0] = '\0';
desc->dtargd_xlate[0] = '\0';
return;
}
- /*
- * We only need to set this member if the argument is remapped.
- */
- if (probe->ftp_argmap != NULL)
- desc->dtargd_mapping = probe->ftp_argmap[desc->dtargd_ndx];
+ ndx = (probe->ftp_argmap != NULL) ?
+ probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
str = probe->ftp_ntypes;
- for (i = 0; i < desc->dtargd_mapping; i++) {
+ for (i = 0; i < ndx; i++) {
str += strlen(str) + 1;
}
ASSERT(fasttrap_total >= probe->ftp_ntps);
atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
-#if !defined(__APPLE__)
- size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
-#endif
if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
fasttrap_mod_barrier(probe->ftp_gen);
for (i = 0; i < probe->ftp_ntps; i++) {
-#if !defined(__APPLE__)
- kmem_free(probe->ftp_tps[i].fit_tp, sizeof (fasttrap_tracepoint_t));
-#else
zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
-#endif
}
-#if !defined(__APPLE__)
- kmem_free(probe, size);
-#else
if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
} else {
size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
kmem_free(probe, size);
}
-#endif
}
lck_mtx_lock(&bucket->ftb_mtx);
for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
- if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
+ if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
lck_mtx_lock(&fprc->ftpc_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
- fprc->ftpc_count++;
+ fprc->ftpc_rcount++;
+ atomic_add_64(&fprc->ftpc_acount, 1);
+ ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
lck_mtx_unlock(&fprc->ftpc_mtx);
return (fprc);
new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
ASSERT(new_fprc != NULL);
new_fprc->ftpc_pid = pid;
- new_fprc->ftpc_count = 1;
+ new_fprc->ftpc_rcount = 1;
+ new_fprc->ftpc_acount = 1;
lck_mtx_lock(&bucket->ftb_mtx);
* been created for this pid while we weren't under the bucket lock.
*/
for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
- if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
+ if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
lck_mtx_lock(&fprc->ftpc_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
- fprc->ftpc_count++;
+ fprc->ftpc_rcount++;
+ atomic_add_64(&fprc->ftpc_acount, 1);
+ ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
lck_mtx_unlock(&fprc->ftpc_mtx);
kmem_free(new_fprc, sizeof (fasttrap_proc_t));
}
}
-#if defined(__APPLE__)
/*
- * We have to initialize all locks explicitly
+ * APPLE NOTE: We have to initialize all locks explicitly
*/
lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
-#endif
new_fprc->ftpc_next = bucket->ftb_data;
bucket->ftb_data = new_fprc;
lck_mtx_lock(&proc->ftpc_mtx);
- ASSERT(proc->ftpc_count != 0);
+ ASSERT(proc->ftpc_rcount != 0);
+ ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
- if (--proc->ftpc_count != 0) {
+ if (--proc->ftpc_rcount != 0) {
lck_mtx_unlock(&proc->ftpc_mtx);
return;
}
lck_mtx_unlock(&proc->ftpc_mtx);
+ /*
+ * There should definitely be no live providers associated with this
+ * process at this point.
+ */
+ ASSERT(proc->ftpc_acount == 0);
+
bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
lck_mtx_lock(&bucket->ftb_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
-#if defined(__APPLE__)
/*
- * Apple explicit lock management. Not 100% certain we need this, the
+ * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
* memory is freed even without the destroy. Maybe accounting cleanup?
*/
lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
-#endif
kmem_free(fprc, sizeof (fasttrap_proc_t));
}
/*
- * Lookup a fasttrap-managed provider based on its name and associated pid.
+ * Lookup a fasttrap-managed provider based on its name and associated proc.
+ * A reference to the proc must be held for the duration of the call.
* If the pattr argument is non-NULL, this function instantiates the provider
* if it doesn't exist otherwise it returns NULL. The provider is returned
* with its lock held.
*/
-#if defined(__APPLE__)
static fasttrap_provider_t *
-fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name,
+fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
const dtrace_pattr_t *pattr)
-#endif /* __APPLE__ */
{
+ pid_t pid = p->p_pid;
fasttrap_provider_t *fp, *new_fp = NULL;
fasttrap_bucket_t *bucket;
char provname[DTRACE_PROVNAMELEN];
- proc_t *p;
cred_t *cred;
ASSERT(strlen(name) < sizeof (fp->ftp_name));
*/
for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
if (fp->ftp_pid == pid &&
-#if defined(__APPLE__)
fp->ftp_provider_type == provider_type &&
-#endif /* __APPLE__ */
strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
!fp->ftp_retired) {
lck_mtx_lock(&fp->ftp_mtx);
lck_mtx_unlock(&bucket->ftb_mtx);
/*
- * Make sure the process exists, isn't a child created as the result
+ * Make sure the process isn't a child created as the result
* of a vfork(2), and isn't a zombie (but may be in fork).
*/
- if ((p = proc_find(pid)) == NULL) {
- return NULL;
- }
proc_lock(p);
if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
proc_unlock(p);
- proc_rele(p);
return (NULL);
}
/*
* Grab the credentials for this process so we have
* something to pass to dtrace_register().
+ * APPLE NOTE: We have no equivalent to crhold,
+ * even though there is a cr_ref field in ucred.
*/
-#if !defined(__APPLE__)
- mutex_enter(&p->p_crlock);
- crhold(p->p_cred);
- cred = p->p_cred;
- mutex_exit(&p->p_crlock);
- mutex_exit(&p->p_lock);
-#else
- // lck_mtx_lock(&p->p_crlock);
- // Seems like OS X has no equivalent to crhold, even though it has a cr_ref field in ucred
// lck_mtx_lock(&p->p_crlock);
crhold(p->p_ucred);
cred = p->p_ucred;
// lck_mtx_unlock(&p->p_crlock);
proc_unlock(p);
- proc_rele(p);
-#endif /* __APPLE__ */
new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
ASSERT(new_fp != NULL);
- new_fp->ftp_pid = pid;
+ new_fp->ftp_pid = p->p_pid;
new_fp->ftp_proc = fasttrap_proc_lookup(pid);
-#if defined(__APPLE__)
new_fp->ftp_provider_type = provider_type;
/*
- * Apple locks require explicit init.
+ * APPLE NOTE: locks require explicit init
*/
lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
-#endif /* __APPLE__ */
ASSERT(new_fp->ftp_proc != NULL);
ASSERT(provider->ftp_ccount == 0);
ASSERT(provider->ftp_mcount == 0);
+ /*
+ * If this provider hasn't been retired, we need to explicitly drop the
+ * count of active providers on the associated process structure.
+ */
+ if (!provider->ftp_retired) {
+ atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
+ ASSERT(provider->ftp_proc->ftpc_acount <
+ provider->ftp_proc->ftpc_rcount);
+ }
+
fasttrap_proc_release(provider->ftp_proc);
-#if defined(__APPLE__)
/*
- * Apple explicit lock management. Not 100% certain we need this, the
+ * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
* memory is freed even without the destroy. Maybe accounting cleanup?
*/
lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
-#endif
kmem_free(provider, sizeof (fasttrap_provider_t));
}
static void
-fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
+fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
fasttrap_provider_t *fp;
fasttrap_bucket_t *bucket;
ASSERT(strlen(name) < sizeof (fp->ftp_name));
- bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
+ bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
lck_mtx_lock(&bucket->ftb_mtx);
for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
- if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
+ if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
!fp->ftp_retired)
break;
}
}
/*
- * Mark the provider to be removed in our post-processing step,
- * mark it retired, and mark its proc as defunct (though it may
- * already be marked defunct by another provider that shares the
- * same proc). Marking it indicates that we should try to remove it;
- * setting the retired flag indicates that we're done with this
- * provider; setting the proc to be defunct indicates that all
- * tracepoints associated with the traced process should be ignored.
+ * Mark the provider to be removed in our post-processing step, mark it
+ * retired, and drop the active count on its proc. Marking it indicates
+ * that we should try to remove it; setting the retired flag indicates
+ * that we're done with this provider; dropping the active count on the proc
+ * releases our hold, and when this reaches zero (as it will during
+ * exit or exec) the proc and associated providers become defunct.
*
* We obviously need to take the bucket lock before the provider lock
* to perform the lookup, but we need to drop the provider lock
* bucket lock therefore protects the integrity of the provider hash
* table.
*/
- fp->ftp_proc->ftpc_defunct = 1;
+ atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
+ ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
+
fp->ftp_retired = 1;
fp->ftp_marked = 1;
provid = fp->ftp_provid;
/*
* We don't have to worry about invalidating the same provider twice
- * since fasttrap_provider_lookup() will ignore provider that have
+ * since fasttrap_provider_lookup() will ignore providers that have
* been marked as retired.
*/
dtrace_invalidate(provid);
fasttrap_pid_cleanup();
}
+/*
+ * qsort comparator for uint32_t values.
+ *
+ * Note: plain subtraction (a - b) wraps modulo 2^32 and is then truncated
+ * to int, yielding the wrong sign whenever the operands differ by 2^31 or
+ * more. Use explicit relational comparison to return a correct -1/0/1
+ * total ordering for all inputs.
+ */
+static int
+fasttrap_uint32_cmp(const void *ap, const void *bp)
+{
+	const uint32_t a = *(const uint32_t *)ap;
+	const uint32_t b = *(const uint32_t *)bp;
+
+	return ((a > b) - (a < b));
+}
+
+/*
+ * qsort comparator for uint64_t values.
+ *
+ * Note: subtracting and truncating to int discards the high 32 bits, so
+ * values differing by an exact multiple of 2^32 would compare as equal and
+ * other large differences would get an arbitrary sign — which could defeat
+ * the duplicate-offset detection in fasttrap_add_probe(). Compare
+ * explicitly instead; this is correct for the full 64-bit range.
+ */
+static int
+fasttrap_uint64_cmp(const void *ap, const void *bp)
+{
+	const uint64_t a = *(const uint64_t *)ap;
+	const uint64_t b = *(const uint64_t *)bp;
+
+	return ((a > b) - (a < b));
+}
+
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
+ proc_t *p;
fasttrap_provider_t *provider;
fasttrap_probe_t *pp;
fasttrap_tracepoint_t *tp;
const char *name;
unsigned int i, aframes, whack;
-#if defined(__APPLE__)
+ /*
+ * There needs to be at least one desired trace point.
+ */
+ if (pdata->ftps_noffs == 0)
+ return (EINVAL);
+
switch (pdata->ftps_probe_type) {
-#endif
case DTFTP_ENTRY:
name = "entry";
aframes = FASTTRAP_ENTRY_AFRAMES;
return (EINVAL);
}
-#if defined(__APPLE__)
const char* provider_name;
switch (pdata->ftps_provider_type) {
case DTFTP_PROVIDER_PID:
return (EINVAL);
}
- if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type,
+ p = proc_find(pdata->ftps_pid);
+ if (p == PROC_NULL)
+ return (ESRCH);
+
+ if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
provider_name, &pid_attr)) == NULL)
return (ESRCH);
-#endif /* __APPLE__ */
+ proc_rele(p);
/*
* Increment this reference count to indicate that a consumer is
* actively adding a new probe associated with this provider. This
char name_str[17];
(void) snprintf(name_str, sizeof(name_str), "%llx",
- (unsigned long long)pdata->ftps_offs[i]);
+ (uint64_t)pdata->ftps_offs[i]);
if (dtrace_probe_lookup(provider->ftp_provid,
pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
goto no_mem;
}
-#if !defined(__APPLE__)
- pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
- ASSERT(pp != NULL);
-#else
pp = zalloc(fasttrap_probe_t_zones[1]);
bzero(pp, sizeof (fasttrap_probe_t));
-#endif
pp->ftp_prov = provider;
pp->ftp_faddr = pdata->ftps_pc;
pp->ftp_pid = pdata->ftps_pid;
pp->ftp_ntps = 1;
-#if !defined(__APPLE__)
- tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
-#else
tp = zalloc(fasttrap_tracepoint_t_zone);
bzero(tp, sizeof (fasttrap_tracepoint_t));
-#endif
tp->ftt_proc = provider->ftp_proc;
tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
tp->ftt_pid = pdata->ftps_pid;
+
pp->ftp_tps[0].fit_tp = tp;
pp->ftp_tps[0].fit_id.fti_probe = pp;
-#if defined(__APPLE__)
pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
-#endif
pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
pdata->ftps_mod, pdata->ftps_func, name_str,
FASTTRAP_OFFSET_AFRAMES, pp);
goto no_mem;
}
+ /*
+ * Make sure all tracepoint program counter values are unique.
+ * We later assume that each probe has exactly one tracepoint
+ * for a given pc.
+ */
+ qsort(pdata->ftps_offs, pdata->ftps_noffs,
+ sizeof (uint64_t), fasttrap_uint64_cmp);
+ for (i = 1; i < pdata->ftps_noffs; i++) {
+ if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
+ continue;
+
+ atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
+ goto no_mem;
+ }
+
ASSERT(pdata->ftps_noffs > 0);
-#if !defined(__APPLE__)
- pp = kmem_zalloc(offsetof(fasttrap_probe_t,
- ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
- ASSERT(pp != NULL);
-#else
if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
} else {
pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
}
-#endif
pp->ftp_prov = provider;
pp->ftp_faddr = pdata->ftps_pc;
pp->ftp_ntps = pdata->ftps_noffs;
for (i = 0; i < pdata->ftps_noffs; i++) {
-#if !defined(__APPLE__)
- tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
-#else
tp = zalloc(fasttrap_tracepoint_t_zone);
bzero(tp, sizeof (fasttrap_tracepoint_t));
-#endif
-
tp->ftt_proc = provider->ftp_proc;
tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
tp->ftt_pid = pdata->ftps_pid;
pp->ftp_tps[i].fit_tp = tp;
pp->ftp_tps[i].fit_id.fti_probe = pp;
-#if defined(__APPLE__)
pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
-#endif
}
pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
/*ARGSUSED*/
static void *
-fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
+fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
fasttrap_provider_t *provider;
FASTTRAP_PID_NAME);
return (NULL);
}
-#if defined(__APPLE__)
+
/*
- * We also need to check the other pid provider types
+ * APPLE NOTE: We also need to check the objc and oneshot pid provider types
*/
if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
cmn_err(CE_WARN, "failed to instantiate provider %s: "
FASTTRAP_ONESHOT_NAME);
return (NULL);
}
-#endif /* __APPLE__ */
/*
* The highest stability class that fasttrap supports is ISA; cap
* the stability of the new provider accordingly.
*/
- if (dhpv->dthpv_pattr.dtpa_provider.dtat_class >= DTRACE_CLASS_COMMON)
+ if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
- if (dhpv->dthpv_pattr.dtpa_mod.dtat_class >= DTRACE_CLASS_COMMON)
+ if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
- if (dhpv->dthpv_pattr.dtpa_func.dtat_class >= DTRACE_CLASS_COMMON)
+ if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
- if (dhpv->dthpv_pattr.dtpa_name.dtat_class >= DTRACE_CLASS_COMMON)
+ if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
- if (dhpv->dthpv_pattr.dtpa_args.dtat_class >= DTRACE_CLASS_COMMON)
+ if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
-#if defined(__APPLE__)
- if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
+ if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
&dhpv->dthpv_pattr)) == NULL) {
cmn_err(CE_WARN, "failed to instantiate provider %s for "
- "process %u", dhpv->dthpv_provname, (uint_t)pid);
+ "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
return (NULL);
}
* having duplicate probes. However, duplicate probes are not fatal,
* and there is no way to get that by accident, so we will not check
* for that case.
+ *
+ * UPDATE: It turns out there are several use cases that require adding
+ * probes to existing providers. Disabling the dtrace_probe_lookup()
+ * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
*/
- if (provider->ftp_mcount != 0) {
- /* This is the duplicate provider case. */
- lck_mtx_unlock(&provider->ftp_mtx);
- return NULL;
- }
-#endif /* __APPLE__ */
-
/*
* Up the meta provider count so this provider isn't removed until
* the meta provider has been told to remove it.
*/
ASSERT(provider->ftp_mcount > 0);
+ /*
+ * The offsets must be unique.
+ */
+ qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
+ fasttrap_uint32_cmp);
+ for (i = 1; i < dhpb->dthpb_noffs; i++) {
+ if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
+ dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
+ return;
+ }
+
+ qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
+ fasttrap_uint32_cmp);
+ for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
+ if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
+ dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
+ return;
+ }
+
/*
* Grab the creation lock to ensure consistency between calls to
* dtrace_probe_lookup() and dtrace_probe_create() in the face of
*/
lck_mtx_lock(&provider->ftp_cmtx);
-#if !defined(__APPLE__)
+#if 0
/*
* APPLE NOTE: This is hideously expensive. See note in
* fasttrap_meta_provide() for why we can get away without
return;
}
-#if !defined(__APPLE__)
- pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
- ASSERT(pp != NULL);
-#else
if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
pp = zalloc(fasttrap_probe_t_zones[ntps]);
bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
} else {
pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
}
-#endif
pp->ftp_prov = provider;
pp->ftp_pid = provider->ftp_pid;
* First create a tracepoint for each actual point of interest.
*/
for (i = 0; i < dhpb->dthpb_noffs; i++) {
-#if !defined(__APPLE__)
- tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
-#else
tp = zalloc(fasttrap_tracepoint_t_zone);
bzero(tp, sizeof (fasttrap_tracepoint_t));
-#endif
tp->ftt_proc = provider->ftp_proc;
-#if defined(__APPLE__)
+
/*
* APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
* Unfortunately, a side effect of this is that the relocations do not point at exactly
* the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
*/
-#if defined(__i386__)
+#if defined(__x86_64__)
/*
* Both 32 & 64 bit want to go back one byte, to point at the first NOP
*/
tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
-#elif defined(__ppc__)
- /* All PPC probes are zero offset. */
- tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i];
#else
#error "Architecture not supported"
#endif
-#else
- tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
-#endif
tp->ftt_pid = provider->ftp_pid;
pp->ftp_tps[i].fit_tp = tp;
pp->ftp_tps[i].fit_id.fti_probe = pp;
-#ifdef __sparc
- pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
-#else
pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
-#endif
}
/*
* Then create a tracepoint for each is-enabled point.
*/
for (j = 0; i < ntps; i++, j++) {
-#if !defined(__APPLE__)
- tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
-#else
tp = zalloc(fasttrap_tracepoint_t_zone);
bzero(tp, sizeof (fasttrap_tracepoint_t));
-#endif
tp->ftt_proc = provider->ftp_proc;
-#if defined(__APPLE__)
+
/*
* APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
* Unfortunately, a side effect of this is that the relocations do not point at exactly
* the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
*/
-#if defined(__i386__)
+#if defined(__x86_64__)
/*
* Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
*/
tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
-#elif defined(__ppc__)
- /* All PPC is-enabled probes are zero offset. */
- tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j];
#else
#error "Architecture not supported"
#endif
-#else
- tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
-#endif
tp->ftt_pid = provider->ftp_pid;
pp->ftp_tps[i].fit_tp = tp;
/*ARGSUSED*/
static void
-fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
+fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
/*
* provider until that count has dropped to zero. This just puts
* the provider on death row.
*/
- fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
+ fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
+}
+
+static char*
+fasttrap_meta_provider_name(void *arg)
+{
+ fasttrap_provider_t *fprovider = arg;
+ dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
+ return provider->dtpv_name;
}
static dtrace_mops_t fasttrap_mops = {
fasttrap_meta_create_probe,
fasttrap_meta_provide,
- fasttrap_meta_remove
+ fasttrap_meta_remove,
+ fasttrap_meta_provider_name
};
+/*
+ * Validate a null-terminated string. If str is not null-terminated,
+ * or not a UTF8 valid string, the function returns -1. Otherwise, 0 is
+ * returned.
+ *
+ * str: string to validate.
+ * maxlen: maximal length of the string, null-terminated byte included.
+ */
+static int
+fasttrap_validatestr(char const* str, size_t maxlen) {
+ size_t len;
+
+ assert(str);
+ assert(maxlen != 0);
+
+ /* Check if the string is null-terminated. */
+ len = strnlen(str, maxlen);
+ if (len >= maxlen)
+ return -1;
+
+ /* Finally, check for UTF8 validity. */
+ return utf8_validatestr((unsigned const char*) str, len);
+}
+
/*ARGSUSED*/
static int
-fasttrap_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
+fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
if (!dtrace_attached())
return (EAGAIN);
if (cmd == FASTTRAPIOC_MAKEPROBE) {
- // FIXME! What size is arg? If it is not 64 bit, how do we pass in a 64 bit value?
- fasttrap_probe_spec_t *uprobe = (void *)arg;
fasttrap_probe_spec_t *probe;
uint64_t noffs;
- size_t size, i;
+ size_t size;
int ret;
- char *c;
- /*
- * FIXME! How does this work? The kern is running in 32 bit mode. It has a 32 bit pointer,
- * uprobe. We do address manipulations on it, and still have a 64 bit value? This seems
- * broken. What is the right way to do this?
- */
- if (copyin((user_addr_t)(unsigned long)&uprobe->ftps_noffs, &noffs,
- sizeof (uprobe->ftps_noffs)))
+ if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
+ sizeof (probe->ftps_noffs)))
return (EFAULT);
/*
probe = kmem_alloc(size, KM_SLEEP);
- if (copyin((user_addr_t)(unsigned long)uprobe, probe, size) != 0) {
+ if (copyin(arg, probe, size) != 0 ||
+ probe->ftps_noffs != noffs) {
kmem_free(probe, size);
return (EFAULT);
}
* Verify that the function and module strings contain no
* funny characters.
*/
- for (i = 0, c = &probe->ftps_func[0]; i < sizeof(probe->ftps_func) && *c != '\0'; i++, c++) {
- if (*c < 0x20 || 0x7f <= *c) {
- ret = EINVAL;
- goto err;
- }
- }
- if (*c != '\0') {
+
+ if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
ret = EINVAL;
goto err;
}
- for (i = 0, c = &probe->ftps_mod[0]; i < sizeof(probe->ftps_mod) && *c != '\0'; i++, c++) {
- if (*c < 0x20 || 0x7f <= *c) {
- ret = EINVAL;
- goto err;
- }
- }
- if (*c != '\0') {
+ if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
ret = EINVAL;
goto err;
}
if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
if (p != PROC_NULL)
proc_rele(p);
- return (ESRCH);
+ ret = ESRCH;
+ goto err;
}
// proc_lock(p);
// FIXME! How is this done on OS X?
uint_t index;
// int ret;
- if (copyin((user_addr_t)(unsigned long)arg, &instr, sizeof (instr)) != 0)
+ if (copyin(arg, &instr, sizeof (instr)) != 0)
return (EFAULT);
if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
while (tp != NULL) {
if (instr.ftiq_pid == tp->ftt_pid &&
instr.ftiq_pc == tp->ftt_pc &&
- !tp->ftt_proc->ftpc_defunct)
+ tp->ftt_proc->ftpc_acount != 0)
break;
tp = tp->ftt_next;
sizeof (instr.ftiq_instr));
lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
- if (copyout(&instr, (user_addr_t)(unsigned long)arg, sizeof (instr)) != 0)
+ if (copyout(&instr, arg, sizeof (instr)) != 0)
return (EFAULT);
return (0);
dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
-#if !defined(__APPLE__)
- fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
- "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
-#else
/*
- * We're sizing based on system memory. 100k probes per 256M of system memory.
+ * APPLE NOTE: We size the maximum number of fasttrap probes
+ * based on system memory. 100k probes per 256M of system memory.
* Yes, this is a WAG.
*/
fasttrap_max = (sane_size >> 28) * 100000;
-#endif
+ if (fasttrap_max == 0)
+ fasttrap_max = 50000;
+
fasttrap_total = 0;
/*
fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_tpoints.fth_table != NULL);
-#if defined(__APPLE__)
+
/*
- * We have to explicitly initialize all locks...
+ * APPLE NOTE: explicitly initialize all locks...
*/
unsigned int i;
for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
}
-#endif
/*
* ... and the providers hash table...
fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_provs.fth_table != NULL);
-#if defined(__APPLE__)
+
/*
- * We have to explicitly initialize all locks...
+ * APPLE NOTE: explicitly initialize all locks...
*/
for (i=0; i<fasttrap_provs.fth_nent; i++) {
lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
}
-#endif
/*
* ... and the procs hash table.
fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
sizeof (fasttrap_bucket_t), KM_SLEEP);
ASSERT(fasttrap_procs.fth_table != NULL);
-#if defined(__APPLE__)
+
/*
- * We have to explicitly initialize all locks...
+ * APPLE NOTE: explicitly initialize all locks...
*/
for (i=0; i<fasttrap_procs.fth_nent; i++) {
lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
}
-#endif
(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
&fasttrap_meta_id);
static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
-#pragma unused(p)
int err, rv = 0;
+ user_addr_t uaddrp;
- /*
- * FIXME! 64 bit problem with the data var.
- */
- err = fasttrap_ioctl(dev, (int)cmd, *(intptr_t *)data, fflag, CRED(), &rv);
+ if (proc_is64bit(p))
+ uaddrp = *(user_addr_t *)data;
+ else
+ uaddrp = (user_addr_t) *(uint32_t *)data;
+
+ err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
if (err != 0) {
lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
- if (DDI_FAILURE == fasttrap_attach((dev_info_t *)device, 0 )) {
+ if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
// FIX ME! Do we remove the devfs node here?
// What kind of error reporting?
printf("fasttrap_init: Call to fasttrap_attach failed.\n");
return;
}
- gFasttrapInited = 1;
+ gFasttrapInited = 1;
}
}