return (0);
#else
+#pragma unused(state)
+
return 1; /* Darwin doesn't do zones. */
#endif /* __APPLE__ */
}
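The #pragma unused(state) addition is the idiom this port applies throughout the file: the __APPLE__ side of a conditional often ignores parameters that the Solaris side consumes, and the pragma quiets compiler warnings about the unused parameter without touching the function signature. A minimal sketch of the pattern, using a hypothetical function name not found in this file:

static int
darwin_stub(dtrace_state_t *state)	/* illustrative only */
{
#pragma unused(state)	/* parameter is consumed only on the Solaris side */
	return (1);
}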
dtrace_dstate_percpu_t *dcpu;
int i, work = 0;
- for (i = 0; i < NCPU; i++) {
+ for (i = 0; i < (int)NCPU; i++) {
dcpu = &dstate->dtds_percpu[i];
ASSERT(dcpu->dtdsc_rinsing == NULL);
dtrace_sync();
- for (i = 0; i < NCPU; i++) {
+ for (i = 0; i < (int)NCPU; i++) {
dcpu = &dstate->dtds_percpu[i];
if (dcpu->dtdsc_rinsing == NULL)
case DTRACE_DSTATE_CLEAN: {
void *sp = &dstate->dtds_state;
- if (++cpu >= NCPU)
+ if (++cpu >= (int)NCPU)
cpu = 0;
if (dcpu->dtdsc_dirty != NULL &&
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
+#pragma unused(arg)
if (nval < *oval)
*oval = nval;
}
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
+#pragma unused(arg)
if (nval > *oval)
*oval = nval;
}
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
+#pragma unused(arg)
data[0]++;
data[1] += nval;
}
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
+#pragma unused(nval,arg)
*oval = *oval + 1;
}
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
+#pragma unused(arg)
*oval += nval;
}
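Each aggregating callback above folds the incoming nval into its stored accumulator and now marks the arguments it never reads. Note that dtrace_aggregate_avg never stores an average: it keeps the pair {count, running sum} in data[0] and data[1], and the quotient is taken when the aggregation is read out. A consumer-side sketch of that final step (illustrative, not code from this file):

	/* data[] is the two-word record maintained by dtrace_aggregate_avg() */
	uint64_t count = data[0];
	uint64_t sum = data[1];
	uint64_t avg = (count != 0) ? (sum / count) : 0;	/* division deferred to read time */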
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
+#pragma unused(arg)
dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
uint32_t i, ndx, size, fsize;
uint32_t align = sizeof (uint64_t) - 1;
* string -- setting a bit in the map for every character
* found in the token string.
*/
- for (i = 0; i < sizeof (tokmap); i++)
+ for (i = 0; i < (int)sizeof (tokmap); i++)
tokmap[i] = 0;
for (; tokaddr < toklimit; tokaddr++) {
size_t sz = v->dtdv_type.dtdt_size;
sz += sizeof (uint64_t);
- ASSERT(svar->dtsv_size == NCPU * sz);
+ ASSERT(svar->dtsv_size == (int)NCPU * sz);
a += CPU->cpu_id * sz;
if (*(uint8_t *)a == UINT8_MAX) {
break;
}
- ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
+ ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
regs[rd] = tmp[CPU->cpu_id];
break;
size_t sz = v->dtdv_type.dtdt_size;
sz += sizeof (uint64_t);
- ASSERT(svar->dtsv_size == NCPU * sz);
+ ASSERT(svar->dtsv_size == (int)NCPU * sz);
a += CPU->cpu_id * sz;
if (regs[rd] == NULL) {
break;
}
- ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
+ ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
tmp[CPU->cpu_id] = regs[rd];
break;
-#ifdef lint
uint64_t val = 0;
-#else
- uint64_t val;
-#endif

mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
static int
dtrace_match_nul(const char *s, const char *p, int depth)
{
+#pragma unused(s,p,depth)
return (1); /* always match the empty pattern */
}
static int
dtrace_match_nonzero(const char *s, const char *p, int depth)
{
+#pragma unused(p,depth)
return (s != NULL && s[0] != '\0');
}
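dtrace_match_nul and dtrace_match_nonzero are the trivial ends of the probe-matching family: the first accepts anything (the empty pattern), the second only demands a non-empty candidate string. A rough sketch of how a matcher is chosen for one element of a probe description (the helper name is illustrative; the real selection logic lives elsewhere in dtrace.c):

typedef int dtrace_probekey_f(const char *, const char *, int);

static dtrace_probekey_f *
pick_matcher(const char *p)	/* illustrative helper, not the real function */
{
	if (p == NULL || *p == '\0')
		return (&dtrace_match_nul);	/* empty element matches every probe */
	/* the real code returns dtrace_match_glob or dtrace_match_string here */
	return (&dtrace_match_string);
}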
}
do {
- kmod_info_t *ktl;
/*
* First, call the blanket provide operation.
*/
lck_mtx_unlock(&mod_lock);
#else
-#if 0 /* XXX Workaround for PR_4643546 XXX */
+#if 0 /* FIXME: Workaround for PR_4643546 */
simple_lock(&kmod_lock);
- ktl = kmod;
+ kmod_info_t *ktl = kmod;
while (ktl) {
prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ktl);
ktl = ktl->next;
svarp = &vstate->dtvs_locals;
if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
- dsize = NCPU * (v->dtdv_type.dtdt_size +
+ dsize = (int)NCPU * (v->dtdv_type.dtdt_size +
sizeof (uint64_t));
else
- dsize = NCPU * sizeof (uint64_t);
+ dsize = (int)NCPU * sizeof (uint64_t);
break;
*/
diff = offs + sizeof (dtrace_aggid_t);
- if (diff = (diff & (sizeof (uint64_t) - 1)))
+ if ((diff = (diff & (sizeof (uint64_t) - 1))))
offs += sizeof (uint64_t) - diff;
aggbase = offs - sizeof (dtrace_aggid_t);
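The extra parentheses only mark the assignment-in-condition as intentional; the arithmetic itself is easier to see with numbers. A worked example, assuming a 4-byte dtrace_aggid_t: with offs = 10, diff = 14, 14 & 7 = 6, so offs is bumped to 12 and aggbase = 12 - sizeof (dtrace_aggid_t) = 8, which lands on an 8-byte boundary.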
* of creating our own (saving both time and space).
*/
dtrace_ecb_t *cached = dtrace_ecb_create_cache;
- dtrace_action_t *act = cached->dte_action;
+ dtrace_action_t *act_if = cached->dte_action;
- if (act != NULL) {
- ASSERT(act->dta_refcnt > 0);
- act->dta_refcnt++;
- ecb->dte_action = act;
+ if (act_if != NULL) {
+ ASSERT(act_if->dta_refcnt > 0);
+ act_if->dta_refcnt++;
+ ecb->dte_action = act_if;
ecb->dte_action_last = cached->dte_action_last;
ecb->dte_needed = cached->dte_needed;
ecb->dte_size = cached->dte_size;
return (EFBIG);
#if defined(__APPLE__)
- if (size > (sane_size / 8) / NCPU) /* As in kdbg_set_nkdbufs(), roughly. */
+ if (size > (sane_size / 8) / (int)NCPU) /* As in kdbg_set_nkdbufs(), roughly. */
return (ENOMEM);
#endif /* __APPLE__ */
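The Apple-only guard caps each per-CPU buffer at a slice of sane_size (roughly the physical memory the kernel is prepared to account for), mirroring kdbg_set_nkdbufs(). With illustrative numbers: sane_size = 1 GiB and NCPU = 8 gives (sane_size / 8) / NCPU = 16 MiB, so any larger per-CPU request fails with ENOMEM before an allocation is attempted.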
intptr_t offs = buf->dtb_offset, soffs;
intptr_t woffs;
caddr_t tomax;
- size_t total;
+ size_t total_off;
if (buf->dtb_flags & DTRACEBUF_INACTIVE)
return (-1);
goto out;
}
- total = needed + (offs & (align - 1));
+ total_off = needed + (offs & (align - 1));
/*
* For a ring buffer, life is quite a bit more complicated. Before
* is required.)
*/
if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
- offs + total > buf->dtb_size) {
+ offs + total_off > buf->dtb_size) {
woffs = buf->dtb_xamot_offset;
- if (offs + total > buf->dtb_size) {
+ if (offs + total_off > buf->dtb_size) {
/*
* We can't fit in the end of the buffer. First, a
* sanity check that we can fit in the buffer at all.
*/
- if (total > buf->dtb_size) {
+ if (total_off > buf->dtb_size) {
dtrace_buffer_drop(buf);
return (-1);
}
* that the top of the buffer is aligned.
*/
offs = 0;
- total = needed;
+ total_off = needed;
buf->dtb_flags |= DTRACEBUF_WRAPPED;
} else {
/*
}
}
- while (offs + total > woffs) {
+ while (offs + total_off > woffs) {
dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
size_t size;
if (offs == 0) {
buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
buf->dtb_offset = 0;
- woffs = total;
+ woffs = total_off;
while (woffs < buf->dtb_size)
tomax[woffs++] = 0;
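The zero fill after a wrap is meaningful rather than cosmetic: an enabled-probe ID of zero is DTRACE_EPIDNONE, so clearing everything from the new high-water mark to the end of the ring leaves padding the consumer skips instead of stale record headers it might misparse. Stated as a sketch of the resulting invariant (not code from this function):

	/* after the fill, for any woffs in [total_off, dtb_size - sizeof (uint32_t)): */
	ASSERT(*(uint32_t *)(tomax + woffs) == DTRACE_EPIDNONE);	/* reads as padding */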
{
int i;
- for (i = 0; i < NCPU; i++) {
+ for (i = 0; i < (int)NCPU; i++) {
dtrace_buffer_t *buf = &bufs[i];
if (buf->dtb_tomax == NULL) {
dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched)
{
dtrace_enabling_t *enab;
- int matched, total = 0, err;
+ int matched, total_matched = 0, err;
lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
if ((err = dtrace_enabling_match(enab, &matched)) != 0)
return (err);
- total += matched;
+ total_matched += matched;
}
if (nmatched != NULL)
- *nmatched = total;
+ *nmatched = total_matched;
return (0);
}
static void
dtrace_dof_error(dof_hdr_t *dof, const char *str)
{
+#pragma unused(dof)
if (dtrace_err_verbose)
cmn_err(CE_WARN, "failed to process DOF: %s", str);
size_t ttl = 0;
dof_difohdr_t *dofd;
uintptr_t daddr = (uintptr_t)dof;
- size_t max = dtrace_difo_maxsize;
+ size_t max_size = dtrace_difo_maxsize;
int i, l, n;
static const struct {
dofd->dofd_links[l])) == NULL)
goto err; /* invalid section link */
- if (ttl + subsec->dofs_size > max) {
+ if (ttl + subsec->dofs_size > max_size) {
dtrace_dof_error(dof, "exceeds maximum size");
goto err;
}
int
dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
{
- size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
+ size_t hashsize, maxper, min_size, chunksize = dstate->dtds_chunksize;
void *base;
uintptr_t limit;
dtrace_dynvar_t *dvar, *next, *start;
if ((dstate->dtds_chunksize = chunksize) == 0)
dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
- if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
- size = min;
+ if (size < (min_size = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
+ size = min_size;
if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
return (ENOMEM);
dstate->dtds_size = size;
dstate->dtds_base = base;
dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
- bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
+ bzero(dstate->dtds_percpu, (int)NCPU * sizeof (dtrace_dstate_percpu_t));
hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
limit = (uintptr_t)base + size;
- maxper = (limit - (uintptr_t)start) / NCPU;
+ maxper = (limit - (uintptr_t)start) / (int)NCPU;
maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
- for (i = 0; i < NCPU; i++) {
+ for (i = 0; i < (int)NCPU; i++) {
dstate->dtds_percpu[i].dtdsc_free = dvar = start;
/*
* whatever is left over. In either case, we set the limit to
* be the limit of the dynamic variable space.
*/
- if (maxper == 0 || i == NCPU - 1) {
+ if (maxper == 0 || i == (int)NCPU - 1) {
limit = (uintptr_t)base + size;
start = NULL;
} else {
char c[30];
dtrace_state_t *state;
dtrace_optval_t *opt;
- int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
+ int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
{
dtrace_optval_t *opt = state->dts_options, size;
- processorid_t cpu;
+ processorid_t cpu = 0;
int flags = 0, rval;
lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
dtrace_buffer_t *buf;
cyc_handler_t hdlr;
cyc_time_t when;
- int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
+ int rval = 0, i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
dtrace_icookie_t cookie;
lck_mtx_lock(&cpu_lock);
dtrace_ecb_t *ecb;
dtrace_vstate_t *vstate = &state->dts_vstate;
minor_t minor = getminor(state->dts_dev);
- int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
+ int i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
dtrace_speculation_t *spec = state->dts_speculations;
int nspec = state->dts_nspeculations;
uint32_t match;
if ((svar = vstate->dtvs_locals[i]) == NULL)
continue;
- ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
+ ASSERT(svar->dtsv_size >= (int)NCPU * sizeof (uint64_t));
ent->dtht_locals[i] =
((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
}
uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
uint64_t sarg0 = mstate->dtms_arg[0];
uint64_t sarg1 = mstate->dtms_arg[1];
- uint64_t rval;
+ uint64_t rval = 0;
dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
dtrace_helper_action_t *helper;
dtrace_vstate_t *vstate;
* given generation number.
*/
for (;;) {
- dtrace_helper_provider_t *prov;
+ dtrace_helper_provider_t *prov = NULL;
/*
* Look for a helper provider with the right generation. We
1, INT_MAX, 0);
dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
- sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
+ sizeof (dtrace_dstate_percpu_t) * (int)NCPU, DTRACE_STATE_ALIGN,
NULL, NULL, NULL, NULL, NULL, 0);
lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
static int
dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
+#pragma unused(flag,otyp,cred_p)
minor_t minor = getminor(dev);
dtrace_state_t *state;
static int
dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
+#pragma unused(md)
+
minor_t minor = getminor(dev);
dtrace_state_t *state;
int rval;
if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
return (EFAULT);
- if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
+ if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= (int)NCPU)
return (EINVAL);
lck_mtx_lock(&dtrace_lock);
nerrs = state->dts_errors;
dstate = &state->dts_vstate.dtvs_dynvars;
- for (i = 0; i < NCPU; i++) {
+ for (i = 0; i < (int)NCPU; i++) {
dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
stat.dtst_dyndrops += dcpu->dtdsc_drops;