X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/143464d58d2bd6378e74eec636961ceb0d32fb91..fe8ab488e9161c46dd9885d58fc52996dc0249ff:/bsd/dev/dtrace/dtrace.c?ds=sidebyside diff --git a/bsd/dev/dtrace/dtrace.c b/bsd/dev/dtrace/dtrace.c index 314ed0b57..25f6d7c8e 100644 --- a/bsd/dev/dtrace/dtrace.c +++ b/bsd/dev/dtrace/dtrace.c @@ -69,34 +69,6 @@ * [Group] Functions", allowing one to find each block by searching forward * on capital-f functions. */ -#if !defined(__APPLE__) -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#else #include #include #include @@ -120,16 +92,14 @@ #include #include #include +#include #include -#if defined(__APPLE__) #include extern uint32_t pmap_find_phys(void *, uint64_t); extern boolean_t pmap_valid_page(uint32_t); extern void OSKextRegisterKextsWithDTrace(void); extern kmod_info_t g_kernel_kmod_info; -#endif /* __APPLE__ */ - /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */ #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */ @@ -150,16 +120,16 @@ extern void dtrace_postinit(void); extern kern_return_t chudxnu_dtrace_callback (uint64_t selector, uint64_t *args, uint32_t count); -#endif /* __APPLE__ */ +/* Import this function to retrieve the physical memory. */ +extern int kernel_sysctlbyname(const char *name, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); /* * DTrace Tunable Variables * - * The following variables may be tuned by adding a line to /etc/system that - * includes both the name of the DTrace module ("dtrace") and the name of the - * variable. For example: - * - * set dtrace:dtrace_destructive_disallow = 1 + * The following variables may be dynamically tuned by using sysctl(8), the + * variables being stored in the kern.dtrace namespace. For example: + * sysctl kern.dtrace.dof_maxsize = 1048575 # 1M * * In general, the only variables that one should be tuning this way are those * that affect system-wide DTrace behavior, and for which the default behavior @@ -168,10 +138,10 @@ extern kern_return_t chudxnu_dtrace_callback * When tuning these variables, avoid pathological values; while some attempt * is made to verify the integrity of these variables, they are not considered * part of the supported interface to DTrace, and they are therefore not - * checked comprehensively. Further, these variables should not be tuned - * dynamically via "mdb -kw" or other means; they should only be tuned via - * /etc/system. + * checked comprehensively. 
*/ +uint64_t dtrace_buffer_memory_maxsize = 0; /* initialized in dtrace_init */ +uint64_t dtrace_buffer_memory_inuse = 0; int dtrace_destructive_disallow = 0; dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); size_t dtrace_difo_maxsize = (256 * 1024); @@ -201,6 +171,7 @@ hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ int dtrace_devdepth_max = 32; int dtrace_err_verbose; +int dtrace_provide_private_probes = 0; hrtime_t dtrace_deadman_interval = NANOSEC; hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; @@ -246,7 +217,7 @@ static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ -#if defined(__APPLE__) + static int dtrace_dof_mode; /* See dtrace_impl.h for a description of Darwin's dof modes. */ /* @@ -254,9 +225,8 @@ static int dtrace_dof_mode; /* See dtrace_impl.h for a description of Darwin's * fbt_provide and sdt_provide. Its clearly not a dtrace tunable variable either... */ int dtrace_kernel_symbol_mode; /* See dtrace_impl.h for a description of Darwin's kernel symbol modes. */ -#endif -#if defined(__APPLE__) + /* * To save memory, some common memory allocations are given a * unique zone. For example, dtrace_probe_t is 72 bytes in size, @@ -267,7 +237,6 @@ int dtrace_kernel_symbol_mode; /* See dtrace_impl.h for a description of Darwi struct zone *dtrace_probe_t_zone; static int dtrace_module_unloaded(struct kmod_info *kmod); -#endif /* __APPLE__ */ /* * DTrace Locking @@ -300,17 +269,14 @@ static int dtrace_module_unloaded(struct kmod_info *kmod); * acquired _between_ dtrace_provider_lock and dtrace_lock. */ -#if !defined(__APPLE__) -static kmutex_t dtrace_lock; /* probe state lock */ -static kmutex_t dtrace_provider_lock; /* provider state lock */ -static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ -#else + /* * APPLE NOTE: * - * All kmutex_t vars have been changed to lck_mtx_t. - * Note that lck_mtx_t's require explicit initialization. + * For porting purposes, all kmutex_t vars have been changed + * to lck_mtx_t, which require explicit initialization. * + * kmutex_t becomes lck_mtx_t * mutex_enter() becomes lck_mtx_lock() * mutex_exit() becomes lck_mtx_unlock() * @@ -320,13 +286,11 @@ static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ * becomes: * lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED); * - * Due to the number of these changes, they are not called out explicitly. 
*/ static lck_mtx_t dtrace_lock; /* probe state lock */ static lck_mtx_t dtrace_provider_lock; /* provider state lock */ static lck_mtx_t dtrace_meta_lock; /* meta-provider state lock */ static lck_rw_t dtrace_dof_mode_lock; /* dof mode lock */ -#endif /* __APPLE__ */ /* * DTrace Provider Variables @@ -375,11 +339,7 @@ dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ uint32_t dtrace_helptrace_next = 0; uint32_t dtrace_helptrace_nlocals; char *dtrace_helptrace_buffer; -#if !defined(__APPLE__) /* Quiet compiler warning */ -int dtrace_helptrace_bufsize = 512 * 1024; -#else size_t dtrace_helptrace_bufsize = 512 * 1024; -#endif /* __APPLE__ */ #if DEBUG int dtrace_helptrace_enabled = 1; @@ -387,6 +347,7 @@ int dtrace_helptrace_enabled = 1; int dtrace_helptrace_enabled = 0; #endif + /* * DTrace Error Hashing * @@ -430,7 +391,7 @@ static lck_mtx_t dtrace_errlock; /* * The key for a thread-local variable consists of the lower 61 bits of the - * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. + * current_thread(), plus the 3 bits of the highest active interrupt above LOCK_LEVEL. * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never * equal to a variable identifier. This is necessary (but not sufficient) to * assure that global associative arrays never collide with thread-local @@ -443,17 +404,6 @@ static lck_mtx_t dtrace_errlock; * no way for a global variable key signature to match a thread-local key * signature. */ -#if !defined(__APPLE__) -#define DTRACE_TLS_THRKEY(where) { \ - uint_t intr = 0; \ - uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ - for (; actv; actv >>= 1) \ - intr++; \ - ASSERT(intr < (1 << 3)); \ - (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ - (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ -} -#else #if defined (__x86_64__) /* FIXME: two function calls!! */ #define DTRACE_TLS_THRKEY(where) { \ @@ -466,7 +416,6 @@ static lck_mtx_t dtrace_errlock; #else #error Unknown architecture #endif -#endif /* __APPLE__ */ #define DT_BSWAP_8(x) ((x) & 0xff) #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) @@ -508,44 +457,6 @@ static lck_mtx_t dtrace_errlock; ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ (mstate)->dtms_scratch_ptr >= (alloc_sz)) -#if !defined(__APPLE__) -#define DTRACE_LOADFUNC(bits) \ -/*CSTYLED*/ \ -uint##bits##_t \ -dtrace_load##bits(uintptr_t addr) \ -{ \ - size_t size = bits / NBBY; \ - /*CSTYLED*/ \ - uint##bits##_t rval; \ - int i; \ - volatile uint16_t *flags = (volatile uint16_t *) \ - &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ - \ - DTRACE_ALIGNCHECK(addr, size, flags); \ - \ - for (i = 0; i < dtrace_toxranges; i++) { \ - if (addr >= dtrace_toxrange[i].dtt_limit) \ - continue; \ - \ - if (addr + size <= dtrace_toxrange[i].dtt_base) \ - continue; \ - \ - /* \ - * This address falls within a toxic region; return 0. \ - */ \ - *flags |= CPU_DTRACE_BADADDR; \ - cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ - return (0); \ - } \ - \ - *flags |= CPU_DTRACE_NOFAULT; \ - /*CSTYLED*/ \ - rval = *((volatile uint##bits##_t *)addr); \ - *flags &= ~CPU_DTRACE_NOFAULT; \ - \ - return (!(*flags & CPU_DTRACE_FAULT) ? 
rval : 0); \ -} -#else /* __APPLE__ */ #define RECOVER_LABEL(bits) dtraceLoadRecover##bits: #if defined (__x86_64__) @@ -601,7 +512,6 @@ dtrace_load##bits(uintptr_t addr) \ #else /* all other architectures */ #error Unknown Architecture #endif -#endif /* __APPLE__ */ #ifdef __LP64__ #define dtrace_loadptr dtrace_load64 @@ -636,14 +546,6 @@ dtrace_load##bits(uintptr_t addr) \ (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) -#if defined (__APPLE__) -/* Avoid compiler warnings when assigning regs[rd] = NULL */ -#ifdef NULL -#undef NULL -#define NULL (uintptr_t)0 -#endif -#endif /* __APPLE__ */ - static size_t dtrace_strlen(const char *, size_t); static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); static void dtrace_enabling_provide(dtrace_provider_t *); @@ -661,6 +563,225 @@ static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); + +/* + * DTrace sysctl handlers + * + * These declarations and functions are used for a deeper DTrace configuration. + * Most of them are not per-consumer basis and may impact the other DTrace + * consumers. Correctness may not be supported for all the variables, so you + * should be careful about what values you are using. + */ + +SYSCTL_DECL(_kern_dtrace); +SYSCTL_NODE(_kern, OID_AUTO, dtrace, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "dtrace"); + +static int +sysctl_dtrace_err_verbose SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2) + int changed, error; + int value = *(int *) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) + return (error); + + if (value != 0 && value != 1) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_err_verbose = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.err_verbose + * + * Set DTrace verbosity when an error occured (0 = disabled, 1 = enabld). + * Errors are reported when a DIFO or a DOF has been rejected by the kernel. + */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, err_verbose, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_err_verbose, 0, + sysctl_dtrace_err_verbose, "I", "dtrace error verbose"); + +static int +sysctl_dtrace_buffer_memory_maxsize SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2, req) + int changed, error; + uint64_t value = *(uint64_t *) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) + return (error); + + if (value <= dtrace_buffer_memory_inuse) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_buffer_memory_maxsize = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.buffer_memory_maxsize + * + * Set DTrace maximal size in bytes used by all the consumers' state buffers. By default + * the limit is PHYS_MEM / 3 for *all* consumers. Attempting to set a null, a negative value + * or a value <= to dtrace_buffer_memory_inuse will result in a failure. + */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, buffer_memory_maxsize, + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_buffer_memory_maxsize, 0, + sysctl_dtrace_buffer_memory_maxsize, "Q", "dtrace state buffer memory maxsize"); + +/* + * kern.dtrace.buffer_memory_inuse + * + * Current state buffer memory used, in bytes, by all the DTrace consumers. + * This value is read-only. 
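/*
 * Illustrative only -- not part of this diff.  The thread-local key layout
 * described above packs (thread id + DIF_VARIABLE_MAX) into the low 61 bits
 * and the active interrupt level into the top 3 bits, so a thread key can
 * never collide with a plain variable identifier.  A standalone sketch, with
 * EX_DIF_VARIABLE_MAX as an assumed placeholder constant:
 */
#include <stdint.h>
#include <stdio.h>

#define EX_DIF_VARIABLE_MAX	0x4000	/* placeholder; see dtrace.h for the real DIF_VARIABLE_MAX */

static uint64_t
tls_thrkey(uint64_t tid, unsigned int intr_level)
{
	uint64_t low61 = (tid + EX_DIF_VARIABLE_MAX) & (((uint64_t)1 << 61) - 1);

	return (low61 | ((uint64_t)(intr_level & 0x7) << 61));
}

int
main(void)
{
	printf("key = 0x%016llx\n", (unsigned long long)tls_thrkey(0x12345, 2));
	return (0);
}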
+ */ +SYSCTL_QUAD(_kern_dtrace, OID_AUTO, buffer_memory_inuse, CTLFLAG_RD | CTLFLAG_LOCKED, + &dtrace_buffer_memory_inuse, "dtrace state buffer memory in-use"); + +static int +sysctl_dtrace_difo_maxsize SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2, req) + int changed, error; + size_t value = *(size_t*) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) + return (error); + + if (value <= 0) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_difo_maxsize = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.difo_maxsize + * + * Set the DIFO max size in bytes, check the definition of dtrace_difo_maxsize + * to get the default value. Attempting to set a null or negative size will + * result in a failure. + */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, difo_maxsize, + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_difo_maxsize, 0, + sysctl_dtrace_difo_maxsize, "Q", "dtrace difo maxsize"); + +static int +sysctl_dtrace_dof_maxsize SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2, req) + int changed, error; + dtrace_optval_t value = *(dtrace_optval_t *) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) + return (error); + + if (value <= 0) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_dof_maxsize = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.dof_maxsize + * + * Set the DOF max size in bytes, check the definition of dtrace_dof_maxsize to + * get the default value. Attempting to set a null or negative size will result + * in a failure. + */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, dof_maxsize, + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_dof_maxsize, 0, + sysctl_dtrace_dof_maxsize, "Q", "dtrace dof maxsize"); + +static int +sysctl_dtrace_global_maxsize SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2, req) + int changed, error; + dtrace_optval_t value = *(dtrace_optval_t*) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) + return (error); + + if (value <= 0) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_global_maxsize = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.global_maxsize + * + * Set the global variable max size in bytes, check the definition of + * dtrace_global_maxsize to get the default value. Attempting to set a null or + * negative size will result in a failure. + */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, global_maxsize, + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_global_maxsize, 0, + sysctl_dtrace_global_maxsize, "Q", "dtrace global maxsize"); + +static int +sysctl_dtrace_provide_private_probes SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2) + int error; + int value = *(int *) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, NULL); + if (error) + return (error); + + if (value != 0 && value != 1) + return (ERANGE); + + lck_mtx_lock(&dtrace_lock); + dtrace_provide_private_probes = value; + lck_mtx_unlock(&dtrace_lock); + + return (0); +} + +/* + * kern.dtrace.provide_private_probes + * + * Set whether the providers must provide the private probes. This is + * mainly used by the FBT provider to request probes for the private/static + * symbols. 
+ */ +SYSCTL_PROC(_kern_dtrace, OID_AUTO, provide_private_probes, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &dtrace_provide_private_probes, 0, + sysctl_dtrace_provide_private_probes, "I", "provider must provide the private probes"); + /* * DTrace Probe Context Functions * @@ -861,11 +982,7 @@ static int dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { -#if !defined(__APPLE__) /* Quiet compiler warning - matches dtrace_dif_emulate */ - volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#else volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#endif /* __APPLE */ /* * If we hold the privilege to read from kernel memory, then @@ -1237,11 +1354,7 @@ dtrace_priv_proc_common_user(dtrace_state_t *state) */ ASSERT(s_cr != NULL); -#if !defined(__APPLE__) - if ((cr = CRED()) != NULL && -#else if ((cr = dtrace_CRED()) != NULL && -#endif /* __APPLE__ */ posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_uid && posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_ruid && posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_suid && @@ -1262,7 +1375,7 @@ static int dtrace_priv_proc_common_zone(dtrace_state_t *state) { cred_t *cr, *s_cr = state->dts_cred.dcr_cred; -#pragma unused(cr, s_cr) /* __APPLE__ */ +#pragma unused(cr, s_cr, state) /* __APPLE__ */ /* * We should always have a non-NULL state cred here, since if cred @@ -1270,52 +1383,29 @@ dtrace_priv_proc_common_zone(dtrace_state_t *state) */ ASSERT(s_cr != NULL); -#if !defined(__APPLE__) - if ((cr = CRED()) != NULL && - s_cr->cr_zone == cr->cr_zone) - return (1); - - return (0); -#else -#pragma unused(state) - - return 1; /* Darwin doesn't do zones. */ -#endif /* __APPLE__ */ + return 1; /* APPLE NOTE: Darwin doesn't do zones. */ } /* * This privilege check should be used by actions and subroutines to * verify that the process has not setuid or changed credentials. */ -#if !defined(__APPLE__) -static int -dtrace_priv_proc_common_nocd() -{ - proc_t *proc; - - if ((proc = ttoproc(curthread)) != NULL && - !(proc->p_flag & SNOCD)) - return (1); - - return (0); -} -#else static int dtrace_priv_proc_common_nocd(void) { return 1; /* Darwin omits "No Core Dump" flag. 
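/*
 * Illustrative only -- not part of this diff.  A minimal userland sketch
 * showing how the kern.dtrace.* tunables declared above can be inspected
 * and (as root) adjusted through sysctlbyname(3), replacing the old
 * /etc/system mechanism.  Error handling is kept to a bare minimum.
 */
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dof_maxsize = 0, inuse = 0;
	size_t len = sizeof (dof_maxsize);

	/* kern.dtrace.dof_maxsize is exported as a quad (64-bit) value. */
	if (sysctlbyname("kern.dtrace.dof_maxsize", &dof_maxsize, &len, NULL, 0) == 0)
		printf("dof_maxsize: %llu bytes\n", (unsigned long long)dof_maxsize);

	/* kern.dtrace.buffer_memory_inuse is read-only. */
	len = sizeof (inuse);
	if (sysctlbyname("kern.dtrace.buffer_memory_inuse", &inuse, &len, NULL, 0) == 0)
		printf("buffer memory in use: %llu bytes\n", (unsigned long long)inuse);

	/* Raising dof_maxsize to 1MB requires root; failure is expected otherwise. */
	uint64_t newsize = 1048576;
	if (sysctlbyname("kern.dtrace.dof_maxsize", NULL, NULL, &newsize, sizeof (newsize)) != 0)
		perror("sysctlbyname(kern.dtrace.dof_maxsize)");

	return (0);
}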
*/ } -#endif /* __APPLE__ */ static int dtrace_priv_proc_destructive(dtrace_state_t *state) { int action = state->dts_cred.dcr_action; -#if defined(__APPLE__) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) goto bad; -#endif /* __APPLE__ */ + + if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc())) + goto bad; if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && dtrace_priv_proc_common_zone(state) == 0) @@ -1340,10 +1430,11 @@ bad: static int dtrace_priv_proc_control(dtrace_state_t *state) { -#if defined(__APPLE__) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) goto bad; -#endif /* __APPLE__ */ + + if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc())) + goto bad; if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) return (1); @@ -1353,9 +1444,7 @@ dtrace_priv_proc_control(dtrace_state_t *state) dtrace_priv_proc_common_nocd()) return (1); -#if defined(__APPLE__) bad: -#endif /* __APPLE__ */ cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; return (0); @@ -1364,24 +1453,26 @@ bad: static int dtrace_priv_proc(dtrace_state_t *state) { -#if defined(__APPLE__) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) goto bad; -#endif /* __APPLE__ */ + + if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc())) + goto bad; if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) return (1); -#if defined(__APPLE__) bad: -#endif /* __APPLE__ */ cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; return (0); } -#if defined(__APPLE__) -/* dtrace_priv_proc() omitting the P_LNOATTACH check. For PID and EXECNAME accesses. */ +/* + * The P_LNOATTACH check is an Apple specific check. + * We need a version of dtrace_priv_proc() that omits + * that check for PID and EXECNAME accesses + */ static int dtrace_priv_proc_relaxed(dtrace_state_t *state) { @@ -1393,14 +1484,17 @@ dtrace_priv_proc_relaxed(dtrace_state_t *state) return (0); } -#endif /* __APPLE__ */ static int dtrace_priv_kernel(dtrace_state_t *state) { + if (dtrace_is_restricted()) + goto bad; + if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) return (1); +bad: cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; return (0); @@ -1409,9 +1503,13 @@ dtrace_priv_kernel(dtrace_state_t *state) static int dtrace_priv_kernel_destructive(dtrace_state_t *state) { + if (dtrace_is_restricted()) + goto bad; + if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) return (1); +bad: cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; return (0); @@ -1423,10 +1521,7 @@ dtrace_priv_kernel_destructive(dtrace_state_t *state) * clean the dirty dynamic variable lists on all CPUs. Dynamic variable * cleaning is explained in detail in . */ -#if defined(__APPLE__) /* Quiet compiler warning. */ -static -#endif /* __APPLE__ */ -void +static void dtrace_dynvar_clean(dtrace_dstate_t *dstate) { dtrace_dynvar_t *dirty; @@ -1518,10 +1613,7 @@ dtrace_dynvar_clean(dtrace_dstate_t *dstate) * variable can be allocated. If NULL is returned, the appropriate counter * will be incremented. */ -#if defined(__APPLE__) /* Quiet compiler warning. 
*/ -static -#endif /* __APPLE__ */ -dtrace_dynvar_t * +static dtrace_dynvar_t * dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) @@ -1620,15 +1712,9 @@ dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, while ((lock = *lockp) & 1) continue; -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (dtrace_casptr((void *)lockp, - (void *)lock, (void *)(lock + 1)) == (void *)lock) - break; -#else if (dtrace_casptr((void *)(uintptr_t)lockp, (void *)lock, (void *)(lock + 1)) == (void *)lock) break; -#endif /* __APPLE__ */ } dtrace_membar_producer(); @@ -2498,27 +2584,16 @@ dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, dtrace_speculation_t *spec; dtrace_buffer_t *src, *dest; uintptr_t daddr, saddr, dlimit; -#if !defined(__APPLE__) /* Quiet compiler warning */ - dtrace_speculation_state_t current, new; -#else dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE; -#endif /* __APPLE__ */ intptr_t offs; if (which == 0) return; -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (which > state->dts_nspeculations) { - cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; - return; - } -#else if (which > (dtrace_specid_t)state->dts_nspeculations) { cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return; } -#endif /* __APPLE__ */ spec = &state->dts_speculations[which - 1]; src = &spec->dtsp_buffer[cpu]; @@ -2646,27 +2721,16 @@ dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, dtrace_specid_t which) { dtrace_speculation_t *spec; -#if !defined(__APPLE__) /* Quiet compiler warning */ - dtrace_speculation_state_t current, new; -#else dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE; -#endif /* __APPLE__ */ dtrace_buffer_t *buf; if (which == 0) return; -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (which > state->dts_nspeculations) { - cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; - return; - } -#else if (which > (dtrace_specid_t)state->dts_nspeculations) { cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return; } -#endif /* __APPLE__ */ spec = &state->dts_speculations[which - 1]; buf = &spec->dtsp_buffer[cpu]; @@ -2726,11 +2790,7 @@ dtrace_speculation_clean_here(dtrace_state_t *state) return; } -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (i = 0; i < state->dts_nspeculations; i++) { -#else for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) { -#endif /* __APPLE__ */ dtrace_speculation_t *spec = &state->dts_speculations[i]; dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; @@ -2765,19 +2825,11 @@ dtrace_speculation_clean_here(dtrace_state_t *state) static void dtrace_speculation_clean(dtrace_state_t *state) { -#if !defined(__APPLE__) /* Quiet compiler warning */ - int work = 0, rv; -#else int work = 0; uint32_t rv; -#endif /* __APPLE__ */ dtrace_specid_t i; -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (i = 0; i < state->dts_nspeculations; i++) { -#else for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) { -#endif /* __APPLE__ */ dtrace_speculation_t *spec = &state->dts_speculations[i]; ASSERT(!spec->dtsp_cleaning); @@ -2801,11 +2853,7 @@ dtrace_speculation_clean(dtrace_state_t *state) * speculation buffers, as appropriate. We can now set the state * to inactive. 
*/ -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (i = 0; i < state->dts_nspeculations; i++) { -#else for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) { -#endif /* __APPLE__ */ dtrace_speculation_t *spec = &state->dts_speculations[i]; dtrace_speculation_state_t current, new; @@ -2836,21 +2884,13 @@ dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, dtrace_specid_t which) { dtrace_speculation_t *spec; -#if !defined(__APPLE__) /* Quiet compiler warning */ - dtrace_speculation_state_t current, new; -#else dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE; -#endif /* __APPLE__ */ dtrace_buffer_t *buf; if (which == 0) return (NULL); -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (which > state->dts_nspeculations) { -#else if (which > (dtrace_specid_t)state->dts_nspeculations) { -#endif /* __APPLE__ */ cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return (NULL); } @@ -2911,9 +2951,7 @@ dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, * dtrace_dif_variable() uses this routine as a helper for various * builtin values such as 'execname' and 'probefunc.' */ -#if defined(__APPLE__) /* Quiet compiler warning. */ static -#endif /* __APPLE__ */ uintptr_t dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, dtrace_mstate_t *mstate) @@ -2940,7 +2978,7 @@ dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, if (mstate->dtms_scratch_ptr + strsz > mstate->dtms_scratch_base + mstate->dtms_scratch_size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - return (NULL); + return (0); } dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, @@ -2972,12 +3010,10 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); if (ndx >= sizeof (mstate->dtms_arg) / sizeof (mstate->dtms_arg[0])) { -#if !defined(__APPLE__) - int aframes = mstate->dtms_probe->dtpr_aframes + 2; -#else - /* Account for introduction of __dtrace_probe() on xnu. */ + /* + * APPLE NOTE: Account for introduction of __dtrace_probe() + */ int aframes = mstate->dtms_probe->dtpr_aframes + 3; -#endif /* __APPLE__ */ dtrace_provider_t *pv; uint64_t val; @@ -2986,12 +3022,11 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, mstate->dtms_probe->dtpr_id, mstate->dtms_probe->dtpr_arg, ndx, aframes); -#if defined(__APPLE__) /* Special case access of arg5 as passed to dtrace_probe_error() (which see.) 
*/ else if (mstate->dtms_probe->dtpr_id == dtrace_probeid_error && ndx == 5) { return ((dtrace_state_t *)(uintptr_t)(mstate->dtms_arg[0]))->dts_arg_error_illval; } -#endif /* __APPLE__ */ + else val = dtrace_getarg(ndx, aframes); @@ -3012,22 +3047,6 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (mstate->dtms_arg[ndx]); -#if !defined(__APPLE__) - case DIF_VAR_UREGS: { - klwp_t *lwp; - - if (!dtrace_priv_proc(state)) - return (0); - - if ((lwp = curthread->t_lwp) == NULL) { - DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); - cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; - return (0); - } - - return (dtrace_getreg(lwp->lwp_regs, ndx)); - } -#else case DIF_VAR_UREGS: { thread_t thread; @@ -3042,20 +3061,13 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (dtrace_getreg(find_user_regs(thread), ndx)); } -#endif /* __APPLE__ */ -#if !defined(__APPLE__) - case DIF_VAR_CURTHREAD: - if (!dtrace_priv_kernel(state)) - return (0); - return ((uint64_t)(uintptr_t)curthread); -#else + case DIF_VAR_CURTHREAD: if (!dtrace_priv_kernel(state)) return (0); return ((uint64_t)(uintptr_t)current_thread()); -#endif /* __APPLE__ */ case DIF_VAR_TIMESTAMP: if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { @@ -3064,15 +3076,9 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, } return (mstate->dtms_timestamp); -#if !defined(__APPLE__) - case DIF_VAR_VTIMESTAMP: - ASSERT(dtrace_vtime_references != 0); - return (curthread->t_dtrace_vtime); -#else case DIF_VAR_VTIMESTAMP: ASSERT(dtrace_vtime_references != 0); return (dtrace_get_thread_vtime(current_thread())); -#endif /* __APPLE__ */ case DIF_VAR_WALLTIMESTAMP: if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { @@ -3081,6 +3087,13 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, } return (mstate->dtms_walltimestamp); + case DIF_VAR_MACHTIMESTAMP: + if (!(mstate->dtms_present & DTRACE_MSTATE_MACHTIMESTAMP)) { + mstate->dtms_machtimestamp = mach_absolute_time(); + mstate->dtms_present |= DTRACE_MSTATE_MACHTIMESTAMP; + } + return (mstate->dtms_machtimestamp); + case DIF_VAR_IPL: if (!dtrace_priv_kernel(state)) return (0); @@ -3102,12 +3115,10 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, if (!dtrace_priv_kernel(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { -#if !defined(__APPLE__) - int aframes = mstate->dtms_probe->dtpr_aframes + 2; -#else - /* Account for introduction of __dtrace_probe() on xnu. */ + /* + * APPLE NOTE: Account for introduction of __dtrace_probe() + */ int aframes = mstate->dtms_probe->dtpr_aframes + 3; -#endif /* __APPLE__ */ mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; @@ -3138,12 +3149,10 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, if (!dtrace_priv_kernel(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { -#if !defined(__APPLE__) - int aframes = mstate->dtms_probe->dtpr_aframes + 2; -#else - /* Account for introduction of __dtrace_probe() on xnu. 
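/*
 * Illustrative only -- not part of this diff.  The new `machtimestamp'
 * DIF variable added above returns raw mach_absolute_time() ticks.  A
 * userland consumer comparing those values against its own clock might
 * convert them to nanoseconds with mach_timebase_info(), roughly so:
 */
#include <mach/mach_time.h>
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	mach_timebase_info_data_t tb;
	uint64_t ticks, ns;

	mach_timebase_info(&tb);		/* numer/denom scale ticks to ns */
	ticks = mach_absolute_time();
	ns = ticks * tb.numer / tb.denom;

	printf("machtimestamp: %llu ticks (~%llu ns)\n",
	    (unsigned long long)ticks, (unsigned long long)ns);
	return (0);
}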
*/ + /* + * APPLE NOTE: Account for introduction of __dtrace_probe() + */ int aframes = mstate->dtms_probe->dtpr_aframes + 3; -#endif /* __APPLE__ */ if (!DTRACE_ANCHORED(mstate->dtms_probe)) { /* @@ -3158,11 +3167,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); mstate->dtms_caller = caller[1]; } else if ((mstate->dtms_caller = -#if !defined(__APPLE__) /* Quiet compiler warnings */ - dtrace_caller(aframes)) == -1) { -#else - dtrace_caller(aframes)) == (uintptr_t)-1) { -#endif /* __APPLE__ */ + dtrace_caller(aframes)) == (uintptr_t)-1) { /* * We have failed to do this the quick way; * we must resort to the slower approach of @@ -3192,7 +3197,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, * uint64_t will contain the caller, which is what * we're after. */ - ustack[2] = NULL; + ustack[2] = 0; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_getupcstack(ustack, 3); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); @@ -3226,30 +3231,6 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, (uintptr_t)mstate->dtms_probe->dtpr_name, state, mstate)); -#if !defined(__APPLE__) - case DIF_VAR_PID: - if (!dtrace_priv_proc(state)) - return (0); - - /* - * Note that we are assuming that an unanchored probe is - * always due to a high-level interrupt. (And we're assuming - * that there is only a single high level interrupt.) - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return (pid0.pid_id); - - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. - * Further, it is always safe to dereference the p_pidp member - * of one's own proc structure. (These are truisms becuase - * threads and processes don't clean up their own state -- - * they leave that task to whomever reaps them.) - */ - return ((uint64_t)curthread->t_procp->p_pidp->pid_id); - -#else case DIF_VAR_PID: if (!dtrace_priv_proc_relaxed(state)) return (0); @@ -3264,27 +3245,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return 0; return ((uint64_t)dtrace_proc_selfpid()); -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_PPID: - if (!dtrace_priv_proc(state)) - return (0); - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return (pid0.pid_id); - - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. - * (This is true because threads don't clean up their own - * state -- they leave that task to whomever reaps them.) - */ - return ((uint64_t)curthread->t_procp->p_ppid); -#else case DIF_VAR_PPID: if (!dtrace_priv_proc_relaxed(state)) return (0); @@ -3296,18 +3257,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (0); return ((uint64_t)dtrace_proc_selfppid()); -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_TID: - /* - * See comment in DIF_VAR_PID. 
- */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return (0); - return ((uint64_t)curthread->t_tid); -#else case DIF_VAR_TID: /* We do not need to check for null current_thread() */ return thread_tid(current_thread()); /* globally unique */ @@ -3325,29 +3275,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, /* We do not need to check for null current_thread() */ return thread_dispatchqaddr(current_thread()); -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_EXECNAME: - if (!dtrace_priv_proc(state)) - return (0); - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return ((uint64_t)(uintptr_t)p0.p_user.u_comm); - - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. - * (This is true because threads don't clean up their own - * state -- they leave that task to whomever reaps them.) - */ - return (dtrace_dif_varstr( - (uintptr_t)curthread->t_procp->p_user.u_comm, - state, mstate)); -#else case DIF_VAR_EXECNAME: { char *xname = (char *)mstate->dtms_scratch_ptr; @@ -3367,29 +3295,8 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return ((uint64_t)(uintptr_t)xname); } -#endif /* __APPLE__ */ -#if !defined(__APPLE__) - case DIF_VAR_ZONENAME: - if (!dtrace_priv_proc(state)) - return (0); - - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. - * (This is true because threads don't clean up their own - * state -- they leave that task to whomever reaps them.) - */ - return (dtrace_dif_varstr( - (uintptr_t)curthread->t_procp->p_zone->zone_name, - state, mstate)); -#else case DIF_VAR_ZONENAME: { /* scratch_size is equal to length('global') + 1 for the null-terminator. */ @@ -3412,30 +3319,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return ((uint64_t)(uintptr_t)zname); } -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_UID: - if (!dtrace_priv_proc(state)) - return (0); - - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return ((uint64_t)p0.p_cred->cr_uid); - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. - * (This is true because threads don't clean up their own - * state -- they leave that task to whomever reaps them.) - * - * Additionally, it is safe to dereference one's own process - * credential, since this is never NULL after process birth. - */ - return ((uint64_t)curthread->t_procp->p_cred->cr_uid); -#else case DIF_VAR_UID: if (!dtrace_priv_proc_relaxed(state)) return (0); @@ -3447,30 +3331,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (0); return ((uint64_t) dtrace_proc_selfruid()); -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_GID: - if (!dtrace_priv_proc(state)) - return (0); - - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return ((uint64_t)p0.p_cred->cr_gid); - /* - * It is always safe to dereference one's own t_procp pointer: - * it always points to a valid, allocated proc structure. 
- * (This is true because threads don't clean up their own - * state -- they leave that task to whomever reaps them.) - * - * Additionally, it is safe to dereference one's own process - * credential, since this is never NULL after process birth. - */ - return ((uint64_t)curthread->t_procp->p_cred->cr_gid); -#else case DIF_VAR_GID: if (!dtrace_priv_proc(state)) return (0); @@ -3489,32 +3350,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); return -1ULL; } -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_VAR_ERRNO: { - klwp_t *lwp; - if (!dtrace_priv_proc(state)) - return (0); - - /* - * See comment in DIF_VAR_PID. - */ - if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) - return (0); - - /* - * It is always safe to dereference one's own t_lwp pointer in - * the event that this pointer is non-NULL. (This is true - * because threads and lwps don't clean up their own state -- - * they leave that task to whomever reaps them.) - */ - if ((lwp = curthread->t_lwp) == NULL) - return (0); - return ((uint64_t)lwp->lwp_errno); - } -#else case DIF_VAR_ERRNO: { uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); if (!dtrace_priv_proc(state)) @@ -3533,7 +3369,6 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return -1ULL; } } -#endif /* __APPLE__ */ default: DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); @@ -3554,11 +3389,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, dtrace_mstate_t *mstate, dtrace_state_t *state) { volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; -#if !defined(__APPLE__) - volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#else volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#endif /* __APPLE__ */ dtrace_vstate_t *vstate = &state->dts_vstate; #if !defined(__APPLE__) @@ -3584,7 +3415,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_MUTEX_OWNED: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3598,7 +3429,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_MUTEX_OWNER: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3613,7 +3444,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3624,7 +3455,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_MUTEX_TYPE_SPIN: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3637,7 +3468,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3649,7 +3480,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_RW_WRITE_HELD: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3660,7 +3491,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_RW_ISWRITER: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3687,7 +3518,7 @@ 
dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } if (!dtrace_canload(src, size, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -3715,18 +3546,14 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, if (scratch_size < size || !DTRACE_INSCRATCH(mstate, scratch_size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } if (subr == DIF_SUBR_COPYIN) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); -#if !defined(__APPLE__) - dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); -#else if (dtrace_priv_proc(state)) dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); -#endif /* __APPLE__ */ DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); } @@ -3751,12 +3578,8 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); -#if !defined(__APPLE__) - dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); -#else if (dtrace_priv_proc(state)) dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); -#endif /* __APPLE__ */ DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; } @@ -3775,17 +3598,13 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, */ if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); -#if !defined(__APPLE__) - dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); -#else if (dtrace_priv_proc(state)) dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); -#endif /* __APPLE__ */ DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); ((char *)dest)[size - 1] = '\0'; @@ -3794,65 +3613,6 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, break; } -#if !defined(__APPLE__) - case DIF_SUBR_MSGSIZE: - case DIF_SUBR_MSGDSIZE: { - uintptr_t baddr = tupregs[0].dttk_value, daddr; - uintptr_t wptr, rptr; - size_t count = 0; - int cont = 0; - - while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { - - if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, - vstate)) { - regs[rd] = NULL; - break; - } - - wptr = dtrace_loadptr(baddr + - offsetof(mblk_t, b_wptr)); - - rptr = dtrace_loadptr(baddr + - offsetof(mblk_t, b_rptr)); - - if (wptr < rptr) { - *flags |= CPU_DTRACE_BADADDR; - *illval = tupregs[0].dttk_value; - break; - } - - daddr = dtrace_loadptr(baddr + - offsetof(mblk_t, b_datap)); - - baddr = dtrace_loadptr(baddr + - offsetof(mblk_t, b_cont)); - - /* - * We want to prevent against denial-of-service here, - * so we're only going to search the list for - * dtrace_msgdsize_max mblks. 
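/*
 * Illustrative only -- not part of this diff.  The copyin/copyinstr and
 * string subroutines above all carve temporary buffers out of the
 * per-probe scratch region with the same pattern: check DTRACE_INSCRATCH,
 * take the current scratch pointer, then bump it by the requested size.
 * A simplified standalone version of that bump allocator:
 */
#include <stdint.h>
#include <stddef.h>

typedef struct scratch {
	uintptr_t base;		/* start of the scratch region */
	size_t    size;		/* total size of the region */
	uintptr_t ptr;		/* next free byte (bump pointer) */
} scratch_t;

/* Returns a pointer to `len' bytes of scratch, or 0 if it would overflow. */
static uintptr_t
scratch_alloc(scratch_t *s, size_t len)
{
	/* Mirrors DTRACE_INSCRATCH(): remaining space must cover the request. */
	if (s->base + s->size - s->ptr < len)
		return (0);	/* caller sets CPU_DTRACE_NOSCRATCH and returns 0 */

	uintptr_t ret = s->ptr;
	s->ptr += len;		/* region is reset between probe firings */
	return (ret);
}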
- */ - if (cont++ > dtrace_msgdsize_max) { - *flags |= CPU_DTRACE_ILLOP; - break; - } - - if (subr == DIF_SUBR_MSGDSIZE) { - if (dtrace_load8(daddr + - offsetof(dblk_t, db_type)) != M_DATA) - continue; - } - - count += wptr - rptr; - } - - if (!(*flags & CPU_DTRACE_FAULT)) - regs[rd] = count; - - break; - } -#else case DIF_SUBR_MSGSIZE: case DIF_SUBR_MSGDSIZE: { /* Darwin does not implement SysV streams messages */ @@ -3860,29 +3620,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, regs[rd] = 0; break; } -#endif /* __APPLE__ */ -#if !defined(__APPLE__) - case DIF_SUBR_PROGENYOF: { - pid_t pid = tupregs[0].dttk_value; - proc_t *p; - int rval = 0; - - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - - for (p = curthread->t_procp; p != NULL; p = p->p_parent) { - if (p->p_pidp->pid_id == pid) { - rval = 1; - break; - } - } - - DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - - regs[rd] = rval; - break; - } -#else case DIF_SUBR_PROGENYOF: { pid_t pid = tupregs[0].dttk_value; struct proc *p = current_proc(); @@ -3911,16 +3649,15 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, regs[rd] = rval; break; } -#endif /* __APPLE__ */ case DIF_SUBR_SPECULATION: regs[rd] = dtrace_speculation(state); break; -#if !defined(__APPLE__) + case DIF_SUBR_COPYOUT: { uintptr_t kaddr = tupregs[0].dttk_value; - uintptr_t uaddr = tupregs[1].dttk_value; + user_addr_t uaddr = tupregs[1].dttk_value; uint64_t size = tupregs[2].dttk_value; if (!dtrace_destructive_disallow && @@ -3935,7 +3672,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, case DIF_SUBR_COPYOUTSTR: { uintptr_t kaddr = tupregs[0].dttk_value; - uintptr_t uaddr = tupregs[1].dttk_value; + user_addr_t uaddr = tupregs[1].dttk_value; uint64_t size = tupregs[2].dttk_value; if (!dtrace_destructive_disallow && @@ -3947,37 +3684,6 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } break; } -#else - case DIF_SUBR_COPYOUT: { - uintptr_t kaddr = tupregs[0].dttk_value; - user_addr_t uaddr = tupregs[1].dttk_value; - uint64_t size = tupregs[2].dttk_value; - - if (!dtrace_destructive_disallow && - dtrace_priv_proc_control(state) && - !dtrace_istoxic(kaddr, size)) { - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - dtrace_copyout(kaddr, uaddr, size, flags); - DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - } - break; - } - - case DIF_SUBR_COPYOUTSTR: { - uintptr_t kaddr = tupregs[0].dttk_value; - user_addr_t uaddr = tupregs[1].dttk_value; - uint64_t size = tupregs[2].dttk_value; - - if (!dtrace_destructive_disallow && - dtrace_priv_proc_control(state) && - !dtrace_istoxic(kaddr, size)) { - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - dtrace_copyoutstr(kaddr, uaddr, size, flags); - DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - } - break; - } -#endif /* __APPLE__ */ case DIF_SUBR_STRLEN: { size_t sz; @@ -3986,7 +3692,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, state->dts_options[DTRACEOPT_STRSIZE]); if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4009,7 +3715,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; char c, target = (char)tupregs[1].dttk_value; - for (regs[rd] = NULL; addr < limit; addr++) { + for (regs[rd] = 0; addr < limit; addr++) { if ((c = dtrace_load8(addr)) == target) { regs[rd] = addr; @@ -4022,7 +3728,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4053,13 +3759,13 @@ 
dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, regs[rd] = notfound; if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4137,21 +3843,13 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, break; } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (pos > len) -#else if ((size_t)pos > len) -#endif /* __APPLE__ */ pos = len; } else { if (pos < 0) pos = 0; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (pos >= len) { -#else if ((size_t)pos >= len) { -#endif /* __APPLE__ */ if (sublen == 0) regs[rd] = len; break; @@ -4193,30 +3891,25 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t limit, toklimit = tokaddr + size; char *dest = (char *)mstate->dtms_scratch_ptr; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - uint8_t c, tokmap[32]; /* 256 / 8 */ - int i; -#else uint8_t c='\0', tokmap[32]; /* 256 / 8 */ uint64_t i = 0; -#endif /* __APPLE__ */ /* * Check both the token buffer and (later) the input buffer, * since both could be non-scratch addresses. */ if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } - if (addr == NULL) { + if (addr == 0) { /* * If the address specified is NULL, we use our saved * strtok pointer from the mstate. Note that this @@ -4235,9 +3928,9 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, * would fail this access check. */ if (!dtrace_strcanload(addr, size, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; - } + } } /* @@ -4275,8 +3968,8 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, * We return NULL in this case, and we set the saved * address to NULL as well. 
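/*
 * Illustrative only -- not part of this diff.  The strtok subroutine above
 * keeps its delimiter set in `tokmap', a 32-byte (256-bit) bitmap indexed
 * by character value, and remembers its position in mstate->dtms_strtok
 * between calls (passing NULL resumes from the saved position, as with
 * strtok(3)).  A standalone sketch of the bitmap part:
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	uint8_t tokmap[32];			/* 256 bits, one per character */
	const char *delims = " \t,";
	const char *input = "foo, bar\tbaz";

	memset(tokmap, 0, sizeof (tokmap));
	for (const char *d = delims; *d != '\0'; d++) {
		uint8_t c = (uint8_t)*d;
		tokmap[c >> 3] |= (1 << (c & 7));	/* mark c as a delimiter */
	}

	/* Membership test: one shift, one mask -- no strchr() per character. */
	for (const char *p = input; *p != '\0'; p++) {
		uint8_t c = (uint8_t)*p;
		if (tokmap[c >> 3] & (1 << (c & 7)))
			putchar('|');			/* delimiter */
		else
			putchar(*p);
	}
	putchar('\n');
	return (0);
}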
*/ - regs[rd] = NULL; - mstate->dtms_strtok = NULL; + regs[rd] = 0; + mstate->dtms_strtok = 0; break; } @@ -4312,13 +4005,13 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, int64_t i = 0; if (!dtrace_canload(s, len + 1, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4334,15 +4027,6 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (index >= len || index < 0) { - remaining = 0; - } else if (remaining < 0) { - remaining += len - index; - } else if (index + remaining > size) { - remaining = size - index; - } -#else if ((size_t)index >= len || index < 0) { remaining = 0; } else if (remaining < 0) { @@ -4350,7 +4034,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, } else if ((uint64_t)index + (uint64_t)remaining > size) { remaining = size - index; } -#endif /* __APPLE__ */ + for (i = 0; i < remaining; i++) { if ((d[i] = dtrace_load8(s + index + i)) == '\0') break; @@ -4363,265 +4047,44 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, break; } -#if !defined(__APPLE__) - case DIF_SUBR_GETMAJOR: -#ifdef _LP64 - regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; -#else - regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; -#endif - break; - -#else /* __APPLE__ */ case DIF_SUBR_GETMAJOR: regs[rd] = (uintptr_t)major( (dev_t)tupregs[0].dttk_value ); break; -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_SUBR_GETMINOR: -#ifdef _LP64 - regs[rd] = tupregs[0].dttk_value & MAXMIN64; -#else - regs[rd] = tupregs[0].dttk_value & MAXMIN; -#endif - break; -#else /* __APPLE__ */ case DIF_SUBR_GETMINOR: regs[rd] = (uintptr_t)minor( (dev_t)tupregs[0].dttk_value ); break; -#endif /* __APPLE__ */ - -#if !defined(__APPLE__) - case DIF_SUBR_DDI_PATHNAME: { - /* - * This one is a galactic mess. We are going to roughly - * emulate ddi_pathname(), but it's made more complicated - * by the fact that we (a) want to include the minor name and - * (b) must proceed iteratively instead of recursively. - */ - uintptr_t dest = mstate->dtms_scratch_ptr; - uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; - char *start = (char *)dest, *end = start + size - 1; - uintptr_t daddr = tupregs[0].dttk_value; - int64_t minor = (int64_t)tupregs[1].dttk_value; - char *s; - int i, len, depth = 0; - - /* - * Due to all the pointer jumping we do and context we must - * rely upon, we just mandate that the user must have kernel - * read privileges to use this routine. - */ - if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { - *flags |= CPU_DTRACE_KPRIV; - *illval = daddr; - regs[rd] = NULL; - } - - if (!DTRACE_INSCRATCH(mstate, size)) { - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; - break; - } - - *end = '\0'; - - /* - * We want to have a name for the minor. In order to do this, - * we need to walk the minor list from the devinfo. We want - * to be sure that we don't infinitely walk a circular list, - * so we check for circularity by sending a scout pointer - * ahead two elements for every element that we iterate over; - * if the list is circular, these will ultimately point to the - * same element. 
You may recognize this little trick as the - * answer to a stupid interview question -- one that always - * seems to be asked by those who had to have it laboriously - * explained to them, and who can't even concisely describe - * the conditions under which one would be forced to resort to - * this technique. Needless to say, those conditions are - * found here -- and probably only here. Is this the only use - * of this infamous trick in shipping, production code? If it - * isn't, it probably should be... - */ - if (minor != -1) { - uintptr_t maddr = dtrace_loadptr(daddr + - offsetof(struct dev_info, devi_minor)); - - uintptr_t next = offsetof(struct ddi_minor_data, next); - uintptr_t name = offsetof(struct ddi_minor_data, - d_minor) + offsetof(struct ddi_minor, name); - uintptr_t dev = offsetof(struct ddi_minor_data, - d_minor) + offsetof(struct ddi_minor, dev); - uintptr_t scout; - - if (maddr != NULL) - scout = dtrace_loadptr(maddr + next); - - while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { - uint64_t m; -#ifdef _LP64 - m = dtrace_load64(maddr + dev) & MAXMIN64; -#else - m = dtrace_load32(maddr + dev) & MAXMIN; -#endif - if (m != minor) { - maddr = dtrace_loadptr(maddr + next); - - if (scout == NULL) - continue; - - scout = dtrace_loadptr(scout + next); - - if (scout == NULL) - continue; - - scout = dtrace_loadptr(scout + next); - - if (scout == NULL) - continue; - - if (scout == maddr) { - *flags |= CPU_DTRACE_ILLOP; - break; - } - - continue; - } - - /* - * We have the minor data. Now we need to - * copy the minor's name into the end of the - * pathname. - */ - s = (char *)dtrace_loadptr(maddr + name); - len = dtrace_strlen(s, size); - - if (*flags & CPU_DTRACE_FAULT) - break; - - if (len != 0) { - if ((end -= (len + 1)) < start) - break; - - *end = ':'; - } - - for (i = 1; i <= len; i++) - end[i] = dtrace_load8((uintptr_t)s++); - break; - } - } - - while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { - ddi_node_state_t devi_state; - - devi_state = dtrace_load32(daddr + - offsetof(struct dev_info, devi_node_state)); - - if (*flags & CPU_DTRACE_FAULT) - break; - - if (devi_state >= DS_INITIALIZED) { - s = (char *)dtrace_loadptr(daddr + - offsetof(struct dev_info, devi_addr)); - len = dtrace_strlen(s, size); - - if (*flags & CPU_DTRACE_FAULT) - break; - - if (len != 0) { - if ((end -= (len + 1)) < start) - break; - - *end = '@'; - } - - for (i = 1; i <= len; i++) - end[i] = dtrace_load8((uintptr_t)s++); - } - - /* - * Now for the node name... - */ - s = (char *)dtrace_loadptr(daddr + - offsetof(struct dev_info, devi_node_name)); - daddr = dtrace_loadptr(daddr + - offsetof(struct dev_info, devi_parent)); - - /* - * If our parent is NULL (that is, if we're the root - * node), we're going to use the special path - * "devices". 
- */ - if (daddr == NULL) - s = "devices"; - - len = dtrace_strlen(s, size); - if (*flags & CPU_DTRACE_FAULT) - break; - - if ((end -= (len + 1)) < start) - break; - - for (i = 1; i <= len; i++) - end[i] = dtrace_load8((uintptr_t)s++); - *end = '/'; - - if (depth++ > dtrace_devdepth_max) { - *flags |= CPU_DTRACE_ILLOP; - break; - } - } - - if (end < start) - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - - if (daddr == NULL) { - regs[rd] = (uintptr_t)end; - mstate->dtms_scratch_ptr += size; - } - - break; - } -#else case DIF_SUBR_DDI_PATHNAME: { - /* FIXME: awaits galactic disentanglement ;-} */ + /* APPLE NOTE: currently unsupported on Darwin */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - regs[rd] = NULL; + regs[rd] = 0; break; } -#endif /* __APPLE__ */ case DIF_SUBR_STRJOIN: { char *d = (char *)mstate->dtms_scratch_ptr; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t s1 = tupregs[0].dttk_value; uintptr_t s2 = tupregs[1].dttk_value; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i = 0; -#else uint64_t i = 0; -#endif /* __APPLE__ */ if (!dtrace_strcanload(s1, size, mstate, vstate) || !dtrace_strcanload(s2, size, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } for (;;) { if (i >= size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4634,7 +4097,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, for (;;) { if (i >= size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4658,7 +4121,7 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4716,13 +4179,13 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, int start, end; if (!dtrace_canload(src, len + 1, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4828,13 +4291,8 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, end = lastbase; } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - for (i = start, j = 0; i <= end && j < size - 1; i++, j++) - dest[j] = dtrace_load8(src + i); -#else for (i = start, j = 0; i <= end && (uint64_t)j < size - 1; i++, j++) dest[j] = dtrace_load8(src + i); -#endif /* __APPLE__ */ dest[j] = '\0'; regs[rd] = (uintptr_t)dest; @@ -4849,13 +4307,13 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, int i = 0, j = 0; if (!dtrace_strcanload(src, size, mstate, vstate)) { - regs[rd] = NULL; + regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -4865,13 +4323,8 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, do { c = dtrace_load8(src + i++); next: -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (j + 5 >= size) /* 5 = strlen("/..c\0") */ - break; -#else if ((uint64_t)(j + 5) >= size) /* 5 = strlen("/..c\0") */ break; -#endif /* __APPLE__ */ if (c != '/') { dest[j++] = c; @@ -4998,7 +4451,7 @@ next: #endif /* __APPLE__ */ if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } base = (char *)mstate->dtms_scratch_ptr; @@ -5057,7 +4510,7 @@ next: size = INET6_ADDRSTRLEN; if 
(!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } base = (char *)mstate->dtms_scratch_ptr; @@ -5071,11 +4524,7 @@ next: firstzero = -1; tryzero = -1; numzero = 1; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - for (i = 0; i < sizeof (struct in6_addr); i++) { -#else for (i = 0; i < (int)sizeof (struct in6_addr); i++) { -#endif /* __APPLE__ */ if (ip6._S6_un._S6_u8[i] == 0 && tryzero == -1 && i % 2 == 0) { tryzero = i; @@ -5100,11 +4549,7 @@ next: numzero += 2; } } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); -#else ASSERT(firstzero + numzero <= (int)sizeof (struct in6_addr)); -#endif /* __APPLE__ */ /* * Check for an IPv4 embedded address. @@ -5112,13 +4557,8 @@ next: v6end = sizeof (struct in6_addr) - 2; if (IN6_IS_ADDR_V4MAPPED(&ip6) || IN6_IS_ADDR_V4COMPAT(&ip6)) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - for (i = sizeof (struct in6_addr) - 1; - i >= DTRACE_V4MAPPED_OFFSET; i--) { -#else for (i = sizeof (struct in6_addr) - 1; i >= (int)DTRACE_V4MAPPED_OFFSET; i--) { -#endif /* __APPLE__ */ ASSERT(end >= base); val = ip6._S6_un._S6_u8[i]; @@ -5131,13 +4571,8 @@ next: } } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (i > DTRACE_V4MAPPED_OFFSET) - *end-- = '.'; -#else if (i > (int)DTRACE_V4MAPPED_OFFSET) *end-- = '.'; -#endif /* __APPLE__ */ } if (subr == DIF_SUBR_INET_NTOA6) @@ -5189,7 +4624,7 @@ next: * The user didn't use AH_INET or AH_INET6. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -5197,10 +4632,52 @@ inetout: regs[rd] = (uintptr_t)end + 1; mstate->dtms_scratch_ptr += size; break; } - -#ifdef __APPLE__ - /* CoreProfile callback ('core_profile(uint64_t, [uint64_t], [uint64_t] ...)') */ + case DIF_SUBR_TOUPPER: + case DIF_SUBR_TOLOWER: { + uintptr_t src = tupregs[0].dttk_value; + char *dest = (char *)mstate->dtms_scratch_ptr; + char lower, upper, base, c; + uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; + size_t len = dtrace_strlen((char*) src, size); + size_t i = 0; + + lower = (subr == DIF_SUBR_TOUPPER) ? 'a' : 'A'; + upper = (subr == DIF_SUBR_TOUPPER) ? 'z' : 'Z'; + base = (subr == DIF_SUBR_TOUPPER) ? 
'A' : 'a'; + + if (!dtrace_canload(src, len + 1, mstate, vstate)) { + regs[rd] = 0; + break; + } + + if (!DTRACE_INSCRATCH(mstate, size)) { + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); + regs[rd] = 0; + break; + } + + for (i = 0; i < size - 1; ++i) { + if ((c = dtrace_load8(src + i)) == '\0') + break; + if (c >= lower && c <= upper) + c = base + (c - lower); + dest[i] = c; + } + + ASSERT(i < size); + + dest[i] = '\0'; + regs[rd] = (uintptr_t) dest; + mstate->dtms_scratch_ptr += size; + + break; + } + +/* + * APPLE NOTE: + * CoreProfile callback ('core_profile (uint64_t, [uint64_t], [uint64_t] ...)') + */ case DIF_SUBR_COREPROFILE: { uint64_t selector = tupregs[0].dttk_value; uint64_t args[DIF_DTR_NREGS-1] = {0ULL}; @@ -5229,9 +4706,6 @@ inetout: regs[rd] = (uintptr_t)end + 1; regs[rd] = ret; break; } - -#endif /* __APPLE__ */ - } } @@ -5254,11 +4728,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; dtrace_difv_t *v; volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; -#if !defined(__APPLE__) - volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#else volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; -#endif /* __APPLE__ */ dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ uint64_t regs[DIF_DIR_NREGS]; @@ -5266,11 +4736,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; int64_t cc_r; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - uint_t pc = 0, id, opc; -#else uint_t pc = 0, id, opc = 0; -#endif /* __APPLE__ */ uint8_t ttop = 0; dif_instr_t instr; uint_t r1, r2, rd; @@ -5484,36 +4950,12 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, case DIF_OP_LDX: regs[rd] = dtrace_load64(regs[r1]); break; -#if !defined(__APPLE__) - case DIF_OP_ULDSB: - regs[rd] = (int8_t) - dtrace_fuword8((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDSH: - regs[rd] = (int16_t) - dtrace_fuword16((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDSW: - regs[rd] = (int32_t) - dtrace_fuword32((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDUB: - regs[rd] = - dtrace_fuword8((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDUH: - regs[rd] = - dtrace_fuword16((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDUW: - regs[rd] = - dtrace_fuword32((void *)(uintptr_t)regs[r1]); - break; - case DIF_OP_ULDX: - regs[rd] = - dtrace_fuword64((void *)(uintptr_t)regs[r1]); - break; -#else /* Darwin 32-bit kernel may fetch from 64-bit user. Don't want uintptr_t cast. */ +/* + * Darwin 32-bit kernel may fetch from 64-bit user. 
+ * Do not cast regs to uintptr_t + * DIF_OP_ULDSB,DIF_OP_ULDSH, DIF_OP_ULDSW, DIF_OP_ULDUB + * DIF_OP_ULDUH, DIF_OP_ULDUW, DIF_OP_ULDX + */ case DIF_OP_ULDSB: regs[rd] = (int8_t) dtrace_fuword8(regs[r1]); @@ -5541,7 +4983,6 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, case DIF_OP_ULDX: regs[rd] = dtrace_fuword64(regs[r1]); -#endif /* __APPLE__ */ break; case DIF_OP_RET: rval = regs[rd]; @@ -5561,10 +5002,10 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, uintptr_t s1 = regs[r1]; uintptr_t s2 = regs[r2]; - if (s1 != NULL && + if (s1 != 0 && !dtrace_strcanload(s1, sz, mstate, vstate)) break; - if (s2 != NULL && + if (s2 != 0 && !dtrace_strcanload(s2, sz, mstate, vstate)) break; @@ -5603,7 +5044,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, * then this is to be treated as a * reference to a NULL variable. */ - regs[rd] = NULL; + regs[rd] = 0; } else { regs[rd] = a + sizeof (uint64_t); } @@ -5627,10 +5068,10 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { uintptr_t a = (uintptr_t)svar->dtsv_data; - ASSERT(a != NULL); + ASSERT(a != 0); ASSERT(svar->dtsv_size != 0); - if (regs[rd] == NULL) { + if (regs[rd] == 0) { *(uint8_t *)a = UINT8_MAX; break; } else { @@ -5672,13 +5113,8 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, id -= DIF_VAR_OTHER_UBASE; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(id < vstate->dtvs_nlocals); -#else ASSERT(id < (uint_t)vstate->dtvs_nlocals); -#endif /* __APPLE__ */ ASSERT(vstate->dtvs_locals != NULL); - svar = vstate->dtvs_locals[id]; ASSERT(svar != NULL); v = &svar->dtsv_var; @@ -5697,7 +5133,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, * then this is to be treated as a * reference to a NULL variable. */ - regs[rd] = NULL; + regs[rd] = 0; } else { regs[rd] = a + sizeof (uint64_t); } @@ -5715,12 +5151,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(id < vstate->dtvs_nlocals); -#else ASSERT(id < (uint_t)vstate->dtvs_nlocals); -#endif /* __APPLE__ */ - ASSERT(vstate->dtvs_locals != NULL); svar = vstate->dtvs_locals[id]; ASSERT(svar != NULL); @@ -5734,7 +5165,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, ASSERT(svar->dtsv_size == (int)NCPU * sz); a += CPU->cpu_id * sz; - if (regs[rd] == NULL) { + if (regs[rd] == 0) { *(uint8_t *)a = UINT8_MAX; break; } else { @@ -5815,11 +5246,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, * Given that we're storing to thread-local data, * we need to flush our predicate cache. */ -#if !defined(__APPLE__) - curthread->t_predcache = NULL; -#else dtrace_set_thread_predcache(current_thread(), 0); -#endif /* __APPLE__ */ if (dvar == NULL) break; @@ -5990,7 +5417,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, if (size < regs[r1] || !DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); - regs[rd] = NULL; + regs[rd] = 0; break; } @@ -6058,11 +5485,12 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[rd]; break; } -#if !defined(__APPLE__) - if (regs[rd] & 7) { -#else - if (regs[rd] & 3) { /* Darwin kmem_zalloc() called from dtrace_difo_init() is 4-byte aligned. */ -#endif /* __APPLE__ */ + + /* + * Darwin kmem_zalloc() called from + * dtrace_difo_init() is 4-byte aligned. 
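The store path just below rejects destination addresses that are not 4-byte aligned by masking the low bits. A small standalone sketch of that idiom, with illustrative names and a generic power-of-two alignment parameter:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * An address is aligned to a power-of-two boundary exactly when its low
 * log2(align) bits are clear, i.e. (addr & (align - 1)) == 0.  The DIF
 * store below uses align == 4, hence the "& 3" test.
 */
static bool
is_aligned(uint64_t addr, uint64_t align)
{
	return ((addr & (align - 1)) == 0);
}

int
main(void)
{
	assert(is_aligned(0x1000, 4));
	assert(!is_aligned(0x1003, 4));
	assert(is_aligned(0x1008, 8));
	return (0);
}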
+ */ + if (regs[rd] & 3) { *flags |= CPU_DTRACE_BADALIGN; *illval = regs[rd]; break; @@ -6087,13 +5515,8 @@ dtrace_action_breakpoint(dtrace_ecb_t *ecb) dtrace_probe_t *probe = ecb->dte_probe; dtrace_provider_t *prov = probe->dtpr_provider; char c[DTRACE_FULLNAMELEN + 80], *str; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - char *msg = "dtrace: breakpoint action at probe "; - char *ecbmsg = " (ecb "; -#else const char *msg = "dtrace: breakpoint action at probe "; const char *ecbmsg = " (ecb "; -#endif /* __APPLE__ */ uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); uintptr_t val = (uintptr_t)ecb; int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; @@ -6164,13 +5587,8 @@ dtrace_action_panic(dtrace_ecb_t *ecb) if (dtrace_panicked != NULL) return; -#if !defined(__APPLE__) - if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) - return; -#else if (dtrace_casptr(&dtrace_panicked, NULL, current_thread()) != NULL) return; -#endif /* __APPLE__ */ /* * We won the right to panic. (We want to be sure that only one @@ -6181,10 +5599,11 @@ dtrace_action_panic(dtrace_ecb_t *ecb) probe->dtpr_provider->dtpv_name, probe->dtpr_mod, probe->dtpr_func, probe->dtpr_name, (void *)ecb); -#if defined(__APPLE__) - /* Mac OS X debug feature -- can return from panic() */ + /* + * APPLE NOTE: this was for an old Mac OS X debug feature + * allowing a return from panic(). Revisit someday. + */ dtrace_panicked = NULL; -#endif /* __APPLE__ */ } static void @@ -6198,24 +5617,17 @@ dtrace_action_raise(uint64_t sig) return; } -#if !defined(__APPLE__) /* * raise() has a queue depth of 1 -- we ignore all subsequent * invocations of the raise() action. */ - if (curthread->t_dtrace_sig == 0) - curthread->t_dtrace_sig = (uint8_t)sig; - curthread->t_sig_check = 1; - aston(curthread); -#else uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); if (uthread && uthread->t_dtrace_sig == 0) { uthread->t_dtrace_sig = sig; act_set_astbsd(current_thread()); } -#endif /* __APPLE__ */ } static void @@ -6224,13 +5636,6 @@ dtrace_action_stop(void) if (dtrace_destructive_disallow) return; -#if !defined(__APPLE__) - if (!curthread->t_dtrace_stop) { - curthread->t_dtrace_stop = 1; - curthread->t_sig_check = 1; - aston(curthread); - } -#else uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); if (uthread) { /* @@ -6240,10 +5645,14 @@ dtrace_action_stop(void) uthread->t_dtrace_stop = 1; act_set_astbsd(current_thread()); } -#endif /* __APPLE__ */ } -#if defined(__APPLE__) + +/* + * APPLE NOTE: pidresume works in conjunction with the dtrace stop action. + * Both activate only when the currently running process next leaves the + * kernel. + */ static void dtrace_action_pidresume(uint64_t pid) { @@ -6269,8 +5678,6 @@ dtrace_action_pidresume(uint64_t pid) act_set_astbsd(current_thread()); } } -#endif /* __APPLE__ */ - static void dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) @@ -6343,11 +5750,7 @@ dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, size = (uintptr_t)fps - mstate->dtms_scratch_ptr + (nframes * sizeof (uint64_t)); -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (!DTRACE_INSCRATCH(mstate, size)) { -#else if (!DTRACE_INSCRATCH(mstate, (uintptr_t)size)) { -#endif /* __APPLE__ */ /* * Not enough room for our frame pointers -- need to indicate * that we ran out of scratch space. @@ -6439,15 +5842,9 @@ out: * is the function called by the provider to fire a probe -- from which all * subsequent probe-context DTrace activity emanates. 
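For orientation, a provider fires one of its registered probes by passing the probe id and up to five argument words to dtrace_probe(); unused slots are conventionally zero. The sketch below shows only that calling shape; the provider structure and probe id are hypothetical, and a printf stub stands in for the kernel entry point so the example compiles in userland:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dtrace_id_t;	/* as in the DTrace headers */

/* Stub standing in for the kernel's dtrace_probe() entry point. */
static void
dtrace_probe_stub(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
    uint64_t arg2, uint64_t arg3, uint64_t arg4)
{
	printf("probe %u fired: %" PRIu64 " %" PRIu64 " %" PRIu64
	    " %" PRIu64 " %" PRIu64 "\n", id, arg0, arg1, arg2, arg3, arg4);
}

/* Hypothetical provider state holding an id from dtrace_probe_create(). */
struct my_provider {
	dtrace_id_t entry_probe;
};

static void
my_provider_hit(struct my_provider *prov, uint64_t fd, uint64_t len)
{
	/* Unused argument slots are conventionally passed as zero. */
	dtrace_probe_stub(prov->entry_probe, fd, len, 0, 0, 0);
}

int
main(void)
{
	struct my_provider prov = { .entry_probe = 42 };

	my_provider_hit(&prov, 3, 512);
	return (0);
}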
*/ -#if !defined(__APPLE__) -void -dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, - uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) -#else static void __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) -#endif /* __APPLE__ */ { processorid_t cpuid; dtrace_icookie_t cookie; @@ -6461,30 +5858,13 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, volatile uint16_t *flags; hrtime_t now; -#if !defined(__APPLE__) - /* - * Kick out immediately if this CPU is still being born (in which case - * curthread will be set to -1) or the current thread can't allow - * probes in its current context. - */ - if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) - return; -#else - /* Not a concern for Darwin */ -#endif /* __APPLE__ */ - cookie = dtrace_interrupt_disable(); probe = dtrace_probes[id - 1]; cpuid = CPU->cpu_id; onintr = CPU_ON_INTR(CPU); -#if !defined(__APPLE__) - if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && - probe->dtpr_predcache == curthread->t_predcache) { -#else if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && probe->dtpr_predcache == dtrace_get_thread_predcache(current_thread())) { -#endif /* __APPLE__ */ /* * We have hit in the predicate cache; we know that * this predicate would evaluate to be false. @@ -6508,8 +5888,11 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, if (vtime && curthread->t_dtrace_start) curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; #else - /* FIXME: the time spent entering DTrace and arriving to this point is attributed - to the current thread. Instead it should accrue to DTrace. */ + /* + * APPLE NOTE: The time spent entering DTrace and arriving + * to this point, is attributed to the current thread. + * Instead it should accrue to DTrace. FIXME + */ vtime = dtrace_vtime_references != 0; if (vtime) @@ -6531,12 +5914,12 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */ #endif /* __APPLE__ */ -#if defined(__APPLE__) /* - * A provider may call dtrace_probe_error() in lieu of dtrace_probe() in some circumstances. - * See, e.g. fasttrap_isa.c. However the provider has no access to ECB context, so passes - * 0 through "arg0" and the probe_id of the overridden probe as arg1. Detect that here - * and cons up a viable state (from the probe_id). + * APPLE NOTE: A provider may call dtrace_probe_error() in lieu of + * dtrace_probe() in some circumstances. See, e.g. fasttrap_isa.c. + * However the provider has no access to ECB context, so passes + * 0 through "arg0" and the probe_id of the overridden probe as arg1. + * Detect that here and cons up a viable state (from the probe_id). 
*/ if (dtrace_probeid_error == id && 0 == arg0) { dtrace_id_t ftp_id = (dtrace_id_t)arg1; @@ -6554,11 +5937,10 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, ftp_state->dts_arg_error_illval = -1; /* arg5 */ } } -#endif /* __APPLE__ */ mstate.dtms_difo = NULL; mstate.dtms_probe = probe; - mstate.dtms_strtok = NULL; + mstate.dtms_strtok = 0; mstate.dtms_arg[0] = arg0; mstate.dtms_arg[1] = arg1; mstate.dtms_arg[2] = arg2; @@ -6574,6 +5956,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; dtrace_vstate_t *vstate = &state->dts_vstate; dtrace_provider_t *prov = probe->dtpr_provider; + uint64_t tracememsize = 0; int committed = 0; caddr_t tomax; @@ -6670,11 +6053,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, * XXX this is hackish, but so is setting a variable * XXX in a McCarthy OR... */ -#if !defined(__APPLE__) - if ((cr = CRED()) == NULL || -#else if ((cr = dtrace_CRED()) == NULL || -#endif /* __APPLE__ */ posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_uid || posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_ruid || posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_suid || @@ -6685,7 +6064,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, (proc = ttoproc(curthread)) == NULL || (proc->p_flag & SNOCD)) #else - 1) /* Darwin omits "No Core Dump" flag. */ + 1) /* APPLE NOTE: Darwin omits "No Core Dump" flag */ #endif /* __APPLE__ */ continue; } @@ -6704,7 +6083,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, cr->cr_zone->zone_id) continue; #else - /* Darwin doesn't do zones. */ + /* APPLE NOTE: Darwin doesn't do zones. */ #endif /* __APPLE__ */ } } @@ -6765,11 +6144,8 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, * Update the predicate cache... */ ASSERT(cid == pred->dtp_cacheid); -#if !defined(__APPLE__) - curthread->t_predcache = cid; -#else + dtrace_set_thread_predcache(current_thread(), cid); -#endif /* __APPLE__ */ } continue; @@ -6831,18 +6207,10 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, if (!dtrace_priv_kernel(state)) continue; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - dtrace_getpcstack((pc_t *)(tomax + valoffs), - size / sizeof (pc_t), probe->dtpr_aframes, - DTRACE_ANCHORED(probe) ? NULL : - (uint32_t *)arg0); -#else dtrace_getpcstack((pc_t *)(tomax + valoffs), size / sizeof (pc_t), probe->dtpr_aframes, DTRACE_ANCHORED(probe) ? 
NULL : (uint32_t *)(uintptr_t)arg0); -#endif /* __APPLE__ */ - continue; case DTRACEACT_JSTACK: @@ -6937,12 +6305,10 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, dtrace_action_raise(val); continue; -#if defined(__APPLE__) - case DTRACEACT_PIDRESUME: + case DTRACEACT_PIDRESUME: /* __APPLE__ */ if (dtrace_priv_proc_destructive(state)) dtrace_action_pidresume(val); continue; -#endif /* __APPLE__ */ case DTRACEACT_COMMIT: ASSERT(!committed); @@ -6967,9 +6333,12 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, case DTRACEACT_PRINTA: case DTRACEACT_SYSTEM: case DTRACEACT_FREOPEN: -#if defined(__APPLE__) - case DTRACEACT_APPLEBINARY: -#endif /* __APPLE__ */ + case DTRACEACT_APPLEBINARY: /* __APPLE__ */ + case DTRACEACT_TRACEMEM: + break; + + case DTRACEACT_TRACEMEM_DYNSIZE: + tracememsize = val; break; case DTRACEACT_SYM: @@ -6978,23 +6347,6 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, continue; break; -#if !defined(__APPLE__) - case DTRACEACT_USYM: - case DTRACEACT_UMOD: - case DTRACEACT_UADDR: { - struct pid *pid = curthread->t_procp->p_pidp; - - if (!dtrace_priv_proc(state)) - continue; - - DTRACE_STORE(uint64_t, tomax, - valoffs, (uint64_t)pid->pid_id); - DTRACE_STORE(uint64_t, tomax, - valoffs + sizeof (uint64_t), val); - - continue; - } -#else case DTRACEACT_USYM: case DTRACEACT_UMOD: case DTRACEACT_UADDR: { @@ -7008,7 +6360,6 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, continue; } -#endif /* __APPLE__ */ case DTRACEACT_EXIT: { /* @@ -7051,6 +6402,13 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { uintptr_t end = valoffs + size; + if (tracememsize != 0 && + valoffs + tracememsize < end) + { + end = valoffs + tracememsize; + tracememsize = 0; + } + if (!dtrace_vcanload((void *)(uintptr_t)val, &dp->dtdo_rtype, &mstate, vstate)) continue; @@ -7141,13 +6499,14 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, * time to prevent it from being accumulated * into t_dtrace_vtime. */ -#if !defined(__APPLE__) - curthread->t_dtrace_start = 0; -#else - /* Set the sign bit on t_dtrace_tracing to suspend accumulation to it. */ + + /* + * Darwin sets the sign bit on t_dtrace_tracing + * to suspend accumulation to it. + */ dtrace_set_thread_tracing(current_thread(), - (1ULL<<63) | dtrace_get_thread_tracing(current_thread())); -#endif /* __APPLE__ */ + (1ULL<<63) | dtrace_get_thread_tracing(current_thread())); + } /* @@ -7174,11 +6533,7 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, buf->dtb_offset = offs + ecb->dte_size; } -#if !defined(__APPLE__) - if (vtime) - curthread->t_dtrace_start = dtrace_gethrtime(); -#else - /* FIXME: the time spent leaving DTrace from this point to the rti is attributed + /* FIXME: On Darwin the time spent leaving DTrace from this point to the rti is attributed to the current thread. Instead it should accrue to DTrace. */ if (vtime) { thread_t thread = current_thread(); @@ -7192,16 +6547,18 @@ __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, dtrace_set_thread_tracing(thread, (~(1ULL<<63)) & t); } } -#endif /* __APPLE__ */ dtrace_interrupt_enable(cookie); } -#if defined(__APPLE__) -/* Don't allow a thread to re-enter dtrace_probe(). This could occur if a probe is encountered - on some function in the transitive closure of the call to dtrace_probe(). Solaris has some - strong guarantees that this won't happen, the Darwin implementation is not so mature as to - make those guarantees. 
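The dtrace_probe() wrapper introduced just below exists to keep a thread from recursing back into __dtrace_probe() when a probe fires during probe processing. A minimal userland sketch of such a per-thread latch, using a hypothetical thread-local flag in place of xnu's thread accessors:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool probe_busy;	/* hypothetical per-thread flag */

/* Stands in for __dtrace_probe(); real probe processing happens here. */
static void
probe_engine(void)
{
	printf("probe processed\n");
}

/*
 * Outer entry point: if this thread is already inside the engine, drop
 * the nested firing instead of recursing into it.
 */
static void
probe_fire(void)
{
	if (probe_busy)
		return;

	probe_busy = true;
	probe_engine();
	probe_busy = false;
}

int
main(void)
{
	probe_fire();
	return (0);
}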
*/ +/* + * APPLE NOTE: Don't allow a thread to re-enter dtrace_probe(). + * This could occur if a probe is encountered on some function in the + * transitive closure of the call to dtrace_probe(). + * Solaris has some strong guarantees that this won't happen. + * The Darwin implementation is not so mature as to make those guarantees. + * Hence, the introduction of __dtrace_probe() on xnu. + */ void dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, @@ -7222,7 +6579,6 @@ dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, #endif enable_preemption(); } -#endif /* __APPLE__ */ /* * DTrace Probe Hashing Functions @@ -7236,11 +6592,7 @@ dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, * specified.) */ static uint_t -#if !defined(__APPLE__) /* Quiet compiler warnings */ -dtrace_hash_str(char *p) -#else dtrace_hash_str(const char *p) -#endif /* __APPLE__ */ { unsigned int g; uint_t hval = 0; @@ -7272,7 +6624,12 @@ dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) return (hash); } -#if !defined(__APPLE__) /* Unused. Quiet compiler warning. */ +/* + * APPLE NOTE: dtrace_hash_destroy is not used. + * It is called by dtrace_detach which is not + * currently implemented. Revisit someday. + */ +#if !defined(__APPLE__) static void dtrace_hash_destroy(dtrace_hash_t *hash) { @@ -7386,7 +6743,7 @@ dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) return (bucket->dthb_len); } - return (NULL); + return (0); } static void @@ -7458,19 +6815,8 @@ dtrace_badattr(const dtrace_attribute_t *a) /* * Return a duplicate copy of a string. If the specified string is NULL, * this function returns a zero-length string. + * APPLE NOTE: Darwin employs size bounded string operation. */ -#if !defined(__APPLE__) -static char * -dtrace_strdup(const char *str) -{ - char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); - - if (str != NULL) - (void) strcpy(new, str); - - return (new); -} -#else /* Employ size bounded string operation. */ static char * dtrace_strdup(const char *str) { @@ -7482,7 +6828,6 @@ dtrace_strdup(const char *str) return (new); } -#endif /* __APPLE__ */ #define DTRACE_ISALPHA(c) \ (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) @@ -7541,20 +6886,12 @@ dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) static void dtrace_errdebug(const char *str) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; -#else int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; -#endif /* __APPLE__ */ int occupied = 0; lck_mtx_lock(&dtrace_errlock); dtrace_errlast = str; -#if !defined(__APPLE__) - dtrace_errthread = curthread; -#else dtrace_errthread = (kthread_t *)current_thread(); -#endif /* __APPLE__ */ while (occupied++ < DTRACE_ERRHASHSZ) { if (dtrace_errhash[hval].dter_msg == str) { @@ -7782,11 +7119,9 @@ static int dtrace_match_string(const char *s, const char *p, int depth) { #pragma unused(depth) /* __APPLE__ */ -#if !defined(__APPLE__) - return (s != NULL && strcmp(s, p) == 0); -#else /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. 
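Bounding the comparison by strlen(s) + 1, as the return statement below does, keeps the terminating NUL inside the compared range, so the size-bounded form still requires an exact match rather than a shared prefix. A small self-contained illustration:

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Exact-match comparison bounded by the length of the left operand. */
static bool
match_exact(const char *s, const char *p)
{
	return (s != NULL && strncmp(s, p, strlen(s) + 1) == 0);
}

int
main(void)
{
	assert(match_exact("fbt", "fbt"));	/* identical strings match */
	assert(!match_exact("fbt", "fbtrace"));	/* the bound covers the NUL,
						   so a bare prefix does not */
	return (0);
}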
*/ return (s != NULL && strncmp(s, p, strlen(s) + 1) == 0); -#endif /* __APPLE__ */ } /*ARGSUSED*/ @@ -7830,15 +7165,9 @@ dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, return (nmatched); } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - template.dtpr_mod = (char *)pkp->dtpk_mod; - template.dtpr_func = (char *)pkp->dtpk_func; - template.dtpr_name = (char *)pkp->dtpk_name; -#else template.dtpr_mod = (char *)(uintptr_t)pkp->dtpk_mod; template.dtpr_func = (char *)(uintptr_t)pkp->dtpk_func; template.dtpr_name = (char *)(uintptr_t)pkp->dtpk_name; -#endif /* __APPLE__ */ /* * We want to find the most distinct of the module name, function @@ -7869,11 +7198,7 @@ dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, * invoke our callback for each one that matches our input probe key. */ if (hash == NULL) { -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (i = 0; i < dtrace_nprobes; i++) { -#else for (i = 0; i < (dtrace_id_t)dtrace_nprobes; i++) { -#endif /* __APPLE__ */ if ((probe = dtrace_probes[i]) == NULL || dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) @@ -8032,16 +7357,13 @@ dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, } provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); -#if !defined(__APPLE__) - provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); - (void) strcpy(provider->dtpv_name, name); -#else /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. */ { size_t bufsize = strlen(name) + 1; provider->dtpv_name = kmem_alloc(bufsize, KM_SLEEP); (void) strlcpy(provider->dtpv_name, name, bufsize); } -#endif /* __APPLE__ */ provider->dtpv_attr = *pap; provider->dtpv_priv.dtpp_flags = priv; @@ -8178,7 +7500,7 @@ dtrace_unregister(dtrace_provider_id_t id) /* * Attempt to destroy the probes associated with this provider. */ - if (old->ecb_count!=0) { + if (old->dtpv_ecb_count!=0) { /* * We have at least one ECB; we can't remove this provider. */ @@ -8194,7 +7516,7 @@ dtrace_unregister(dtrace_provider_id_t id) * All of the probes for this provider are disabled; we can safely * remove all of them from their hash chains and from the probe array. 
*/ - for (i = 0; i < dtrace_nprobes && old->probe_count!=0; i++) { + for (i = 0; i < dtrace_nprobes && old->dtpv_probe_count!=0; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; @@ -8202,7 +7524,7 @@ dtrace_unregister(dtrace_provider_id_t id) continue; dtrace_probes[i] = NULL; - old->probe_count--; + old->dtpv_probe_count--; dtrace_hash_remove(dtrace_bymod, probe); dtrace_hash_remove(dtrace_byfunc, probe); @@ -8233,11 +7555,7 @@ dtrace_unregister(dtrace_provider_id_t id) kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); -#if !defined(__APPLE__) - kmem_free(probe, sizeof (dtrace_probe_t)); -#else zfree(dtrace_probe_t_zone, probe); -#endif } if ((prev = dtrace_provider) == old) { @@ -8338,7 +7656,7 @@ dtrace_condense(dtrace_provider_id_t id) continue; dtrace_probes[i] = NULL; - prov->probe_count--; + prov->dtpv_probe_count--; dtrace_hash_remove(dtrace_bymod, probe); dtrace_hash_remove(dtrace_byfunc, probe); @@ -8349,11 +7667,7 @@ dtrace_condense(dtrace_provider_id_t id) kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); -#if !defined(__APPLE__) - kmem_free(probe, sizeof (dtrace_probe_t)); -#else zfree(dtrace_probe_t_zone, probe); -#endif vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); } @@ -8392,12 +7706,9 @@ dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, VM_BESTFIT | VM_SLEEP); -#if !defined(__APPLE__) - probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); -#else + probe = zalloc(dtrace_probe_t_zone); bzero(probe, sizeof (dtrace_probe_t)); -#endif probe->dtpr_id = id; probe->dtpr_gen = dtrace_probegen++; @@ -8412,11 +7723,7 @@ dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, dtrace_hash_add(dtrace_byfunc, probe); dtrace_hash_add(dtrace_byname, probe); -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (id - 1 >= dtrace_nprobes) { -#else if (id - 1 >= (dtrace_id_t)dtrace_nprobes) { -#endif /* __APPLE__ */ size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); size_t nsize = osize << 1; @@ -8449,16 +7756,12 @@ dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, dtrace_nprobes <<= 1; } -#if !defined(__APPLE__) /* Quiet compiler warning */ - ASSERT(id - 1 < dtrace_nprobes); -#else ASSERT(id - 1 < (dtrace_id_t)dtrace_nprobes); -#endif /* __APPLE__ */ } ASSERT(dtrace_probes[id - 1] == NULL); dtrace_probes[id - 1] = probe; - provider->probe_count++; + provider->dtpv_probe_count++; if (provider != dtrace_provider) lck_mtx_unlock(&dtrace_lock); @@ -8471,13 +7774,8 @@ dtrace_probe_lookup_id(dtrace_id_t id) { lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (id == 0 || id > dtrace_nprobes) - return (NULL); -#else if (id == 0 || id > (dtrace_id_t)dtrace_nprobes) return (NULL); -#endif /* __APPLE__ */ return (dtrace_probes[id - 1]); } @@ -8550,21 +7848,13 @@ dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) bzero(pdp, sizeof (dtrace_probedesc_t)); pdp->dtpd_id = prp->dtpr_id; -#if !defined(__APPLE__) - (void) strncpy(pdp->dtpd_provider, - prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); - - (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); - (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 
1); - (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); -#else /* Employ size bounded string operation. */ + /* APPLE NOTE: Darwin employs size bounded string operation. */ (void) strlcpy(pdp->dtpd_provider, prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN); (void) strlcpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN); (void) strlcpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN); (void) strlcpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN); -#endif /* __APPLE__ */ } /* @@ -8609,22 +7899,11 @@ dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) */ lck_mtx_lock(&mod_lock); -#if !defined(__APPLE__) - ctl = &modules; - do { - if (ctl->mod_busy || ctl->mod_mp == NULL) - continue; - - prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); - - } while ((ctl = ctl->mod_next) != &modules); -#else ctl = dtrace_modctl_list; while (ctl) { prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); ctl = ctl->mod_next; } -#endif lck_mtx_unlock(&mod_lock); } while (all && (prv = prv->dtpv_next) != NULL); @@ -8798,18 +8077,10 @@ dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) #else dhpb.dthpb_base = dhp->dofhp_addr; /* FIXME: James, why? */ #endif -#if !defined(__APPLE__) /* Quiet compiler warning */ - dhpb.dthpb_offs = off + probe->dofpr_offidx; -#else dhpb.dthpb_offs = (int32_t *)(off + probe->dofpr_offidx); -#endif /* __APPLE__ */ dhpb.dthpb_noffs = probe->dofpr_noffs; if (enoff != NULL) { -#if !defined(__APPLE__) /* Quiet compiler warning */ - dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; -#else dhpb.dthpb_enoffs = (int32_t *)(enoff + probe->dofpr_enoffidx); -#endif /* __APPLE__ */ dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; } else { dhpb.dthpb_enoffs = NULL; @@ -8830,11 +8101,7 @@ dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else uint32_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED); @@ -8891,11 +8158,7 @@ dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else uint32_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED); @@ -8922,11 +8185,7 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, { dtrace_meta_t *meta; dtrace_helpers_t *help, *next; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ *idp = DTRACE_METAPROVNONE; @@ -8951,16 +8210,14 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); meta->dtm_mops = *mops; -#if !defined(__APPLE__) - meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); - (void) strcpy(meta->dtm_name, name); -#else /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. 
*/ { size_t bufsize = strlen(name) + 1; meta->dtm_name = kmem_alloc(bufsize, KM_SLEEP); (void) strlcpy(meta->dtm_name, name, bufsize); } -#endif /* __APPLE__ */ + meta->dtm_arg = arg; lck_mtx_lock(&dtrace_meta_lock); @@ -9076,12 +8333,9 @@ static int dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, cred_t *cr) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int err = 0, i; -#else int err = 0; uint_t i; -#endif /* __APPLE__ */ + int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; int kcheckload; uint_t pc; @@ -9366,12 +8620,8 @@ dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; dtrace_diftype_t *vt, *et; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - uint_t id, ndx; -#else uint_t id; int ndx; -#endif /* __APPLE__ */ if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && v->dtdv_scope != DIFV_SCOPE_THREAD && @@ -9605,9 +8855,7 @@ dtrace_difo_validate_helper(dtrace_difo_t *dp) subr == DIF_SUBR_STRJOIN || subr == DIF_SUBR_STRRCHR || subr == DIF_SUBR_STRSTR || -#if defined(__APPLE__) subr == DIF_SUBR_COREPROFILE || -#endif /* __APPLE__ */ subr == DIF_SUBR_HTONS || subr == DIF_SUBR_HTONL || subr == DIF_SUBR_HTONLL || @@ -9635,11 +8883,7 @@ dtrace_difo_validate_helper(dtrace_difo_t *dp) static int dtrace_difo_cacheable(dtrace_difo_t *dp) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ if (dp == NULL) return (0); @@ -9684,11 +8928,7 @@ dtrace_difo_cacheable(dtrace_difo_t *dp) static void dtrace_difo_hold(dtrace_difo_t *dp) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); @@ -9720,11 +8960,7 @@ dtrace_difo_hold(dtrace_difo_t *dp) static void dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - uint64_t sval; -#else uint64_t sval = 0; -#endif /* __APPLE__ */ dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ const dif_instr_t *text = dp->dtdo_buf; uint_t pc, srd = 0; @@ -9859,25 +9095,16 @@ dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) static void dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i, oldsvars, osz, nsz, otlocals, ntlocals; - uint_t id; -#else int oldsvars, osz, nsz, otlocals, ntlocals; uint_t i, id; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - dtrace_statvar_t *svar, ***svarp; -#else dtrace_statvar_t *svar; dtrace_statvar_t ***svarp = NULL; -#endif /* __APPLE__ */ size_t dsize = 0; uint8_t scope = v->dtdv_scope; int *np = (int *)NULL; @@ -9889,11 +9116,7 @@ dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) switch (scope) { case DIFV_SCOPE_THREAD: -#if !defined(__APPLE__) /* Quiet compiler warnings */ - while (id >= (otlocals = vstate->dtvs_ntlocals)) { -#else while (id >= (uint_t)(otlocals = vstate->dtvs_ntlocals)) { -#endif /* __APPLE__ */ dtrace_difv_t *tlocals; if ((ntlocals = (otlocals << 1)) == 0) @@ -9943,11 +9166,7 @@ dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) ASSERT(0); } -#if !defined(__APPLE__) /* 
Quiet compiler warnings */ - while (id >= (oldsvars = *np)) { -#else while (id >= (uint_t)(oldsvars = *np)) { -#endif /* __APPLE__ */ dtrace_statvar_t **statics; int newsvars, oldsize, newsize; @@ -10034,28 +9253,17 @@ dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) static void dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ ASSERT(dp->dtdo_refcnt == 0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - dtrace_statvar_t *svar, **svarp; - uint_t id; - uint8_t scope = v->dtdv_scope; - int *np; -#else dtrace_statvar_t *svar; dtrace_statvar_t **svarp = NULL; uint_t id; uint8_t scope = v->dtdv_scope; int *np = NULL; -#endif /* __APPLE__ */ switch (scope) { case DIFV_SCOPE_THREAD: @@ -10080,11 +9288,7 @@ dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) id -= DIF_VAR_OTHER_UBASE; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(id < *np); -#else ASSERT(id < (uint_t)*np); -#endif /* __APPLE__ */ svar = svarp[id]; ASSERT(svar != NULL); @@ -10094,7 +9298,7 @@ dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) continue; if (svar->dtsv_size != 0) { - ASSERT(svar->dtsv_data != NULL); + ASSERT(svar->dtsv_data != 0); kmem_free((void *)(uintptr_t)svar->dtsv_data, svar->dtsv_size); } @@ -10114,11 +9318,7 @@ dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) static void dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); ASSERT(dp->dtdo_refcnt != 0); @@ -10295,8 +9495,8 @@ dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, { dtrace_actdesc_t *act; - ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && - arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); + ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != 0 && + arg >= KERNELBASE) || (arg == 0 && kind == DTRACEACT_PRINTA)); act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); act->dtad_kind = kind; @@ -10366,19 +9566,11 @@ dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) epid = state->dts_epid++; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (epid - 1 >= state->dts_necbs) { -#else if (epid - 1 >= (dtrace_epid_t)state->dts_necbs) { -#endif /* __APPLE__ */ dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; int necbs = state->dts_necbs << 1; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(epid == state->dts_necbs + 1); -#else ASSERT(epid == (dtrace_epid_t)state->dts_necbs + 1); -#endif /* __APPLE__ */ if (necbs == 0) { ASSERT(oecbs == NULL); @@ -10436,7 +9628,7 @@ dtrace_ecb_enable(dtrace_ecb_t *ecb) return(0); } - probe->dtpr_provider->ecb_count++; + probe->dtpr_provider->dtpv_ecb_count++; if (probe->dtpr_ecb == NULL) { dtrace_provider_t *prov = probe->dtpr_provider; @@ -10734,21 +9926,13 @@ success: aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, VM_BESTFIT | VM_SLEEP); -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (aggid - 1 >= state->dts_naggregations) { -#else if (aggid - 1 >= (dtrace_aggid_t)state->dts_naggregations) { -#endif /* __APPLE__ */ dtrace_aggregation_t **oaggs = state->dts_aggregations; dtrace_aggregation_t **aggs; int naggs = state->dts_naggregations << 1; int onaggs = 
state->dts_naggregations; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(aggid == state->dts_naggregations + 1); -#else ASSERT(aggid == (dtrace_aggid_t)state->dts_naggregations + 1); -#endif /* __APPLE */ if (naggs == 0) { ASSERT(oaggs == NULL); @@ -10806,12 +9990,8 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) uint16_t format = 0; dtrace_recdesc_t *rec; dtrace_state_t *state = ecb->dte_state; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - dtrace_optval_t *opt = state->dts_options, nframes, strsize; -#else dtrace_optval_t *opt = state->dts_options; dtrace_optval_t nframes=0, strsize; -#endif /* __APPLE__ */ uint64_t arg = desc->dtad_arg; lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); @@ -10852,11 +10032,11 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) * We know that our arg is a string -- turn it into a * format. */ - if (arg == NULL) { + if (arg == 0) { ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); format = 0; } else { - ASSERT(arg != NULL); + ASSERT(arg != 0); ASSERT(arg > KERNELBASE); format = dtrace_format_add(state, (char *)(uintptr_t)arg); @@ -10865,9 +10045,9 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) /*FALLTHROUGH*/ case DTRACEACT_LIBACT: case DTRACEACT_DIFEXPR: -#if defined(__APPLE__) - case DTRACEACT_APPLEBINARY: -#endif /* __APPLE__ */ + case DTRACEACT_TRACEMEM: + case DTRACEACT_TRACEMEM_DYNSIZE: + case DTRACEACT_APPLEBINARY: /* __APPLE__ */ if (dp == NULL) return (EINVAL); @@ -10954,9 +10134,7 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) case DTRACEACT_CHILL: case DTRACEACT_DISCARD: case DTRACEACT_RAISE: -#if defined(__APPLE__) - case DTRACEACT_PIDRESUME: -#endif /* __APPLE__ */ + case DTRACEACT_PIDRESUME: /* __APPLE__ */ if (dp == NULL) return (EINVAL); break; @@ -11122,7 +10300,7 @@ dtrace_ecb_disable(dtrace_ecb_t *ecb) probe->dtpr_ecb_last = prev; } - probe->dtpr_provider->ecb_count--; + probe->dtpr_provider->dtpv_ecb_count--; /* * The ECB has been disconnected from the probe; now sync to assure * that all CPUs have seen the change before returning. 
@@ -11308,11 +10486,7 @@ dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (id == 0 || id > state->dts_necbs) -#else - if (id == 0 || id > (dtrace_epid_t)state->dts_necbs) -#endif /* __APPLE__ */ + if (id == 0 || id > (dtrace_epid_t)state->dts_necbs) return (NULL); ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); @@ -11329,11 +10503,7 @@ dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (id == 0 || id > state->dts_naggregations) -#else if (id == 0 || id > (dtrace_aggid_t)state->dts_naggregations) -#endif /* __APPLE__ */ return (NULL); ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); @@ -11409,31 +10579,31 @@ dtrace_buffer_activate(dtrace_state_t *state) dtrace_interrupt_enable(cookie); } +static int +dtrace_buffer_canalloc(size_t size) +{ + if (size > (UINT64_MAX - dtrace_buffer_memory_inuse)) + return (B_FALSE); + if ((size + dtrace_buffer_memory_inuse) > dtrace_buffer_memory_maxsize) + return (B_FALSE); + + return (B_TRUE); +} + static int dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, processorid_t cpu) { dtrace_cpu_t *cp; dtrace_buffer_t *buf; + size_t size_before_alloc = dtrace_buffer_memory_inuse; lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED); lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (size > dtrace_nonroot_maxsize && - !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) - return (EFBIG); -#else if (size > (size_t)dtrace_nonroot_maxsize && !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) return (EFBIG); -#endif /* __APPLE__ */ - - -#if defined(__APPLE__) - if (size > (sane_size / 8) / (int)NCPU) /* As in kdbg_set_nkdbufs(), roughly. */ - return (ENOMEM); -#endif /* __APPLE__ */ cp = cpu_list; @@ -11455,8 +10625,12 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, ASSERT(buf->dtb_xamot == NULL); + /* DTrace, please do not eat all the memory. */ + if (dtrace_buffer_canalloc(size) == B_FALSE) + goto err; if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) goto err; + dtrace_buffer_memory_inuse += size; buf->dtb_size = size; buf->dtb_flags = flags; @@ -11466,10 +10640,16 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, if (flags & DTRACEBUF_NOSWITCH) continue; + /* DTrace, please do not eat all the memory. */ + if (dtrace_buffer_canalloc(size) == B_FALSE) + goto err; if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) goto err; + dtrace_buffer_memory_inuse += size; } while ((cp = cp->cpu_next) != cpu_list); + ASSERT(dtrace_buffer_memory_inuse <= dtrace_buffer_memory_maxsize); + return (0); err: @@ -11497,6 +10677,9 @@ err: buf->dtb_size = 0; } while ((cp = cp->cpu_next) != cpu_list); + /* Restore the size saved before allocating memory */ + dtrace_buffer_memory_inuse = size_before_alloc; + return (ENOMEM); } @@ -11548,11 +10731,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, offs += sizeof (uint32_t); } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if ((soffs = offs + needed) > buf->dtb_size) { -#else if ((uint64_t)(soffs = offs + needed) > buf->dtb_size) { -#endif /* __APPLE__ */ dtrace_buffer_drop(buf); return (-1); } @@ -11623,11 +10802,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, * there. 
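The dtrace_buffer_canalloc() helper added earlier in this hunk checks the new global cap without risking integer overflow: it first verifies that the request cannot wrap the in-use counter and only then compares the sum against dtrace_buffer_memory_maxsize. A standalone sketch of the same pattern, with hypothetical variable names and an arbitrary cap:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t mem_inuse;				/* bytes already charged */
static uint64_t mem_maxsize = 16ULL * 1024 * 1024;	/* hypothetical 16 MB cap */

/*
 * True when "size" more bytes can be charged without overflowing the
 * 64-bit counter or exceeding the cap; the overflow test must come first,
 * otherwise the addition below could wrap and slip past the cap check.
 */
static bool
can_alloc(uint64_t size)
{
	if (size > UINT64_MAX - mem_inuse)
		return (false);
	return (mem_inuse + size <= mem_maxsize);
}

int
main(void)
{
	assert(can_alloc(4096));
	mem_inuse = mem_maxsize;
	assert(!can_alloc(1));			/* cap already reached */
	assert(!can_alloc(UINT64_MAX));		/* sum would overflow uint64_t */
	return (0);
}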
We need to clear the buffer from the current * offset to the end (there may be old gunk there). */ -#if !defined(__APPLE__) /* Quiet compiler warnings */ - while (offs < buf->dtb_size) -#else while ((uint64_t)offs < buf->dtb_size) -#endif /* __APPLE__ */ tomax[offs++] = 0; /* @@ -11664,22 +10839,14 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, } } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - while (offs + total_off > woffs) { -#else while (offs + total_off > (size_t)woffs) { -#endif /* __APPLE__ */ dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); size_t size; if (epid == DTRACE_EPIDNONE) { size = sizeof (uint32_t); } else { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - ASSERT(epid <= state->dts_necbs); -#else ASSERT(epid <= (dtrace_epid_t)state->dts_necbs); -#endif /* __APPLE__ */ ASSERT(state->dts_ecbs[epid - 1] != NULL); size = state->dts_ecbs[epid - 1]->dte_size; @@ -11714,12 +10881,7 @@ dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, buf->dtb_offset = 0; woffs = total_off; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - while (woffs < buf->dtb_size) -#else while ((uint64_t)woffs < buf->dtb_size) -#endif /* __APPLE__ */ - tomax[woffs++] = 0; } @@ -11836,9 +10998,15 @@ dtrace_buffer_free(dtrace_buffer_t *bufs) if (buf->dtb_xamot != NULL) { ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); kmem_free(buf->dtb_xamot, buf->dtb_size); + + ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size); + dtrace_buffer_memory_inuse -= buf->dtb_size; } kmem_free(buf->dtb_tomax, buf->dtb_size); + ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size); + dtrace_buffer_memory_inuse -= buf->dtb_size; + buf->dtb_size = 0; buf->dtb_tomax = NULL; buf->dtb_xamot = NULL; @@ -11872,9 +11040,8 @@ dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) ASSERT(enab->dten_probegen == 0); ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); -#if defined(__APPLE__) - if (ecb == NULL) return; /* Note: protection against gcc 4.0 botch on x86 */ -#endif /* __APPLE__ */ + /* APPLE NOTE: this protects against gcc 4.0 botch on x86 */ + if (ecb == NULL) return; if (enab->dten_ndesc < enab->dten_maxdesc) { enab->dten_desc[enab->dten_ndesc++] = ecb; @@ -12076,19 +11243,7 @@ dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, dtrace_ecbdesc_t *ep = enab->dten_desc[i]; dtrace_probedesc_t *pd = &ep->dted_probe; -#if !defined(__APPLE__) - if (strcmp(pd->dtpd_provider, match->dtpd_provider)) - continue; - - if (strcmp(pd->dtpd_mod, match->dtpd_mod)) - continue; - - if (strcmp(pd->dtpd_func, match->dtpd_func)) - continue; - - if (strcmp(pd->dtpd_name, match->dtpd_name)) - continue; -#else /* Employ size bounded string operation. */ + /* APPLE NOTE: Darwin employs size bounded string operation. */ if (strncmp(pd->dtpd_provider, match->dtpd_provider, DTRACE_PROVNAMELEN)) continue; @@ -12100,7 +11255,6 @@ dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, if (strncmp(pd->dtpd_name, match->dtpd_name, DTRACE_NAMELEN)) continue; -#endif /* __APPLE__ */ /* * We have a winning probe! Add it to our growing @@ -12221,16 +11375,13 @@ dtrace_enabling_matchall(void) * however: the taskq_destroy() at the end of dtrace_detach() will * block pending our completion. 
*/ - for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { -#if !defined(__APPLE__) - cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; - if (INGLOBALZONE(curproc) || - cr != NULL && getzoneid() == crgetzoneid(cr)) - (void) dtrace_enabling_match(enab, NULL); -#else - (void) dtrace_enabling_match(enab, NULL); /* As if always in "global" zone." */ -#endif /* __APPLE__ */ + /* + * Darwin doesn't do zones. + * Behave as if always in "global" zone." + */ + for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { + (void) dtrace_enabling_match(enab, NULL); } lck_mtx_unlock(&dtrace_lock); @@ -12361,11 +11512,7 @@ dtrace_dof_create(dtrace_state_t *state) lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) - dof = kmem_zalloc(len, KM_SLEEP); -#else dof = dt_kmem_zalloc_aligned(len, 8, KM_SLEEP); -#endif /* __APPLE__ */ dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; @@ -12412,11 +11559,7 @@ dtrace_dof_create(dtrace_state_t *state) } static dof_hdr_t * -#if !defined(__APPLE__) -dtrace_dof_copyin(uintptr_t uarg, int *errp) -#else dtrace_dof_copyin(user_addr_t uarg, int *errp) -#endif { dof_hdr_t hdr, *dof; @@ -12425,11 +11568,7 @@ dtrace_dof_copyin(user_addr_t uarg, int *errp) /* * First, we're going to copyin() the sizeof (dof_hdr_t). */ -#if !defined(__APPLE__) - if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { -#else if (copyin(uarg, &hdr, sizeof (hdr)) != 0) { -#endif dtrace_dof_error(NULL, "failed to copyin DOF header"); *errp = EFAULT; return (NULL); @@ -12439,11 +11578,7 @@ dtrace_dof_copyin(user_addr_t uarg, int *errp) * Now we'll allocate the entire DOF and copy it in -- provided * that the length isn't outrageous. */ -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { -#else if (hdr.dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) { -#endif /* __APPLE__ */ dtrace_dof_error(&hdr, "load size exceeds maximum"); *errp = E2BIG; return (NULL); @@ -12455,16 +11590,6 @@ dtrace_dof_copyin(user_addr_t uarg, int *errp) return (NULL); } -#if !defined(__APPLE__) - dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); - - if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || - dof->dofh_loadsz != hdr.dofh_loadsz) { - kmem_free(dof, hdr.dofh_loadsz); - *errp = EFAULT; - return (NULL); - } -#else dof = dt_kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP); if (copyin(uarg, dof, hdr.dofh_loadsz) != 0 || @@ -12473,13 +11598,10 @@ dtrace_dof_copyin(user_addr_t uarg, int *errp) *errp = EFAULT; return (NULL); } -#endif return (dof); } -#if defined(__APPLE__) - static dof_hdr_t * dtrace_dof_copyin_from_proc(proc_t* p, user_addr_t uarg, int *errp) { @@ -12523,8 +11645,6 @@ dtrace_dof_copyin_from_proc(proc_t* p, user_addr_t uarg, int *errp) return (dof); } -#endif /* __APPLE__ */ - static dof_hdr_t * dtrace_dof_property(const char *name) { @@ -12538,15 +11658,9 @@ dtrace_dof_property(const char *name) * only) interpreted to be integer arrays. We must read our DOF * as an integer array, and then squeeze it into a byte array. 
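dtrace_dof_copyin() above follows the usual two-step pattern for variable-length user input: copy in only the fixed header, validate the self-described length against a maximum, allocate, copy the whole object, and re-check that the length field did not change underneath. A userland-flavoured sketch of that shape, with placeholder types, limits, and a memcpy stand-in for copyin():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder header: real DOF carries much more than a length. */
struct blob_hdr {
	uint64_t loadsz;
};

#define	BLOB_MAXSIZE	(512 * 1024)

/* Stand-in for copyin(); here the "user" source is just a pointer. */
static int
copy_from_user(const void *usrc, void *dst, size_t len)
{
	memcpy(dst, usrc, len);
	return (0);
}

static struct blob_hdr *
blob_copyin(const void *uaddr)
{
	struct blob_hdr hdr, *blob;

	/* Step 1: fetch only the fixed-size header. */
	if (copy_from_user(uaddr, &hdr, sizeof (hdr)) != 0)
		return (NULL);

	/* Step 2: refuse oversized or truncated lengths before allocating. */
	if (hdr.loadsz >= BLOB_MAXSIZE || hdr.loadsz < sizeof (hdr))
		return (NULL);

	/* Step 3: copy the whole object and re-validate the length field. */
	if ((blob = malloc(hdr.loadsz)) == NULL)
		return (NULL);
	if (copy_from_user(uaddr, blob, hdr.loadsz) != 0 ||
	    blob->loadsz != hdr.loadsz) {
		free(blob);
		return (NULL);
	}
	return (blob);
}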
*/ -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, - (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) - return (NULL); -#else if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, name, (int **)&buf, &len) != DDI_PROP_SUCCESS) return (NULL); -#endif /* __APPLE__ */ for (i = 0; i < len; i++) buf[i] = (uchar_t)(((int *)buf)[i]); @@ -12563,21 +11677,13 @@ dtrace_dof_property(const char *name) return (NULL); } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (loadsz >= dtrace_dof_maxsize) { -#else if (loadsz >= (uint64_t)dtrace_dof_maxsize) { -#endif /* __APPLE__ */ ddi_prop_free(buf); dtrace_dof_error(NULL, "oversized DOF"); return (NULL); } -#if !defined(__APPLE__) - dof = kmem_alloc(loadsz, KM_SLEEP); -#else dof = dt_kmem_alloc_aligned(loadsz, 8, KM_SLEEP); -#endif /* __APPLE__ */ bcopy(buf, dof, loadsz); ddi_prop_free(buf); @@ -12587,11 +11693,7 @@ dtrace_dof_property(const char *name) static void dtrace_dof_destroy(dof_hdr_t *dof) { -#if !defined(__APPLE__) - kmem_free(dof, dof->dofh_loadsz); -#else dt_kmem_free_aligned(dof, dof->dofh_loadsz); -#endif /* __APPLE__ */ } /* @@ -12665,9 +11767,9 @@ dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) (void) strncpy(desc->dtpd_provider, (char *)(str + probe->dofp_provider), MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); -#if defined(__APPLE__) /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. */ desc->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; -#endif /* __APPLE__ */ if (probe->dofp_mod >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe module"); @@ -12676,9 +11778,9 @@ dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); -#if defined(__APPLE__) /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. */ desc->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; -#endif /* __APPLE__ */ if (probe->dofp_func >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe function"); @@ -12687,9 +11789,9 @@ dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); -#if defined(__APPLE__) /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. */ desc->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; -#endif /* __APPLE__ */ if (probe->dofp_name >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe name"); @@ -12698,9 +11800,9 @@ dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); -#if defined(__APPLE__) /* Employ size bounded string operation. */ + + /* APPLE NOTE: Darwin employs size bounded string operation. 
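strncpy() does not guarantee a terminating NUL when the source is at least as long as the bound, which is why each copy in dtrace_dof_probedesc() is followed by an explicit '\0' store like the one just below. A tiny illustration of the pitfall and the fix, with an arbitrary buffer size:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char name[8];

	/*
	 * With n == sizeof (name) - 1 and a longer source, strncpy() copies
	 * seven characters and never writes a terminator; the explicit store
	 * below restores the invariant before the buffer is used.
	 */
	strncpy(name, "syscall::open:entry", sizeof (name) - 1);
	name[sizeof (name) - 1] = '\0';

	printf("%s\n", name);		/* prints "syscall" */
	return (0);
}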
*/ desc->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; -#endif /* __APPLE__ */ return (desc); } @@ -12714,12 +11816,8 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, dof_difohdr_t *dofd; uintptr_t daddr = (uintptr_t)dof; size_t max_size = dtrace_difo_maxsize; -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i, l, n; -#else uint_t i; int l, n; -#endif /* __APPLE__ */ static const struct { @@ -12746,11 +11844,7 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), sizeof (uint_t), "multiple variable tables" }, -#if !defined(__APPLE__) - { DOF_SECT_NONE, 0, 0, 0, NULL } -#else { DOF_SECT_NONE, 0, 0, 0, 0, NULL } -#endif /* __APPLE__ */ }; if (sec->dofs_type != DOF_SECT_DIFOHDR) { @@ -12793,30 +11887,18 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (subsec->dofs_type != difo[i].section) - continue; -#else if (subsec->dofs_type != (uint32_t)difo[i].section) continue; -#endif /* __APPLE __ */ if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { dtrace_dof_error(dof, "section not loaded"); goto err; } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (subsec->dofs_align != difo[i].align) { - dtrace_dof_error(dof, "bad alignment"); - goto err; - } -#else if (subsec->dofs_align != (uint32_t)difo[i].align) { dtrace_dof_error(dof, "bad alignment"); goto err; } -#endif /* __APPLE__ */ bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); @@ -12826,17 +11908,10 @@ dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, goto err; } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (difo[i].entsize != subsec->dofs_entsize) { - dtrace_dof_error(dof, "entry size mismatch"); - goto err; - } -#else if ((uint32_t)difo[i].entsize != subsec->dofs_entsize) { dtrace_dof_error(dof, "entry size mismatch"); goto err; } -#endif /* __APPLE__ */ if (subsec->dofs_entsize != 0 && (subsec->dofs_size % subsec->dofs_entsize) != 0) { @@ -13112,76 +12187,10 @@ err: return (NULL); } -#if !defined(__APPLE__) /* APPLE dyld has already done this for us */ /* - * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the - * specified DOF. At present, this amounts to simply adding 'ubase' to the - * site of any user SETX relocations to account for load object base address. - * In the future, if we need other relocations, this function can be extended. + * APPLE NOTE: dyld handles dof relocation. 
+ * Darwin does not need dtrace_dof_relocate() */ -static int -dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) -{ - uintptr_t daddr = (uintptr_t)dof; - dof_relohdr_t *dofr = - (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); - dof_sec_t *ss, *rs, *ts; - dof_relodesc_t *r; - uint_t i, n; - - if (sec->dofs_size < sizeof (dof_relohdr_t) || - sec->dofs_align != sizeof (dof_secidx_t)) { - dtrace_dof_error(dof, "invalid relocation header"); - return (-1); - } - - ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); - rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); - ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); - - if (ss == NULL || rs == NULL || ts == NULL) - return (-1); /* dtrace_dof_error() has been called already */ - - if (rs->dofs_entsize < sizeof (dof_relodesc_t) || - rs->dofs_align != sizeof (uint64_t)) { - dtrace_dof_error(dof, "invalid relocation section"); - return (-1); - } - - r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); - n = rs->dofs_size / rs->dofs_entsize; - - for (i = 0; i < n; i++) { - uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; - - switch (r->dofr_type) { - case DOF_RELO_NONE: - break; - case DOF_RELO_SETX: - if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + - sizeof (uint64_t) > ts->dofs_size) { - dtrace_dof_error(dof, "bad relocation offset"); - return (-1); - } - - if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { - dtrace_dof_error(dof, "misaligned setx relo"); - return (-1); - } - - *(uint64_t *)taddr += ubase; - break; - default: - dtrace_dof_error(dof, "invalid relocation type"); - return (-1); - } - - r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); - } - - return (0); -} -#endif /* __APPLE__ */ /* * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated @@ -13225,21 +12234,13 @@ dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, return (-1); } -#if !defined(__APPLE__) - if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && - dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { - dtrace_dof_error(dof, "DOF version mismatch"); - return (-1); - } -#else /* - * We only support DOF_VERSION_3 for now. + * APPLE NOTE: Darwin only supports DOF_VERSION_3 for now. */ if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_3) { dtrace_dof_error(dof, "DOF version mismatch"); return (-1); } -#endif if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { dtrace_dof_error(dof, "DOF uses unsupported instruction set"); @@ -13343,32 +12344,10 @@ dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, } } -#if !defined(__APPLE__) - /* - * Take a second pass through the sections and locate and perform any - * relocations that are present. We do this after the first pass to - * be sure that all sections have had their headers validated. - */ - for (i = 0; i < dof->dofh_secnum; i++) { - dof_sec_t *sec = (dof_sec_t *)(daddr + - (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); - - if (!(sec->dofs_flags & DOF_SECF_LOAD)) - continue; /* skip sections that are not loadable */ - - switch (sec->dofs_type) { - case DOF_SECT_URELHDR: - if (dtrace_dof_relocate(dof, sec, ubase) != 0) - return (-1); - break; - } - } -#else /* - * APPLE NOTE: We have no relocation to perform. All dof values are - * relative offsets. + * APPLE NOTE: We have no further relocation to perform. + * All dof values are relative offsets. 
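Aside, not from the xnu sources: the slurp path above now accepts exactly one DOF version on Darwin. A trivial sketch of that kind of header gate; the reduced struct and the assumed value of DOF_VERSION_3 are illustrative only:

#include <stdio.h>

#define DOF_VERSION_3	3	/* assumed value, for illustration only */

struct dof_hdr {		/* reduced stand-in for dof_hdr_t */
	unsigned char version;
};

static int
dof_check_version(const struct dof_hdr *h)
{
	/* Accept exactly one DOF version; everything else is an error. */
	if (h->version != DOF_VERSION_3) {
		fprintf(stderr, "DOF version mismatch: %u\n", h->version);
		return (-1);
	}
	return (0);
}

int
main(void)
{
	struct dof_hdr good = { DOF_VERSION_3 }, bad = { 2 };

	printf("good: %d, bad: %d\n", dof_check_version(&good),
	    dof_check_version(&bad));
	return (0);
}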
*/ -#endif /* __APPLE__ */ if ((enab = *enabp) == NULL) enab = *enabp = dtrace_enabling_create(vstate); @@ -13380,22 +12359,18 @@ dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, if (sec->dofs_type != DOF_SECT_ECBDESC) continue; -#if !defined(__APPLE__) - if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { - dtrace_enabling_destroy(enab); - *enabp = NULL; - return (-1); - } -#else - /* Note: Defend against gcc 4.0 botch on x86 (not all paths out of inlined dtrace_dof_ecbdesc - are checked for the NULL return value.) */ + /* + * APPLE NOTE: Defend against gcc 4.0 botch on x86. + * not all paths out of inlined dtrace_dof_ecbdesc + * are checked for the NULL return value. + * Check for NULL explicitly here. + */ ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr); if (ep == NULL) { dtrace_enabling_destroy(enab); *enabp = NULL; return (-1); } -#endif /* __APPLE__ */ dtrace_enabling_add(enab, ep); } @@ -13410,12 +12385,8 @@ dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, static int dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) { -#if !defined(__APPLE__) /* Quiet compiler warnings */ - int i, rval; -#else uint_t i; int rval; -#endif /* __APPLE__ */ uint32_t entsize; size_t offs; dof_optdesc_t *desc; @@ -13452,11 +12423,7 @@ dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) return (EINVAL); } -#if !defined(__APPLE__) /* Quiet compiler warnings */ - if (desc->dofo_value == DTRACEOPT_UNSET) { -#else if (desc->dofo_value == (uint64_t)DTRACEOPT_UNSET) { -#endif /* __APPLE __ */ dtrace_dof_error(dof, "unset option"); return (EINVAL); } @@ -13475,21 +12442,14 @@ dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) /* * DTrace Consumer State Functions */ -#if defined(__APPLE__) /* Quiet compiler warning. */ -static -#endif /* __APPLE__ */ -int +static int dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) { size_t hashsize, maxper, min_size, chunksize = dstate->dtds_chunksize; void *base; uintptr_t limit; dtrace_dynvar_t *dvar, *next, *start; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else size_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); @@ -13580,10 +12540,7 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) return (0); } -#if defined(__APPLE__) /* Quiet compiler warning. 
*/ -static -#endif /* __APPLE__ */ -void +static void dtrace_dstate_fini(dtrace_dstate_t *dstate) { lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED); @@ -13658,13 +12615,8 @@ dtrace_state_deadman(dtrace_state_t *state) state->dts_alive = now; } -#if !defined(__APPLE__) -dtrace_state_t * -dtrace_state_create(dev_t *devp, cred_t *cr) -#else static int dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) -#endif /* __APPLE__ */ { minor_t minor; major_t major; @@ -13676,15 +12628,6 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) - minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, - VM_BESTFIT | VM_SLEEP); - - if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { - vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); - return (NULL); - } -#else /* Cause restart */ *new_state = NULL; @@ -13717,8 +12660,6 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) return (EAGAIN); /* temporary resource shortage */ } -#endif /* __APPLE__ */ - state = ddi_get_soft_state(dtrace_softstate, minor); state->dts_epid = DTRACE_EPIDNONE + 1; @@ -13829,18 +12770,13 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) * If we have all privs in whatever zone this is, * we can do destructive things to processes which * have altered credentials. + * + * APPLE NOTE: Darwin doesn't do zones. + * Behave as if zone always has destructive privs. */ -#if !defined(__APPLE__) - if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), - cr->cr_zone->zone_privset)) { - state->dts_cred.dcr_action |= - DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; - } -#else - /* Darwin doesn't do zones. */ + state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; -#endif /* __APPLE__ */ } /* @@ -13880,18 +12816,12 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) * If we have all privs in whatever zone this is, * we can do destructive things to processes which * have altered credentials. - */ -#if !defined(__APPLE__) - if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), - cr->cr_zone->zone_privset)) { - state->dts_cred.dcr_action |= - DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; - } -#else - /* Darwin doesn't do zones. */ + * + * APPLE NOTE: Darwin doesn't do zones. + * Behave as if zone always has destructive privs. + */ state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; -#endif /* __APPLE__ */ } /* @@ -13911,12 +12841,8 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) } } -#if !defined(__APPLE__) - return (state); -#else *new_state = state; return(0); /* Success */ -#endif /* __APPLE__ */ } static int @@ -13954,11 +12880,7 @@ dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) flags |= DTRACEBUF_INACTIVE; } -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { -#else for (size = opt[which]; (size_t)size >= sizeof (uint64_t); size >>= 1) { -#endif /* __APPLE__ */ /* * The size must be 8-byte aligned. If the size is not 8-byte * aligned, drop it down by the difference. @@ -14152,17 +13074,10 @@ dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) * If we have an aggregation buffer, we must also have * a buffer to use as scratch. 
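Aside, not from the xnu sources: the hunk above changes dtrace_state_create() so that it returns an errno-style code and hands the new state back through an out parameter instead of returning a pointer. A user-space sketch of that calling convention, with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
	int	epid;
};

/* Returns 0 on success or an errno value; *new_state is set only on success. */
static int
state_create(struct state **new_state)
{
	struct state *s;

	*new_state = NULL;		/* a failed call forces the caller to retry */

	if ((s = calloc(1, sizeof (*s))) == NULL)
		return (EAGAIN);	/* temporary resource shortage */

	s->epid = 1;
	*new_state = s;
	return (0);
}

int
main(void)
{
	struct state *s;
	int rv = state_create(&s);

	if (rv != 0 || s == NULL) {
		fprintf(stderr, "create failed: %d\n", rv);
		return (1);
	}
	printf("epid %d\n", s->epid);
	free(s);
	return (0);
}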
*/ -#if !defined(__APPLE__) /* Quiet compiler warning */ - if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || - opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { - opt[DTRACEOPT_BUFSIZE] = state->dts_needed; - } -#else if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || (size_t)opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { opt[DTRACEOPT_BUFSIZE] = state->dts_needed; } -#endif /* __APPLE__ */ } } @@ -14388,7 +13303,12 @@ dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, switch (option) { case DTRACEOPT_DESTRUCTIVE: - if (dtrace_destructive_disallow) + /* + * Prevent consumers from enabling destructive actions if DTrace + * is running in a restricted environment, or if actions are + * disallowed. + */ + if (dtrace_is_restricted() || dtrace_destructive_disallow) return (EACCES); state->dts_cred.dcr_destructive = 1; @@ -14605,15 +13525,9 @@ dtrace_anon_property(void) * If we haven't allocated an anonymous state, we'll do so now. */ if ((state = dtrace_anon.dta_state) == NULL) { -#if !defined(__APPLE__) - state = dtrace_state_create(NULL, NULL); - dtrace_anon.dta_state = state; - if (state == NULL) { -#else rv = dtrace_state_create(NULL, NULL, &state); dtrace_anon.dta_state = state; if (rv != 0 || state == NULL) { -#endif /* __APPLE__ */ /* * This basically shouldn't happen: the only * failure mode from dtrace_state_create() is a @@ -14678,23 +13592,15 @@ static void dtrace_helper_trace(dtrace_helper_action_t *helper, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) { -#if !defined(__APPLE__) /* Quiet compiler warning */ - uint32_t size, next, nnext, i; -#else uint32_t size, next, nnext; int i; -#endif /* __APPLE__ */ dtrace_helptrace_t *ent; uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; if (!dtrace_helptrace_enabled) return; -#if !defined(__APPLE__) /* Quiet compiler warning */ - ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); -#else ASSERT((uint32_t)vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); -#endif /* __APPLE__ */ /* * What would a tracing framework be without its own tracing @@ -14832,7 +13738,7 @@ err: mstate->dtms_arg[0] = sarg0; mstate->dtms_arg[1] = sarg1; - return (NULL); + return (0); } static void @@ -14854,23 +13760,12 @@ dtrace_helper_action_destroy(dtrace_helper_action_t *helper, kmem_free(helper, sizeof (dtrace_helper_action_t)); } -#if !defined(__APPLE__) -static int -dtrace_helper_destroygen(int gen) -{ - proc_t *p = curproc; -#else static int dtrace_helper_destroygen(proc_t* p, int gen) { -#endif dtrace_helpers_t *help = p->p_dtrace_helpers; dtrace_vstate_t *vstate; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); @@ -14968,13 +13863,8 @@ dtrace_helper_validate(dtrace_helper_action_t *helper) return (err == 0); } -#if !defined(__APPLE__) -static int -dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) -#else static int dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep) -#endif { dtrace_helpers_t *help; dtrace_helper_action_t *helper, *last; @@ -14986,11 +13876,7 @@ dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep) if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) return (EINVAL); -#if !defined(__APPLE__) - help = curproc->p_dtrace_helpers; -#else help = p->p_dtrace_helpers; -#endif last = help->dthps_actions[which]; vstate = &help->dthps_vstate; @@ -15043,11 +13929,7 @@ dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep) last->dtha_next = helper; } -#if 
!defined(__APPLE__) /* Quiet compiler warning */ - if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { -#else if ((uint32_t)vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { -#endif /* __APPLE__ */ dtrace_helptrace_nlocals = vstate->dtvs_nlocals; dtrace_helptrace_next = 0; } @@ -15105,11 +13987,7 @@ dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, * off to the meta provider. */ -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i; -#else uint_t i; -#endif /* __APPLE__ */ lck_mtx_unlock(&dtrace_lock); for (i = 0; i < help->dthps_nprovs; i++) { @@ -15121,25 +13999,15 @@ dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, lck_mtx_unlock(&dtrace_meta_lock); } -#if !defined(__APPLE__) -static int -dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) -#else static int dtrace_helper_provider_add(proc_t* p, dof_helper_t *dofhp, int gen) -#endif { dtrace_helpers_t *help; dtrace_helper_provider_t *hprov, **tmp_provs; uint_t tmp_maxprovs, i; lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); - -#if !defined(__APPLE__) - help = curproc->p_dtrace_helpers; -#else help = p->p_dtrace_helpers; -#endif ASSERT(help != NULL); /* @@ -15425,13 +14293,8 @@ dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) return (0); } -#if !defined(__APPLE__) -static int -dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) -#else static int dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) -#endif { dtrace_helpers_t *help; dtrace_vstate_t *vstate; @@ -15441,13 +14304,8 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED); -#if !defined(__APPLE__) - if ((help = curproc->p_dtrace_helpers) == NULL) - help = dtrace_helpers_create(curproc); -#else if ((help = p->p_dtrace_helpers) == NULL) help = dtrace_helpers_create(p); -#endif vstate = &help->dthps_vstate; @@ -15461,11 +14319,7 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) * Look for helper providers and validate their descriptions. */ if (dhp != NULL) { -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (i = 0; i < dof->dofh_secnum; i++) { -#else for (i = 0; (uint32_t)i < dof->dofh_secnum; i++) { -#endif /* __APPLE__ */ dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + i * dof->dofh_secsize); @@ -15489,16 +14343,7 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) dtrace_ecbdesc_t *ep = enab->dten_desc[i]; dtrace_probedesc_t *desc = &ep->dted_probe; -#if !defined(__APPLE__) - if (strcmp(desc->dtpd_provider, "dtrace") != 0) - continue; - - if (strcmp(desc->dtpd_mod, "helper") != 0) - continue; - - if (strcmp(desc->dtpd_func, "ustack") != 0) - continue; -#else /* Employ size bounded string operation. */ + /* APPLE NOTE: Darwin employs size bounded string operation. */ if (!LIT_STRNEQL(desc->dtpd_provider, "dtrace")) continue; @@ -15507,24 +14352,14 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) if (!LIT_STRNEQL(desc->dtpd_func, "ustack")) continue; -#endif /* __APPLE__ */ -#if !defined(__APPLE__) - if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, - ep)) != 0) { -#else if ((rv = dtrace_helper_action_add(p, DTRACE_HELPER_ACTION_USTACK, ep)) != 0) { -#endif /* * Adding this helper action failed -- we are now going * to rip out the entire generation and return failure. 
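Aside, not from the xnu sources: the helper code above tags additions with a generation number and, when one addition fails, tears the whole generation back out. A compact sketch of that rollback idea; the list layout and field names are made up:

#include <stdio.h>
#include <stdlib.h>

/* A generation-tagged action list, loosely modeled on the helper bookkeeping. */
struct action {
	int		gen;
	struct action	*next;
};

static struct action *actions;
static int generation;

static int
action_add(int gen)
{
	struct action *a = malloc(sizeof (*a));

	if (a == NULL)
		return (-1);
	a->gen = gen;
	a->next = actions;
	actions = a;
	return (0);
}

/* Remove every action belonging to one generation, e.g. after a partial failure. */
static void
destroygen(int gen)
{
	struct action **ap = &actions, *a;

	while ((a = *ap) != NULL) {
		if (a->gen == gen) {
			*ap = a->next;
			free(a);
		} else {
			ap = &a->next;
		}
	}
}

int
main(void)
{
	int gen = generation++;

	if (action_add(gen) != 0 || action_add(gen) != 0)
		destroygen(gen);	/* roll back the whole generation */
	destroygen(gen);
	printf("remaining: %p\n", (void *)actions);
	return (0);
}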
*/ -#if !defined(__APPLE__) - (void) dtrace_helper_destroygen(help->dthps_generation); -#else (void) dtrace_helper_destroygen(p, help->dthps_generation); -#endif dtrace_enabling_destroy(enab); dtrace_dof_destroy(dof); return (-1); @@ -15541,17 +14376,9 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) if (dhp != NULL && nprovs > 0) { dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; -#if !defined(__APPLE__) - if (dtrace_helper_provider_add(dhp, gen) == 0) { -#else if (dtrace_helper_provider_add(p, dhp, gen) == 0) { -#endif lck_mtx_unlock(&dtrace_lock); -#if !defined(__APPLE__) - dtrace_helper_provider_register(curproc, help, dhp); -#else dtrace_helper_provider_register(p, help, dhp); -#endif lck_mtx_lock(&dtrace_lock); destroy = 0; @@ -15564,10 +14391,8 @@ dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp) return (gen); } -#if defined(__APPLE__) - /* - * DTrace lazy dof + * APPLE NOTE: DTrace lazy dof implementation * * DTrace user static probes (USDT probes) and helper actions are loaded * in a process by proccessing dof sections. The dof sections are passed @@ -15971,8 +14796,6 @@ dtrace_lazy_dofs_proc_iterate_doit(proc_t *p, void* ignored) return PROC_RETURNED; } -#endif /* __APPLE__ */ - static dtrace_helpers_t * dtrace_helpers_create(proc_t *p) { @@ -15991,22 +14814,12 @@ dtrace_helpers_create(proc_t *p) return (help); } -#if !defined(__APPLE__) -static void -dtrace_helpers_destroy(void) -{ - dtrace_helpers_t *help; - dtrace_vstate_t *vstate; - proc_t *p = curproc; - int i; -#else static void dtrace_helpers_destroy(proc_t* p) { dtrace_helpers_t *help; dtrace_vstate_t *vstate; uint_t i; -#endif lck_mtx_lock(&dtrace_lock); @@ -16099,12 +14912,8 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) dtrace_helper_action_t *helper, *new, *last; dtrace_difo_t *dp; dtrace_vstate_t *vstate; -#if !defined(__APPLE__) /* Quiet compiler warning */ - int i, j, sz, hasprovs = 0; -#else uint_t i; int j, sz, hasprovs = 0; -#endif /* __APPLE__ */ lck_mtx_lock(&dtrace_lock); ASSERT(from->p_dtrace_helpers != NULL); @@ -16138,15 +14947,6 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; new->dtha_actions = kmem_alloc(sz, KM_SLEEP); -#if !defined(__APPLE__) /* Quiet compiler warning */ - for (j = 0; j < new->dtha_nactions; j++) { - dtrace_difo_t *dp = helper->dtha_actions[j]; - - ASSERT(dp != NULL); - dp = dtrace_difo_duplicate(dp, vstate); - new->dtha_actions[j] = dp; - } -#else for (j = 0; j < new->dtha_nactions; j++) { dtrace_difo_t *dpj = helper->dtha_actions[j]; @@ -16154,7 +14954,6 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) dpj = dtrace_difo_duplicate(dpj, vstate); new->dtha_actions[j] = dpj; } -#endif /* __APPLE__ */ if (last != NULL) { last->dtha_next = new; @@ -16193,9 +14992,9 @@ dtrace_helpers_duplicate(proc_t *from, proc_t *to) * DTrace Hook Functions */ -#if defined(__APPLE__) /* - * Routines to manipulate the modctl list within dtrace + * APPLE NOTE: dtrace_modctl_* routines for kext support. + * Used to manipulate the modctl list within dtrace xnu. 
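Aside, not from the xnu sources: the routines introduced above keep loaded kexts on a singly linked modctl list. A reduced sketch of the head insert and unlink operations; the real dtrace_modctl_add/_remove also track UUIDs, flags and reference counts:

#include <stdio.h>
#include <stdlib.h>

struct modctl {
	int		id;
	struct modctl	*next;
};

static struct modctl *modctl_list;

static struct modctl *
modctl_add(int id)
{
	struct modctl *ctl = calloc(1, sizeof (*ctl));

	if (ctl == NULL)
		return (NULL);
	ctl->id = id;
	ctl->next = modctl_list;	/* insert at the head of the list */
	modctl_list = ctl;
	return (ctl);
}

static void
modctl_remove(struct modctl *ctl)
{
	struct modctl **pp;

	for (pp = &modctl_list; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == ctl) {
			*pp = ctl->next;	/* unlink and free */
			free(ctl);
			return;
		}
	}
}

int
main(void)
{
	struct modctl *a = modctl_add(1), *b = modctl_add(2);

	modctl_remove(a);
	modctl_remove(b);
	printf("list empty: %d\n", modctl_list == NULL);
	return (0);
}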
*/ modctl_t *dtrace_modctl_list; @@ -16304,8 +15103,6 @@ dtrace_modctl_remove(struct modctl * ctl) kmem_free (ctl, sizeof(modctl_t)); } -#endif /* __APPLE__ */ - /* * APPLE NOTE: The kext loader will call dtrace_module_loaded * when the kext is loaded in memory, but before calling the @@ -16315,23 +15112,11 @@ dtrace_modctl_remove(struct modctl * ctl) * Return -1 on failure */ -#if !defined (__APPLE__) -static void -dtrace_module_loaded(struct modctl *ctl) -#else static int dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) -#endif /* __APPLE__ */ { dtrace_provider_t *prv; -#if !defined(__APPLE__) - mutex_enter(&dtrace_provider_lock); - mutex_enter(&mod_lock); - - ASSERT(ctl->mod_busy); -#else - /* * If kernel symbols have been disabled, return immediately * DTRACE_KERNEL_SYMBOLS_NEVER is a permanent mode, it is safe to test without holding locks @@ -16442,7 +15227,6 @@ dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) ctl->mod_flags |= MODCTL_HAS_KERNEL_SYMBOLS; lck_mtx_unlock(&dtrace_lock); -#endif /* __APPLE__ */ /* * We're going to call each providers per-module provide operation @@ -16451,14 +15235,12 @@ dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); -#if defined(__APPLE__) /* - * The contract with the kext loader is that once this function has completed, - * it may delete kernel symbols at will. We must set this while still holding - * the mod_lock. + * APPLE NOTE: The contract with the kext loader is that once this function + * has completed, it may delete kernel symbols at will. + * We must set this while still holding the mod_lock. */ ctl->mod_flags &= ~MODCTL_HAS_KERNEL_SYMBOLS; -#endif lck_mtx_unlock(&mod_lock); lck_mtx_unlock(&dtrace_provider_lock); @@ -16475,31 +15257,9 @@ dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) if (dtrace_retained == NULL) { lck_mtx_unlock(&dtrace_lock); -#if !defined(__APPLE__) - return; -#else return 0; -#endif } -#if !defined(__APPLE__) - (void) taskq_dispatch(dtrace_taskq, - (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); - - mutex_exit(&dtrace_lock); - - /* - * And now, for a little heuristic sleaze: in general, we want to - * match modules as soon as they load. However, we cannot guarantee - * this, because it would lead us to the lock ordering violation - * outlined above. The common case, of course, is that cpu_lock is - * _not_ held -- so we delay here for a clock tick, hoping that that's - * long enough for the task queue to do its work. If it's not, it's - * not a serious problem -- it just means that the module that we - * just loaded may not be immediately instrumentable. - */ - delay(1); -#else /* APPLE NOTE! * * The cpu_lock mentioned above is only held by dtrace code, Apple's xnu never actually @@ -16512,105 +15272,7 @@ dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) dtrace_enabling_matchall(); return 0; -#endif /* __APPLE__ */ -} - -#if !defined(__APPLE__) -static void -dtrace_module_unloaded(struct modctl *ctl) -{ - dtrace_probe_t template, *probe, *first, *next; - dtrace_provider_t *prov; - - template.dtpr_mod = ctl->mod_modname; - - mutex_enter(&dtrace_provider_lock); - mutex_enter(&mod_lock); - mutex_enter(&dtrace_lock); - - if (dtrace_bymod == NULL) { - /* - * The DTrace module is loaded (obviously) but not attached; - * we don't have any work to do. 
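Aside, not from the xnu sources: the module-load path above sets and later clears per-module flag bits such as MODCTL_HAS_KERNEL_SYMBOLS. A tiny sketch of that flag handling, with hypothetical flag values rather than the real MODCTL_* definitions:

#include <stdio.h>

#define MODF_HAS_KERNEL_SYMBOLS	0x01	/* invented values for illustration */
#define MODF_SYMBOLS_DONE	0x02

int
main(void)
{
	unsigned int mod_flags = 0;

	mod_flags |= MODF_HAS_KERNEL_SYMBOLS;		/* symbols are available */

	if (mod_flags & MODF_HAS_KERNEL_SYMBOLS)
		printf("can provide probes from kernel symbols\n");

	mod_flags &= ~MODF_HAS_KERNEL_SYMBOLS;		/* loader may now drop them */
	mod_flags |= MODF_SYMBOLS_DONE;

	printf("flags now 0x%x\n", mod_flags);
	return (0);
}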
- */ - mutex_exit(&dtrace_provider_lock); - mutex_exit(&mod_lock); - mutex_exit(&dtrace_lock); - return; - } - - for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); - probe != NULL; probe = probe->dtpr_nextmod) { - if (probe->dtpr_ecb != NULL) { - mutex_exit(&dtrace_provider_lock); - mutex_exit(&mod_lock); - mutex_exit(&dtrace_lock); - - /* - * This shouldn't _actually_ be possible -- we're - * unloading a module that has an enabled probe in it. - * (It's normally up to the provider to make sure that - * this can't happen.) However, because dtps_enable() - * doesn't have a failure mode, there can be an - * enable/unload race. Upshot: we don't want to - * assert, but we're not going to disable the - * probe, either. - */ - if (dtrace_err_verbose) { - cmn_err(CE_WARN, "unloaded module '%s' had " - "enabled probes", ctl->mod_modname); - } - - return; - } - } - - probe = first; - - for (first = NULL; probe != NULL; probe = next) { - ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); - - dtrace_probes[probe->dtpr_id - 1] = NULL; - probe->dtpr_provider->probe_count--; - - next = probe->dtpr_nextmod; - dtrace_hash_remove(dtrace_bymod, probe); - dtrace_hash_remove(dtrace_byfunc, probe); - dtrace_hash_remove(dtrace_byname, probe); - - if (first == NULL) { - first = probe; - probe->dtpr_nextmod = NULL; - } else { - probe->dtpr_nextmod = first; - first = probe; - } - } - - /* - * We've removed all of the module's probes from the hash chains and - * from the probe array. Now issue a dtrace_sync() to be sure that - * everyone has cleared out from any probe array processing. - */ - dtrace_sync(); - - for (probe = first; probe != NULL; probe = first) { - first = probe->dtpr_nextmod; - prov = probe->dtpr_provider; - prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, - probe->dtpr_arg); - kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); - kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); - kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); - vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); - kmem_free(probe, sizeof (dtrace_probe_t)); - } - - mutex_exit(&dtrace_lock); - mutex_exit(&mod_lock); - mutex_exit(&dtrace_provider_lock); } -#else /* __APPLE__ */ /* * Return 0 on success @@ -16654,9 +15316,9 @@ dtrace_module_unloaded(struct kmod_info *kmod) */ if (ctl != NULL) (void)dtrace_modctl_remove(ctl); - lck_mtx_unlock(&dtrace_provider_lock); - lck_mtx_unlock(&mod_lock); lck_mtx_unlock(&dtrace_lock); + lck_mtx_unlock(&mod_lock); + lck_mtx_unlock(&dtrace_provider_lock); return(0); } @@ -16675,9 +15337,9 @@ syncloop: if (syncctl==NULL) { /* We have no more work to do */ - lck_mtx_unlock(&dtrace_provider_lock); - lck_mtx_unlock(&mod_lock); lck_mtx_unlock(&dtrace_lock); + lck_mtx_unlock(&mod_lock); + lck_mtx_unlock(&dtrace_provider_lock); return(0); } else { @@ -16709,9 +15371,9 @@ syncloop: goto syncloop; } - lck_mtx_unlock(&dtrace_provider_lock); - lck_mtx_unlock(&mod_lock); lck_mtx_unlock(&dtrace_lock); + lck_mtx_unlock(&mod_lock); + lck_mtx_unlock(&dtrace_provider_lock); if (dtrace_err_verbose) { cmn_err(CE_WARN, "unloaded module '%s' had " @@ -16727,7 +15389,7 @@ syncloop: ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); dtrace_probes[probe->dtpr_id - 1] = NULL; - probe->dtpr_provider->probe_count--; + probe->dtpr_provider->dtpv_probe_count--; next = probe->dtpr_nextmod; dtrace_hash_remove(dtrace_bymod, probe); @@ -16774,7 +15436,6 @@ syncloop: return(0); } -#endif /* __APPLE__ */ void dtrace_suspend(void) @@ -16882,8 +15543,8 @@ 
dtrace_toxrange_add(uintptr_t base, uintptr_t limit) dtrace_toxrange = range; } - ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); - ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); + ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); + ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); dtrace_toxrange[dtrace_toxranges].dtt_base = base; dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; @@ -16909,28 +15570,13 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t), 0) != 0) { cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); - lck_mtx_unlock(&cpu_lock); - lck_mtx_unlock(&dtrace_provider_lock); lck_mtx_unlock(&dtrace_lock); - return (DDI_FAILURE); - } - -#if !defined(__APPLE__) - if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, - DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || - ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, - DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { - cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); - ddi_remove_minor_node(devi, NULL); - ddi_soft_state_fini(&dtrace_softstate); - lck_mtx_unlock(&cpu_lock); lck_mtx_unlock(&dtrace_provider_lock); - lck_mtx_unlock(&dtrace_lock); + lck_mtx_unlock(&cpu_lock); return (DDI_FAILURE); } -#else + /* Darwin uses BSD cloning device driver to automagically obtain minor device number. */ -#endif /* __APPLE__ */ ddi_report_dev(devi); dtrace_devi = devi; @@ -16998,14 +15644,7 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) ASSERT(dtrace_provider != NULL); ASSERT((dtrace_provider_id_t)dtrace_provider == id); -#if !defined(__APPLE__) - dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) - dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); - dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) - dtrace_provider, NULL, NULL, "END", 0, NULL); - dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) - dtrace_provider, NULL, NULL, "ERROR", 1, NULL); -#elif defined (__x86_64__) +#if defined (__x86_64__) dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) dtrace_provider, NULL, NULL, "BEGIN", 1, NULL); dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) @@ -17014,7 +15653,7 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) dtrace_provider, NULL, NULL, "ERROR", 3, NULL); #else #error Unknown Architecture -#endif /* __APPLE__ */ +#endif dtrace_anon_property(); lck_mtx_unlock(&cpu_lock); @@ -17040,14 +15679,12 @@ dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) if (dtrace_anon.dta_enabling != NULL) { ASSERT(dtrace_retained == dtrace_anon.dta_enabling); -#if defined(__APPLE__) /* - * If there is anonymous dof, we should switch symbol modes. + * APPLE NOTE: if handling anonymous dof, switch symbol modes. */ if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) { dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL; } -#endif dtrace_enabling_provide(NULL); state = dtrace_anon.dta_state; @@ -17094,23 +15731,9 @@ dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) uint32_t priv; uid_t uid; zoneid_t zoneid; -#if defined (__APPLE__) int rv; -#endif /* __APPLE__ */ -#if !defined(__APPLE__) - if (getminor(*devp) == DTRACEMNRN_HELPER) - return (0); - - /* - * If this wasn't an open with the "helper" minor, then it must be - * the "dtrace" minor. - */ - if (getminor(*devp) != DTRACEMNRN_DTRACE) - return (ENXIO); -#else - /* Darwin puts Helper on its own major device. 
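Aside, not from the xnu sources: several hunks in this area reorder the lck_mtx_unlock() calls so that locks are released in the reverse of the order in which they were acquired. A minimal pthread sketch of that discipline; the lock names are stand-ins for dtrace_provider_lock, mod_lock and dtrace_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t provider_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mod_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;

int
main(void)
{
	/* Acquire in the documented order... */
	pthread_mutex_lock(&provider_lock);
	pthread_mutex_lock(&mod_lock);
	pthread_mutex_lock(&probe_lock);

	printf("critical section\n");

	/* ...and release in the reverse order, innermost lock first. */
	pthread_mutex_unlock(&probe_lock);
	pthread_mutex_unlock(&mod_lock);
	pthread_mutex_unlock(&provider_lock);
	return (0);
}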
*/ -#endif /* __APPLE__ */ + /* APPLE: Darwin puts Helper on its own major device. */ /* * If no DTRACE_PRIV_* bits are set in the credential, then the @@ -17120,13 +15743,11 @@ dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) if (priv == DTRACE_PRIV_NONE) return (EACCES); -#if defined(__APPLE__) /* - * We delay the initialization of fasttrap as late as possible. + * APPLE NOTE: We delay the initialization of fasttrap as late as possible. * It certainly can't be later than now! */ fasttrap_init(); -#endif /* __APPLE__ */ /* * Ask all providers to provide all their probes. @@ -17146,981 +15767,146 @@ dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) */ if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { dtrace_opens--; - lck_mtx_unlock(&cpu_lock); - lck_mtx_unlock(&dtrace_lock); - return (EBUSY); - } - -#if !defined(__APPLE__) - state = dtrace_state_create(devp, cred_p); - lck_mtx_unlock(&cpu_lock); - - if (state == NULL) { - if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) - (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); - lck_mtx_unlock(&dtrace_lock); - return (EAGAIN); - } - - lck_mtx_unlock(&dtrace_lock); -#else - rv = dtrace_state_create(devp, cred_p, &state); - lck_mtx_unlock(&cpu_lock); - - if (rv != 0 || state == NULL) { - if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) - (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); lck_mtx_unlock(&dtrace_lock); - /* propagate EAGAIN or ERESTART */ - return (rv); - } - - lck_mtx_unlock(&dtrace_lock); - - lck_rw_lock_exclusive(&dtrace_dof_mode_lock); - - /* - * If we are currently lazy, transition states. - * - * Unlike dtrace_close, we do not need to check the - * value of dtrace_opens, as any positive value (and - * we count as 1) means we transition states. - */ - if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON) { - dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_OFF; - - /* - * Iterate all existing processes and load lazy dofs. - */ - proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, - dtrace_lazy_dofs_proc_iterate_doit, - NULL, - dtrace_lazy_dofs_proc_iterate_filter, - NULL); - } - - lck_rw_unlock_exclusive(&dtrace_dof_mode_lock); - - /* - * Update kernel symbol state. - * - * We must own the provider and dtrace locks. - * - * NOTE! It may appear there is a race by setting this value so late - * after dtrace_probe_provide. However, any kext loaded after the - * call to probe provide and before we set LAZY_OFF will be marked as - * eligible for symbols from userspace. The same dtrace that is currently - * calling dtrace_open() (this call!) will get a list of kexts needing - * symbols and fill them in, thus closing the race window. - * - * We want to set this value only after it certain it will succeed, as - * this significantly reduces the complexity of error exits. - */ - lck_mtx_lock(&dtrace_lock); - if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) { - dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL; - } - lck_mtx_unlock(&dtrace_lock); -#endif /* __APPLE__ */ - - return (0); -} - -/*ARGSUSED*/ -static int -dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) -{ -#pragma unused(flag, otyp, cred_p) /* __APPLE__ */ - minor_t minor = getminor(dev); - dtrace_state_t *state; - -#if !defined(__APPLE__) - if (minor == DTRACEMNRN_HELPER) - return (0); -#else - /* Darwin puts Helper on its own major device. 
*/ -#endif /* __APPLE__ */ - - state = ddi_get_soft_state(dtrace_softstate, minor); - - lck_mtx_lock(&cpu_lock); - lck_mtx_lock(&dtrace_lock); - - if (state->dts_anon) { - /* - * There is anonymous state. Destroy that first. - */ - ASSERT(dtrace_anon.dta_state == NULL); - dtrace_state_destroy(state->dts_anon); - } - - dtrace_state_destroy(state); - ASSERT(dtrace_opens > 0); - - /* - * Only relinquish control of the kernel debugger interface when there - * are no consumers and no anonymous enablings. - */ - if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) - (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); - - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&cpu_lock); - -#if defined(__APPLE__) - /* - * Lock ordering requires the dof mode lock be taken before - * the dtrace_lock. - */ - lck_rw_lock_exclusive(&dtrace_dof_mode_lock); - lck_mtx_lock(&dtrace_lock); - - if (dtrace_opens == 0) { - /* - * If we are currently lazy-off, and this is the last close, transition to - * lazy state. - */ - if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) { - dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON; - } - - /* - * If we are the last dtrace client, switch back to lazy (from userspace) symbols - */ - if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_KERNEL) { - dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE; - } - } - - lck_mtx_unlock(&dtrace_lock); - lck_rw_unlock_exclusive(&dtrace_dof_mode_lock); - - /* - * Kext probes may be retained past the end of the kext's lifespan. The - * probes are kept until the last reference to them has been removed. - * Since closing an active dtrace context is likely to drop that last reference, - * lets take a shot at cleaning out the orphaned probes now. - */ - dtrace_module_unloaded(NULL); -#endif /* __APPLE__ */ - - return (0); -} - -#if !defined(__APPLE__) -/*ARGSUSED*/ -static int -dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) -{ - int rval; - dof_helper_t help, *dhp = NULL; - - switch (cmd) { - case DTRACEHIOC_ADDDOF: - if (copyin((void *)arg, &help, sizeof (help)) != 0) { - dtrace_dof_error(NULL, "failed to copyin DOF helper"); - return (EFAULT); - } - - dhp = &help; - arg = (intptr_t)help.dofhp_dof; - /*FALLTHROUGH*/ - - case DTRACEHIOC_ADD: { - dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); - - if (dof == NULL) - return (rval); - - mutex_enter(&dtrace_lock); - - /* - * dtrace_helper_slurp() takes responsibility for the dof -- - * it may free it now or it may save it and free it later. 
- */ - if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { - *rv = rval; - rval = 0; - } else { - rval = EINVAL; - } - - mutex_exit(&dtrace_lock); - return (rval); - } - - case DTRACEHIOC_REMOVE: { - mutex_enter(&dtrace_lock); - rval = dtrace_helper_destroygen(arg); - mutex_exit(&dtrace_lock); - - return (rval); - } - - default: - break; - } - - return (ENOTTY); -} - -/*ARGSUSED*/ -static int -dtrace_ioctl(dev_t dev, u_long cmd, intptr_t arg, int md, cred_t *cr, int *rv) -{ - minor_t minor = getminor(dev); - dtrace_state_t *state; - int rval; - - if (minor == DTRACEMNRN_HELPER) - return (dtrace_ioctl_helper(cmd, arg, rv)); - - state = ddi_get_soft_state(dtrace_softstate, minor); - - if (state->dts_anon) { - ASSERT(dtrace_anon.dta_state == NULL); - state = state->dts_anon; - } - - switch (cmd) { - case DTRACEIOC_PROVIDER: { - dtrace_providerdesc_t pvd; - dtrace_provider_t *pvp; - - if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) - return (EFAULT); - - pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; - lck_mtx_lock(&dtrace_provider_lock); - - for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { - if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) - break; - } - - lck_mtx_unlock(&dtrace_provider_lock); - - if (pvp == NULL) - return (ESRCH); - - bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); - bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); - if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_EPROBE: { - dtrace_eprobedesc_t epdesc; - dtrace_ecb_t *ecb; - dtrace_action_t *act; - void *buf; - size_t size; - uintptr_t dest; - int nrecs; - - if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) - return (EFAULT); - - lck_mtx_lock(&dtrace_lock); - - if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } - - if (ecb->dte_probe == NULL) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } - - epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; - epdesc.dtepd_uarg = ecb->dte_uarg; - epdesc.dtepd_size = ecb->dte_size; - - nrecs = epdesc.dtepd_nrecs; - epdesc.dtepd_nrecs = 0; - for (act = ecb->dte_action; act != NULL; act = act->dta_next) { - if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) - continue; - - epdesc.dtepd_nrecs++; - } - - /* - * Now that we have the size, we need to allocate a temporary - * buffer in which to store the complete description. We need - * the temporary buffer to be able to drop dtrace_lock() - * across the copyout(), below. 
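Aside, not from the xnu sources: the ioctl code above builds its reply in a temporary buffer precisely so the lock can be dropped before the slow copy to the consumer. A user-space sketch of that snapshot-then-copy pattern:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counts[4] = { 1, 2, 3, 4 };

static int
copy_counts_out(int *dst, size_t n)
{
	int *snap;
	size_t sz = n * sizeof (int);

	if ((snap = malloc(sz)) == NULL)
		return (-1);

	pthread_mutex_lock(&lock);
	memcpy(snap, shared_counts, sz);	/* cheap copy under the lock */
	pthread_mutex_unlock(&lock);

	memcpy(dst, snap, sz);			/* the "copyout" happens unlocked */
	free(snap);
	return (0);
}

int
main(void)
{
	int out[4];

	if (copy_counts_out(out, 4) == 0)
		printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
	return (0);
}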
- */ - size = sizeof (dtrace_eprobedesc_t) + - (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); - - buf = kmem_alloc(size, KM_SLEEP); - dest = (uintptr_t)buf; - - bcopy(&epdesc, (void *)dest, sizeof (epdesc)); - dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); - - for (act = ecb->dte_action; act != NULL; act = act->dta_next) { - if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) - continue; - - if (nrecs-- == 0) - break; - - bcopy(&act->dta_rec, (void *)dest, - sizeof (dtrace_recdesc_t)); - dest += sizeof (dtrace_recdesc_t); - } - - lck_mtx_unlock(&dtrace_lock); - - if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { - kmem_free(buf, size); - return (EFAULT); - } - - kmem_free(buf, size); - return (0); - } - - case DTRACEIOC_AGGDESC: { - dtrace_aggdesc_t aggdesc; - dtrace_action_t *act; - dtrace_aggregation_t *agg; - int nrecs; - uint32_t offs; - dtrace_recdesc_t *lrec; - void *buf; - size_t size; - uintptr_t dest; - - if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) - return (EFAULT); - - lck_mtx_lock(&dtrace_lock); - - if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } - - aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; - - nrecs = aggdesc.dtagd_nrecs; - aggdesc.dtagd_nrecs = 0; - - offs = agg->dtag_base; - lrec = &agg->dtag_action.dta_rec; - aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; - - for (act = agg->dtag_first; ; act = act->dta_next) { - ASSERT(act->dta_intuple || - DTRACEACT_ISAGG(act->dta_kind)); - - /* - * If this action has a record size of zero, it - * denotes an argument to the aggregating action. - * Because the presence of this record doesn't (or - * shouldn't) affect the way the data is interpreted, - * we don't copy it out to save user-level the - * confusion of dealing with a zero-length record. - */ - if (act->dta_rec.dtrd_size == 0) { - ASSERT(agg->dtag_hasarg); - continue; - } - - aggdesc.dtagd_nrecs++; - - if (act == &agg->dtag_action) - break; - } - - /* - * Now that we have the size, we need to allocate a temporary - * buffer in which to store the complete description. We need - * the temporary buffer to be able to drop dtrace_lock() - * across the copyout(), below. - */ - size = sizeof (dtrace_aggdesc_t) + - (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); - - buf = kmem_alloc(size, KM_SLEEP); - dest = (uintptr_t)buf; - - bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); - dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); - - for (act = agg->dtag_first; ; act = act->dta_next) { - dtrace_recdesc_t rec = act->dta_rec; - - /* - * See the comment in the above loop for why we pass - * over zero-length records. - */ - if (rec.dtrd_size == 0) { - ASSERT(agg->dtag_hasarg); - continue; - } - - if (nrecs-- == 0) - break; - - rec.dtrd_offset -= offs; - bcopy(&rec, (void *)dest, sizeof (rec)); - dest += sizeof (dtrace_recdesc_t); - - if (act == &agg->dtag_action) - break; - } - - lck_mtx_unlock(&dtrace_lock); - - if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { - kmem_free(buf, size); - return (EFAULT); - } - - kmem_free(buf, size); - return (0); - } - - case DTRACEIOC_ENABLE: { - dof_hdr_t *dof; - dtrace_enabling_t *enab = NULL; - dtrace_vstate_t *vstate; - int err = 0; - - *rv = 0; - - /* - * If a NULL argument has been passed, we take this as our - * cue to reevaluate our enablings. 
- */ - if (arg == NULL) { - dtrace_enabling_matchall(); - - return (0); - } - - if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) - return (rval); - - lck_mtx_lock(&cpu_lock); - lck_mtx_lock(&dtrace_lock); - vstate = &state->dts_vstate; - - if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&cpu_lock); - dtrace_dof_destroy(dof); - return (EBUSY); - } - - if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&cpu_lock); - dtrace_dof_destroy(dof); - return (EINVAL); - } - - if ((rval = dtrace_dof_options(dof, state)) != 0) { - dtrace_enabling_destroy(enab); - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&cpu_lock); - dtrace_dof_destroy(dof); - return (rval); - } - - if ((err = dtrace_enabling_match(enab, rv)) == 0) { - err = dtrace_enabling_retain(enab); - } else { - dtrace_enabling_destroy(enab); - } - - lck_mtx_unlock(&cpu_lock); - lck_mtx_unlock(&dtrace_lock); - dtrace_dof_destroy(dof); - - return (err); - } - - case DTRACEIOC_REPLICATE: { - dtrace_repldesc_t desc; - dtrace_probedesc_t *match = &desc.dtrpd_match; - dtrace_probedesc_t *create = &desc.dtrpd_create; - int err; - - if (copyin((void *)arg, &desc, sizeof (desc)) != 0) - return (EFAULT); - - match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; - match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; - match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; - match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; - - create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; - create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; - create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; - create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; - - lck_mtx_lock(&dtrace_lock); - err = dtrace_enabling_replicate(state, match, create); - lck_mtx_unlock(&dtrace_lock); - - return (err); - } - - case DTRACEIOC_PROBEMATCH: - case DTRACEIOC_PROBES: { - dtrace_probe_t *probe = NULL; - dtrace_probedesc_t desc; - dtrace_probekey_t pkey; - dtrace_id_t i; - int m = 0; - uint32_t priv; - uid_t uid; - zoneid_t zoneid; - - if (copyin((void *)arg, &desc, sizeof (desc)) != 0) - return (EFAULT); - - desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; - desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; - desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; - desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; - - /* - * Before we attempt to match this probe, we want to give - * all providers the opportunity to provide it. 
- */ - if (desc.dtpd_id == DTRACE_IDNONE) { - lck_mtx_lock(&dtrace_provider_lock); - dtrace_probe_provide(&desc, NULL); - lck_mtx_unlock(&dtrace_provider_lock); - desc.dtpd_id++; - } - - if (cmd == DTRACEIOC_PROBEMATCH) { - dtrace_probekey(&desc, &pkey); - pkey.dtpk_id = DTRACE_IDNONE; - } - - dtrace_cred2priv(cr, &priv, &uid, &zoneid); - - lck_mtx_lock(&dtrace_lock); - - if (cmd == DTRACEIOC_PROBEMATCH) { - for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { - if ((probe = dtrace_probes[i - 1]) != NULL && - (m = dtrace_match_probe(probe, &pkey, - priv, uid, zoneid)) != 0) - break; - } - - if (m < 0) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } - - } else { - for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { - if ((probe = dtrace_probes[i - 1]) != NULL && - dtrace_match_priv(probe, priv, uid, zoneid)) - break; - } - } - - if (probe == NULL) { - lck_mtx_unlock(&dtrace_lock); - return (ESRCH); - } - - dtrace_probe_description(probe, &desc); - lck_mtx_unlock(&dtrace_lock); - - if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_PROBEARG: { - dtrace_argdesc_t desc; - dtrace_probe_t *probe; - dtrace_provider_t *prov; - - if (copyin((void *)arg, &desc, sizeof (desc)) != 0) - return (EFAULT); - - if (desc.dtargd_id == DTRACE_IDNONE) - return (EINVAL); - - if (desc.dtargd_ndx == DTRACE_ARGNONE) - return (EINVAL); - - lck_mtx_lock(&dtrace_provider_lock); - lck_mtx_lock(&mod_lock); - lck_mtx_lock(&dtrace_lock); - - if (desc.dtargd_id > dtrace_nprobes) { - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&mod_lock); - lck_mtx_unlock(&dtrace_provider_lock); - return (EINVAL); - } - - if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { - lck_mtx_unlock(&dtrace_lock); - lck_mtx_unlock(&mod_lock); - lck_mtx_unlock(&dtrace_provider_lock); - return (EINVAL); - } - - lck_mtx_unlock(&dtrace_lock); - - prov = probe->dtpr_provider; - - if (prov->dtpv_pops.dtps_getargdesc == NULL) { - /* - * There isn't any typed information for this probe. - * Set the argument number to DTRACE_ARGNONE. - */ - desc.dtargd_ndx = DTRACE_ARGNONE; - } else { - desc.dtargd_native[0] = '\0'; - desc.dtargd_xlate[0] = '\0'; - desc.dtargd_mapping = desc.dtargd_ndx; - - prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, - probe->dtpr_id, probe->dtpr_arg, &desc); - } - - lck_mtx_unlock(&mod_lock); - lck_mtx_unlock(&dtrace_provider_lock); - - if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_GO: { - processorid_t cpuid; - rval = dtrace_state_go(state, &cpuid); - - if (rval != 0) - return (rval); - - if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_STOP: { - processorid_t cpuid; - - lck_mtx_lock(&dtrace_lock); - rval = dtrace_state_stop(state, &cpuid); - lck_mtx_unlock(&dtrace_lock); - - if (rval != 0) - return (rval); - - if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_DOFGET: { - dof_hdr_t hdr, *dof; - uint64_t len; - - if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) - return (EFAULT); - - lck_mtx_lock(&dtrace_lock); - dof = dtrace_dof_create(state); - lck_mtx_unlock(&dtrace_lock); - - len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); - rval = copyout(dof, (void *)arg, len); - dtrace_dof_destroy(dof); - - return (rval == 0 ? 
0 : EFAULT); - } - - case DTRACEIOC_AGGSNAP: - case DTRACEIOC_BUFSNAP: { - dtrace_bufdesc_t desc; - caddr_t cached; - dtrace_buffer_t *buf; - - if (copyin((void *)arg, &desc, sizeof (desc)) != 0) - return (EFAULT); - - if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) - return (EINVAL); - - lck_mtx_lock(&dtrace_lock); - - if (cmd == DTRACEIOC_BUFSNAP) { - buf = &state->dts_buffer[desc.dtbd_cpu]; - } else { - buf = &state->dts_aggbuffer[desc.dtbd_cpu]; - } - - if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { - size_t sz = buf->dtb_offset; - - if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { - lck_mtx_unlock(&dtrace_lock); - return (EBUSY); - } - - /* - * If this buffer has already been consumed, we're - * going to indicate that there's nothing left here - * to consume. - */ - if (buf->dtb_flags & DTRACEBUF_CONSUMED) { - lck_mtx_unlock(&dtrace_lock); - - desc.dtbd_size = 0; - desc.dtbd_drops = 0; - desc.dtbd_errors = 0; - desc.dtbd_oldest = 0; - sz = sizeof (desc); - - if (copyout(&desc, (void *)arg, sz) != 0) - return (EFAULT); - - return (0); - } - - /* - * If this is a ring buffer that has wrapped, we want - * to copy the whole thing out. - */ - if (buf->dtb_flags & DTRACEBUF_WRAPPED) { - dtrace_buffer_polish(buf); - sz = buf->dtb_size; - } - - if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { - lck_mtx_unlock(&dtrace_lock); - return (EFAULT); - } - - desc.dtbd_size = sz; - desc.dtbd_drops = buf->dtb_drops; - desc.dtbd_errors = buf->dtb_errors; - desc.dtbd_oldest = buf->dtb_xamot_offset; - - lck_mtx_unlock(&dtrace_lock); - - if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) - return (EFAULT); - - buf->dtb_flags |= DTRACEBUF_CONSUMED; - - return (0); - } - - if (buf->dtb_tomax == NULL) { - ASSERT(buf->dtb_xamot == NULL); - lck_mtx_unlock(&dtrace_lock); - return (ENOENT); - } - - cached = buf->dtb_tomax; - ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); - - dtrace_xcall(desc.dtbd_cpu, - (dtrace_xcall_t)dtrace_buffer_switch, buf); - - state->dts_errors += buf->dtb_xamot_errors; - - /* - * If the buffers did not actually switch, then the cross call - * did not take place -- presumably because the given CPU is - * not in the ready set. If this is the case, we'll return - * ENOENT. - */ - if (buf->dtb_tomax == cached) { - ASSERT(buf->dtb_xamot != cached); - lck_mtx_unlock(&dtrace_lock); - return (ENOENT); - } - - ASSERT(cached == buf->dtb_xamot); - - /* - * We have our snapshot; now copy it out. - */ - if (copyout(buf->dtb_xamot, desc.dtbd_data, - buf->dtb_xamot_offset) != 0) { - lck_mtx_unlock(&dtrace_lock); - return (EFAULT); - } - - desc.dtbd_size = buf->dtb_xamot_offset; - desc.dtbd_drops = buf->dtb_xamot_drops; - desc.dtbd_errors = buf->dtb_xamot_errors; - desc.dtbd_oldest = 0; - - lck_mtx_unlock(&dtrace_lock); - - /* - * Finally, copy out the buffer description. - */ - if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_CONF: { - dtrace_conf_t conf; - - bzero(&conf, sizeof (conf)); - conf.dtc_difversion = DIF_VERSION; - conf.dtc_difintregs = DIF_DIR_NREGS; - conf.dtc_diftupregs = DIF_DTR_NREGS; - conf.dtc_ctfmodel = CTF_MODEL_NATIVE; - - if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) - return (EFAULT); - - return (0); - } - - case DTRACEIOC_STATUS: { - dtrace_status_t stat; - dtrace_dstate_t *dstate; - int i, j; - uint64_t nerrs; - - /* - * See the comment in dtrace_state_deadman() for the reason - * for setting dts_laststatus to INT64_MAX before setting - * it to the correct value. 
- */ - state->dts_laststatus = INT64_MAX; - dtrace_membar_producer(); - state->dts_laststatus = dtrace_gethrtime(); - - bzero(&stat, sizeof (stat)); - - lck_mtx_lock(&dtrace_lock); - - if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { - lck_mtx_unlock(&dtrace_lock); - return (ENOENT); - } - - if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) - stat.dtst_exiting = 1; - - nerrs = state->dts_errors; - dstate = &state->dts_vstate.dtvs_dynvars; - - for (i = 0; i < NCPU; i++) { - dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; - - stat.dtst_dyndrops += dcpu->dtdsc_drops; - stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; - stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; - - if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) - stat.dtst_filled++; - - nerrs += state->dts_buffer[i].dtb_errors; + lck_mtx_unlock(&cpu_lock); + return (EBUSY); + } - for (j = 0; j < state->dts_nspeculations; j++) { - dtrace_speculation_t *spec; - dtrace_buffer_t *buf; + rv = dtrace_state_create(devp, cred_p, &state); + lck_mtx_unlock(&cpu_lock); - spec = &state->dts_speculations[j]; - buf = &spec->dtsp_buffer[i]; - stat.dtst_specdrops += buf->dtb_xamot_drops; - } - } + if (rv != 0 || state == NULL) { + if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) + (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); + lck_mtx_unlock(&dtrace_lock); + /* propagate EAGAIN or ERESTART */ + return (rv); + } + + lck_mtx_unlock(&dtrace_lock); - stat.dtst_specdrops_busy = state->dts_speculations_busy; - stat.dtst_specdrops_unavail = state->dts_speculations_unavail; - stat.dtst_stkstroverflows = state->dts_stkstroverflows; - stat.dtst_dblerrors = state->dts_dblerrors; - stat.dtst_killed = - (state->dts_activity == DTRACE_ACTIVITY_KILLED); - stat.dtst_errors = nerrs; + lck_rw_lock_exclusive(&dtrace_dof_mode_lock); - lck_mtx_unlock(&dtrace_lock); + /* + * If we are currently lazy, transition states. + * + * Unlike dtrace_close, we do not need to check the + * value of dtrace_opens, as any positive value (and + * we count as 1) means we transition states. + */ + if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON) { + dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_OFF; + + /* + * Iterate all existing processes and load lazy dofs. + */ + proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, + dtrace_lazy_dofs_proc_iterate_doit, + NULL, + dtrace_lazy_dofs_proc_iterate_filter, + NULL); + } - if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) - return (EFAULT); + lck_rw_unlock_exclusive(&dtrace_dof_mode_lock); - return (0); + /* + * Update kernel symbol state. + * + * We must own the provider and dtrace locks. + * + * NOTE! It may appear there is a race by setting this value so late + * after dtrace_probe_provide. However, any kext loaded after the + * call to probe provide and before we set LAZY_OFF will be marked as + * eligible for symbols from userspace. The same dtrace that is currently + * calling dtrace_open() (this call!) will get a list of kexts needing + * symbols and fill them in, thus closing the race window. + * + * We want to set this value only after it certain it will succeed, as + * this significantly reduces the complexity of error exits. 
+ */ + lck_mtx_lock(&dtrace_lock); + if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) { + dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL; } + lck_mtx_unlock(&dtrace_lock); - case DTRACEIOC_FORMAT: { - dtrace_fmtdesc_t fmt; - char *str; - int len; + return (0); +} - if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) - return (EFAULT); +/*ARGSUSED*/ +static int +dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) +{ +#pragma unused(flag, otyp, cred_p) /* __APPLE__ */ + minor_t minor = getminor(dev); + dtrace_state_t *state; - lck_mtx_lock(&dtrace_lock); + /* APPLE NOTE: Darwin puts Helper on its own major device. */ - if (fmt.dtfd_format == 0 || - fmt.dtfd_format > state->dts_nformats) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } + state = ddi_get_soft_state(dtrace_softstate, minor); + + lck_mtx_lock(&cpu_lock); + lck_mtx_lock(&dtrace_lock); + if (state->dts_anon) { /* - * Format strings are allocated contiguously and they are - * never freed; if a format index is less than the number - * of formats, we can assert that the format map is non-NULL - * and that the format for the specified index is non-NULL. + * There is anonymous state. Destroy that first. */ - ASSERT(state->dts_formats != NULL); - str = state->dts_formats[fmt.dtfd_format - 1]; - ASSERT(str != NULL); + ASSERT(dtrace_anon.dta_state == NULL); + dtrace_state_destroy(state->dts_anon); + } - len = strlen(str) + 1; + dtrace_state_destroy(state); + ASSERT(dtrace_opens > 0); - if (len > fmt.dtfd_length) { - fmt.dtfd_length = len; + /* + * Only relinquish control of the kernel debugger interface when there + * are no consumers and no anonymous enablings. + */ + if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) + (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); + + lck_mtx_unlock(&dtrace_lock); + lck_mtx_unlock(&cpu_lock); - if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } - } else { - if (copyout(str, fmt.dtfd_string, len) != 0) { - lck_mtx_unlock(&dtrace_lock); - return (EINVAL); - } + /* + * Lock ordering requires the dof mode lock be taken before + * the dtrace_lock. + */ + lck_rw_lock_exclusive(&dtrace_dof_mode_lock); + lck_mtx_lock(&dtrace_lock); + + if (dtrace_opens == 0) { + /* + * If we are currently lazy-off, and this is the last close, transition to + * lazy state. + */ + if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) { + dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON; } - lck_mtx_unlock(&dtrace_lock); - return (0); - } - - default: - break; + /* + * If we are the last dtrace client, switch back to lazy (from userspace) symbols + */ + if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_KERNEL) { + dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE; + } } + + lck_mtx_unlock(&dtrace_lock); + lck_rw_unlock_exclusive(&dtrace_dof_mode_lock); + + /* + * Kext probes may be retained past the end of the kext's lifespan. The + * probes are kept until the last reference to them has been removed. + * Since closing an active dtrace context is likely to drop that last reference, + * lets take a shot at cleaning out the orphaned probes now. + */ + dtrace_module_unloaded(NULL); - return (ENOTTY); + return (0); } -#else + /*ARGSUSED*/ static int dtrace_ioctl_helper(u_long cmd, caddr_t arg, int *rv) @@ -18507,7 +16293,7 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv * If a NULL argument has been passed, we take this as our * cue to reevaluate our enablings. 
 	 */
-	if (arg == NULL) {
+	if (arg == 0) {
 		dtrace_enabling_matchall();
 
 		return (0);
@@ -18548,8 +16334,8 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 		dtrace_enabling_destroy(enab);
 	}
 
-	lck_mtx_unlock(&cpu_lock);
 	lck_mtx_unlock(&dtrace_lock);
+	lck_mtx_unlock(&cpu_lock);
 	dtrace_dof_destroy(dof);
 
 	return (err);
@@ -19028,7 +16814,16 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 		size_t module_uuids_list_size;
 		dtrace_module_uuids_list_t* uuids_list;
 		uint64_t dtmul_count;
-
+
+		/*
+		 * Security restrictions make this operation illegal; when they are
+		 * enabled, DTrace must refuse to provide any fbt probes.
+		 */
+		if (dtrace_is_restricted()) {
+			cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_MODUUIDSLIST");
+			return (EPERM);
+		}
+
 		/*
 		 * Fail if the kernel symbol mode makes this operation illegal.
 		 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states, it is legal to check
@@ -19074,6 +16869,10 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 		lck_mtx_lock(&mod_lock);
 		struct modctl* ctl = dtrace_modctl_list;
 		while (ctl) {
+			/* Update the private probes bit */
+			if (dtrace_provide_private_probes)
+				ctl->mod_flags |= MODCTL_FBT_PROVIDE_PRIVATE_PROBES;
+
 			ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
 			if (!MOD_SYMBOLS_DONE(ctl)) {
 				dtmul_count++;
@@ -19121,6 +16920,10 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 
 		struct modctl* ctl = dtrace_modctl_list;
 		while (ctl) {
+			/* Update the private probes bit */
+			if (dtrace_provide_private_probes)
+				ctl->mod_flags |= MODCTL_FBT_PROVIDE_PRIVATE_PROBES;
+
 			/*
 			 * We assume that userspace symbols will be "better" than kernel level symbols,
 			 * as userspace can search for dSYM(s) and symbol'd binaries. Even if kernel syms
@@ -19166,7 +16969,16 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 		size_t module_symbols_size;
 		dtrace_module_symbols_t* module_symbols;
 		uint64_t dtmodsyms_count;
-
+
+		/*
+		 * Security restrictions make this operation illegal; when they are
+		 * enabled, DTrace must refuse to provide any fbt probes.
+		 */
+		if (dtrace_is_restricted()) {
+			cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_PROVMODSYMS");
+			return (EPERM);
+		}
+
 		/*
 		 * Fail if the kernel symbol mode makes this operation illegal.
 		 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states, it is legal to check
@@ -19229,6 +17041,10 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 
 		struct modctl* ctl = dtrace_modctl_list;
 		while (ctl) {
+			/* Update the private probes bit */
+			if (dtrace_provide_private_probes)
+				ctl->mod_flags |= MODCTL_FBT_PROVIDE_PRIVATE_PROBES;
+
 			ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
 			if (MOD_HAS_UUID(ctl) && !MOD_SYMBOLS_DONE(ctl)) {
 				if (memcmp(module_symbols->dtmodsyms_uuid, ctl->mod_uuid, sizeof(UUID)) == 0) {
@@ -19269,15 +17085,47 @@ dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv
 
 		return rval;
 	}
-
-	default:
-		break;
+
+	case DTRACEIOC_PROCWAITFOR: {
+		dtrace_procdesc_t pdesc = {
+			.p_comm = {0},
+			.p_pid  = -1
+		};
+
+		if ((rval = copyin(arg, &pdesc, sizeof(pdesc))) != 0)
+			goto proc_waitfor_error;
+
+		if ((rval = dtrace_proc_waitfor(&pdesc)) != 0)
+			goto proc_waitfor_error;
+
+		if ((rval = copyout(&pdesc, arg, sizeof(pdesc))) != 0)
+			goto proc_waitfor_error;
+
+		return 0;
+
+	proc_waitfor_error:
+		/* The process was suspended; resume it here, since the client will not do it.
+		 */
+		if (pdesc.p_pid != -1) {
+			proc_t *proc = proc_find(pdesc.p_pid);
+			if (proc != PROC_NULL) {
+				task_pidresume(proc->task);
+				proc_rele(proc);
+			}
+		}
+
+		return rval;
+	}
+
+	default:
+		break;
 	}
 
 	return (ENOTTY);
 }
-#endif /* __APPLE__ */
+/*
+ * APPLE NOTE: dtrace_detach not implemented
+ */
 #if !defined(__APPLE__)
 /*ARGSUSED*/
 static int
@@ -19303,15 +17151,15 @@ dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 	ASSERT(dtrace_opens == 0);
 
 	if (dtrace_helpers > 0) {
-		lck_mtx_unlock(&dtrace_provider_lock);
 		lck_mtx_unlock(&dtrace_lock);
+		lck_mtx_unlock(&dtrace_provider_lock);
 		lck_mtx_unlock(&cpu_lock);
 		return (DDI_FAILURE);
 	}
 
 	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
-		lck_mtx_unlock(&dtrace_provider_lock);
 		lck_mtx_unlock(&dtrace_lock);
+		lck_mtx_unlock(&dtrace_provider_lock);
 		lck_mtx_unlock(&cpu_lock);
 		return (DDI_FAILURE);
 	}
@@ -19403,90 +17251,7 @@ dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 
 	return (DDI_SUCCESS);
 }
-
-/*ARGSUSED*/
-static int
-dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
-{
-	int error;
-
-	switch (infocmd) {
-	case DDI_INFO_DEVT2DEVINFO:
-		*result = (void *)dtrace_devi;
-		error = DDI_SUCCESS;
-		break;
-	case DDI_INFO_DEVT2INSTANCE:
-		*result = (void *)0;
-		error = DDI_SUCCESS;
-		break;
-	default:
-		error = DDI_FAILURE;
-	}
-	return (error);
-}
-
-static struct cb_ops dtrace_cb_ops = {
-	dtrace_open,		/* open */
-	dtrace_close,		/* close */
-	nulldev,		/* strategy */
-	nulldev,		/* print */
-	nodev,			/* dump */
-	nodev,			/* read */
-	nodev,			/* write */
-	dtrace_ioctl,		/* ioctl */
-	nodev,			/* devmap */
-	nodev,			/* mmap */
-	nodev,			/* segmap */
-	nochpoll,		/* poll */
-	ddi_prop_op,		/* cb_prop_op */
-	0,			/* streamtab */
-	D_NEW | D_MP		/* Driver compatibility flag */
-};
-
-static struct dev_ops dtrace_ops = {
-	DEVO_REV,		/* devo_rev */
-	0,			/* refcnt */
-	dtrace_info,		/* get_dev_info */
-	nulldev,		/* identify */
-	nulldev,		/* probe */
-	dtrace_attach,		/* attach */
-	dtrace_detach,		/* detach */
-	nodev,			/* reset */
-	&dtrace_cb_ops,		/* driver operations */
-	NULL,			/* bus operations */
-	nodev			/* dev power */
-};
-
-static struct modldrv modldrv = {
-	&mod_driverops,		/* module type (this is a pseudo driver) */
-	"Dynamic Tracing",	/* name of module */
-	&dtrace_ops,		/* driver ops */
-};
-
-static struct modlinkage modlinkage = {
-	MODREV_1,
-	(void *)&modldrv,
-	NULL
-};
-
-int
-_init(void)
-{
-	return (mod_install(&modlinkage));
-}
-
-int
-_info(struct modinfo *modinfop)
-{
-	return (mod_info(&modlinkage, modinfop));
-}
-
-int
-_fini(void)
-{
-	return (mod_remove(&modlinkage));
-}
-#else	/* Darwin BSD driver model. */
+#endif /* __APPLE__ */
 
 d_open_t _dtrace_open, helper_open;
 d_close_t _dtrace_close, helper_close;
@@ -19686,6 +17451,7 @@ dtrace_init( void )
 {
 	if (0 == gDTraceInited) {
 		int i, ncpu;
+		size_t size = sizeof(dtrace_buffer_memory_maxsize);
 
 		/*
 		 * DTrace allocates buffers based on the maximum number
@@ -19694,7 +17460,28 @@ dtrace_init( void )
 		 */
 		ASSERT(dtrace_max_cpus == 0);
 		ncpu = dtrace_max_cpus = ml_get_max_cpus();
-
+
+		/*
+		 * Retrieve the size of physical memory in order to define the
+		 * maximum size of the state buffer memory.  If we cannot retrieve
+		 * this value, assume 1 GB of memory per CPU; that is still better
+		 * than raising a kernel panic.
+		 */
+		if (0 != kernel_sysctlbyname("hw.memsize", &dtrace_buffer_memory_maxsize,
+		                             &size, NULL, 0))
+		{
+			dtrace_buffer_memory_maxsize = (uint64_t)ncpu * 1024 * 1024 * 1024;
+			printf("dtrace_init: failed to retrieve hw.memsize, defaulting to %llu bytes\n",
+			       dtrace_buffer_memory_maxsize);
+		}
+
+		/*
+		 * Finally, divide by three to prevent DTrace from eating too
+		 * much memory.
+		 */
+		dtrace_buffer_memory_maxsize /= 3;
+		ASSERT(dtrace_buffer_memory_maxsize > 0);
+
 		gMajDevNo = cdevsw_add(DTRACE_MAJOR, &dtrace_cdevsw);
 
 		if (gMajDevNo < 0) {
@@ -19738,6 +17525,7 @@ dtrace_init( void )
 		lck_mtx_init(&dtrace_lock, dtrace_lck_grp, dtrace_lck_attr);
 		lck_mtx_init(&dtrace_provider_lock, dtrace_lck_grp, dtrace_lck_attr);
 		lck_mtx_init(&dtrace_meta_lock, dtrace_lck_grp, dtrace_lck_attr);
+		lck_mtx_init(&dtrace_procwaitfor_lock, dtrace_lck_grp, dtrace_lck_attr);
 #if DEBUG
 		lck_mtx_init(&dtrace_errlock, dtrace_lck_grp, dtrace_lck_attr);
 #endif
@@ -19751,8 +17539,14 @@ dtrace_init( void )
 		 * the structure is sized to avoid false sharing.
 		 */
 		lck_mtx_init(&cpu_lock, dtrace_lck_grp, dtrace_lck_attr);
+		lck_mtx_init(&cyc_lock, dtrace_lck_grp, dtrace_lck_attr);
 		lck_mtx_init(&mod_lock, dtrace_lck_grp, dtrace_lck_attr);
 
+		/*
+		 * Initialize the CPU offline/online hooks.
+		 */
+		dtrace_install_cpu_hooks();
+
 		dtrace_modctl_list = NULL;
 
 		cpu_core = (cpu_core_t *)kmem_zalloc( ncpu * sizeof(cpu_core_t), KM_SLEEP );
@@ -19764,6 +17558,7 @@ dtrace_init( void )
 		for (i = 0; i < ncpu; ++i) {
 			cpu_list[i].cpu_id = (processorid_t)i;
 			cpu_list[i].cpu_next = &(cpu_list[(i+1) % ncpu]);
+			LIST_INIT(&cpu_list[i].cpu_cyc_list);
 			lck_rw_init(&cpu_list[i].cpu_ft_lock, dtrace_lck_grp, dtrace_lck_attr);
 		}
 
@@ -19868,4 +17663,3 @@ unregister_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
 {
 #pragma unused(ignore1,ignore2)
 }
-#endif /* __APPLE__ */
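
For reference, the buffer-memory cap computed in dtrace_init() above can be reproduced in a small userspace program. This is a minimal sketch, not part of the change: it assumes a Darwin userland where sysctlbyname(3) exposes hw.memsize and hw.ncpu, and it uses hw.ncpu as a stand-in for the in-kernel ml_get_max_cpus() and kernel_sysctlbyname() calls.

/*
 * Userspace sketch of the dtrace_buffer_memory_maxsize computation:
 * prefer hw.memsize, fall back to 1 GB per CPU, then keep one third.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	uint64_t maxsize = 0;
	size_t len = sizeof(maxsize);
	int ncpu = 0;
	size_t ncpu_len = sizeof(ncpu);

	/* hw.ncpu stands in for the kernel's ml_get_max_cpus(). */
	if (sysctlbyname("hw.ncpu", &ncpu, &ncpu_len, NULL, 0) != 0 || ncpu <= 0)
		ncpu = 1;

	/* Same fallback policy as dtrace_init(): 1 GB per CPU. */
	if (sysctlbyname("hw.memsize", &maxsize, &len, NULL, 0) != 0)
		maxsize = (uint64_t)ncpu * 1024 * 1024 * 1024;

	/* Cap DTrace buffer memory at one third of physical memory. */
	maxsize /= 3;

	printf("dtrace_buffer_memory_maxsize would be %llu bytes\n",
	    (unsigned long long)maxsize);
	return (0);
}

Dividing by three keeps DTrace's buffer allocations well below the physical-memory ceiling even when several consumers size their buffers aggressively, which is the stated goal of the "eating too much memory" comment above.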
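The DTRACEIOC_PROCWAITFOR path added above can be exercised from a consumer roughly as follows. This is a hedged sketch only: it assumes dtrace_procdesc_t and the DTRACEIOC_PROCWAITFOR request are visible to userspace through <sys/dtrace.h>, that /dev/dtrace is the consumer device, that the caller holds the privileges dtrace(1) normally requires, and the process name used is an arbitrary example; none of that is established by this diff.

/* Sketch of a consumer waiting for a process by name (see assumptions above). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/dtrace.h>		/* assumed to declare DTRACEIOC_PROCWAITFOR */

int
main(void)
{
	dtrace_procdesc_t pdesc;
	int fd = open("/dev/dtrace", O_RDWR);

	if (fd < 0) {
		perror("open /dev/dtrace");
		return (1);
	}

	/* Mirror the kernel-side initializer: name filled in, pid unknown. */
	memset(&pdesc, 0, sizeof(pdesc));
	strlcpy(pdesc.p_comm, "Calculator", sizeof(pdesc.p_comm));	/* arbitrary example name */
	pdesc.p_pid = -1;

	/* Blocks until a matching process is spawned; it arrives suspended. */
	if (ioctl(fd, DTRACEIOC_PROCWAITFOR, &pdesc) == -1) {
		perror("DTRACEIOC_PROCWAITFOR");
		close(fd);
		return (1);
	}

	printf("matched pid %d, left suspended for the consumer to resume\n",
	    (int)pdesc.p_pid);
	close(fd);
	return (0);
}

On the success path the kernel leaves the matched process suspended so the consumer can install its enablings before resuming it; only on the ioctl's error paths does the kernel call task_pidresume() itself, because the client will never learn the pid.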