+ if (help->dthps_nprovs > 0) {
+ newhelp->dthps_nprovs = help->dthps_nprovs;
+ newhelp->dthps_maxprovs = help->dthps_nprovs;
+ newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
+ sizeof (dtrace_helper_provider_t *), KM_SLEEP);
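+		/*
+		 * The child shares the parent's helper providers; take a
+		 * reference on each one.
+		 */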
+ for (i = 0; i < newhelp->dthps_nprovs; i++) {
+ newhelp->dthps_provs[i] = help->dthps_provs[i];
+ newhelp->dthps_provs[i]->dthp_ref++;
+ }
+
+ hasprovs = 1;
+ }
+
+ lck_mtx_unlock(&dtrace_lock);
+
+ if (hasprovs)
+ dtrace_helper_provider_register(to, newhelp, NULL);
+
+ lck_mtx_unlock(&dtrace_meta_lock);
+}
+
+/*
+ * DTrace Process functions
+ */
+
+void
+dtrace_proc_fork(proc_t *parent_proc, proc_t *child_proc, int spawn)
+{
+	/*
+	 * This code applies to new processes that are copying the task
+	 * and thread state and address spaces of their parent process.
+	 */
+ if (!spawn) {
+ /*
+ * APPLE NOTE: Solaris does a sprlock() and drops the
+ * proc_lock here. We're cheating a bit and only taking
+ * the p_dtrace_sprlock lock. A full sprlock would
+ * task_suspend the parent.
+ */
+ dtrace_sprlock(parent_proc);
+
+ /*
+ * Remove all DTrace tracepoints from the child process. We
+ * need to do this _before_ duplicating USDT providers since
+ * any associated probes may be immediately enabled.
+ */
+ if (parent_proc->p_dtrace_count > 0) {
+ dtrace_fasttrap_fork(parent_proc, child_proc);
+ }
+
+ dtrace_sprunlock(parent_proc);
+
+		/*
+		 * Duplicate any lazy dof(s). This must be done while NOT
+		 * holding the parent sprlock! Lock ordering is
+		 * dtrace_dof_mode_lock, then sprlock. It is imperative that
+		 * we always call dtrace_lazy_dofs_duplicate rather than
+		 * NULL-checking and calling only if non-NULL: if we NULL
+		 * test, we can race with the lazy dof faulting code and
+		 * proceed from here to beyond the helpers copy, and the
+		 * lazy dof faulting will then fail to copy the helpers to
+		 * the child process. We return early if we duplicated lazy
+		 * dofs, since a process can only have one of lazy dofs and
+		 * helpers at a time; this avoids a race between a dtrace
+		 * client and dtrace_proc_fork that would leave a process
+		 * with both.
+		 */
+ if (dtrace_lazy_dofs_duplicate(parent_proc, child_proc) == DTRACE_LAZY_DOFS_DUPLICATED) {
+ return;
+ }
+
+		/*
+		 * Duplicate any helper actions and providers if they
+		 * haven't already been duplicated.
+		 */
+#if !defined(__APPLE__)
+	/*
+	 * The SFORKING flag we set above informs the code that enables
+	 * USDT probes that sprlock() may fail because the child is being
+	 * forked.
+	 */
+#endif
+ /*
+ * APPLE NOTE: As best I can tell, Apple's sprlock() equivalent
+ * never fails to find the child. We do not set SFORKING.
+ */
+ if (parent_proc->p_dtrace_helpers != NULL && dtrace_helpers_fork) {
+ (*dtrace_helpers_fork)(parent_proc, child_proc);
+ }
+ }
+}
+
+void
+dtrace_proc_exec(proc_t *p)
+{
+	/*
+	 * Invalidate any predicate evaluation already cached for this
+	 * thread by DTrace. That's because we've just stored to p_comm
+	 * and DTrace refers to that when it evaluates the "execname"
+	 * special variable. uid and gid may have changed as well.
+	 */
+ dtrace_set_thread_predcache(current_thread(), 0);
+
+	/*
+	 * Free any outstanding lazy dof entries. It is imperative that we
+	 * always call dtrace_lazy_dofs_destroy rather than NULL-checking
+	 * first: if we NULL test, we can race with the lazy dof faulting
+	 * code and proceed from here to beyond the helpers cleanup. The
+	 * lazy dof faulting will then install new helpers which no longer
+	 * belong to this process!
+	 */
+ dtrace_lazy_dofs_destroy(p);
+
+ /*
+ * Clean up any DTrace helpers for the process.
+ */
+ if (p->p_dtrace_helpers != NULL && dtrace_helpers_cleanup) {
+ (*dtrace_helpers_cleanup)(p);
+ }
+
+ /*
+ * Cleanup the DTrace provider associated with this process.
+ */
+ proc_lock(p);
+ if (p->p_dtrace_probes && dtrace_fasttrap_exec_ptr) {
+ (*dtrace_fasttrap_exec_ptr)(p);
+ }
+ proc_unlock(p);
+}
+
+void
+dtrace_proc_exit(proc_t *p)
+{
+	/*
+	 * Free any outstanding lazy dof entries. It is imperative that we
+	 * always call dtrace_lazy_dofs_destroy rather than NULL-checking
+	 * first: if we NULL test, we can race with the lazy dof faulting
+	 * code and proceed from here to beyond the helpers cleanup. The
+	 * lazy dof faulting will then install new helpers which will never
+	 * be cleaned up, and leak.
+	 */
+ dtrace_lazy_dofs_destroy(p);
+
+ /*
+ * Clean up any DTrace helper actions or probes for the process.
+ */
+ if (p->p_dtrace_helpers != NULL) {
+ (*dtrace_helpers_cleanup)(p);
+ }
+
+	/*
+	 * Clean up any DTrace probes associated with this process.
+	 *
+	 * APPLE NOTE: We release ptss pages/entries in
+	 * dtrace_fasttrap_exit_ptr(), so it must be called after
+	 * dtrace_helpers_cleanup().
+	 */
+ proc_lock(p);
+ if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
+ (*dtrace_fasttrap_exit_ptr)(p);
+ }
+ proc_unlock(p);
+}
+
+/*
+ * DTrace Hook Functions
+ */
+
+/*
+ * APPLE NOTE: dtrace_modctl_* routines for kext support.
+ * Used to manipulate the modctl list within dtrace xnu.
+ */
+
+modctl_t *dtrace_modctl_list;
+
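+/*
+ * Add a newly loaded kext to the front of the modctl shadow list. If an
+ * unloaded kext with the same name is still present (because it was unloaded
+ * with enabled probes), move it onto the new entry's mod_stale chain.
+ */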
+static void
+dtrace_modctl_add(struct modctl * newctl)
+{
+ struct modctl *nextp, *prevp;
+
+ ASSERT(newctl != NULL);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
+
+	// Insert the new module at the front of the list.
+
+ newctl->mod_next = dtrace_modctl_list;
+ dtrace_modctl_list = newctl;
+
+ /*
+ * If a module exists with the same name, then that module
+ * must have been unloaded with enabled probes. We will move
+ * the unloaded module to the new module's stale chain and
+ * then stop traversing the list.
+ */
+
+ prevp = newctl;
+ nextp = newctl->mod_next;
+
+	while (nextp != NULL) {
+		if (nextp->mod_loaded) {
+			/* This is a loaded module. Keep traversing. */
+			prevp = nextp;
+			nextp = nextp->mod_next;
+			continue;
+		}
+
+		/* Found an unloaded module. */
+		if (strncmp(newctl->mod_modname, nextp->mod_modname, KMOD_MAX_NAME) != 0) {
+			/* Names don't match. Keep traversing. */
+			prevp = nextp;
+			nextp = nextp->mod_next;
+			continue;
+		}
+
+		/* We found a stale entry; move it. We're done. */
+		prevp->mod_next = nextp->mod_next;
+		newctl->mod_stale = nextp;
+		nextp->mod_next = NULL;
+		break;
+	}
+}
+
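+/*
+ * Look up a kext's modctl entry by kmod id. The mod_lock must be held.
+ */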
+static modctl_t *
+dtrace_modctl_lookup(struct kmod_info * kmod)
+{
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
+
+ struct modctl * ctl;
+
+	for (ctl = dtrace_modctl_list; ctl != NULL; ctl = ctl->mod_next) {
+		if (ctl->mod_id == kmod->id)
+			return (ctl);
+	}
+	return (NULL);
+}
+
+/*
+ * This routine is called from dtrace_module_unloaded().
+ * It removes a modctl structure and its stale chain
+ * from the kext shadow list.
+ */
+static void
+dtrace_modctl_remove(struct modctl * ctl)
+{
+ ASSERT(ctl != NULL);
+ LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
+ modctl_t *prevp, *nextp, *curp;
+
+ // Remove stale chain first
+	for (curp = ctl->mod_stale; curp != NULL; curp = nextp) {
+ nextp = curp->mod_stale;
+ /* There should NEVER be user symbols allocated at this point */
+ ASSERT(curp->mod_user_symbols == NULL);
+ kmem_free(curp, sizeof(modctl_t));
+ }
+
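+	/* Unlink ctl itself from the shadow list. */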
+ prevp = NULL;
+ curp = dtrace_modctl_list;
+
+ while (curp != ctl) {
+ prevp = curp;
+ curp = curp->mod_next;
+ }
+
+ if (prevp != NULL) {
+ prevp->mod_next = ctl->mod_next;
+ }
+ else {
+ dtrace_modctl_list = ctl->mod_next;
+ }
+
+ /* There should NEVER be user symbols allocated at this point */
+ ASSERT(ctl->mod_user_symbols == NULL);
+
+	kmem_free(ctl, sizeof(modctl_t));
+}
+
+/*
+ * APPLE NOTE: The kext loader will call dtrace_module_loaded
+ * when the kext is loaded in memory, but before calling the
+ * kext's start routine.
+ *
+ * Return 0 on success
+ * Return -1 on failure
+ */
+
+static int
+dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag)
+{
+ dtrace_provider_t *prv;
+
+	/*
+	 * If kernel symbols have been disabled, return immediately.
+	 * DTRACE_KERNEL_SYMBOLS_NEVER is a permanent mode, so it is safe
+	 * to test without holding locks.
+	 */
+ if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER)
+ return 0;
+
+ struct modctl *ctl = NULL;
+ if (!kmod || kmod->address == 0 || kmod->size == 0)
+		return (-1);
+
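+	/* Lock ordering: dtrace_provider_lock, then mod_lock. */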
+ lck_mtx_lock(&dtrace_provider_lock);
+ lck_mtx_lock(&mod_lock);
+
+ /*
+ * Have we seen this kext before?
+ */
+
+ ctl = dtrace_modctl_lookup(kmod);
+
+ if (ctl != NULL) {
+ /* bail... we already have this kext in the modctl list */
+ lck_mtx_unlock(&mod_lock);
+ lck_mtx_unlock(&dtrace_provider_lock);
+		if (dtrace_err_verbose)
+			cmn_err(CE_WARN, "dtrace load module already exists "
+			    "'%s %u' is failing against '%s %u'",
+			    kmod->name, (uint_t)kmod->id,
+			    ctl->mod_modname, ctl->mod_id);
+		return (-1);
+ }
+ else {
+ ctl = kmem_alloc(sizeof(struct modctl), KM_SLEEP);
+ if (ctl == NULL) {
+			if (dtrace_err_verbose)
+				cmn_err(CE_WARN,
+				    "dtrace module load '%s %u' is failing",
+				    kmod->name, (uint_t)kmod->id);
+ lck_mtx_unlock(&mod_lock);
+ lck_mtx_unlock(&dtrace_provider_lock);
+ return (-1);
+ }
+ ctl->mod_next = NULL;
+ ctl->mod_stale = NULL;
+		strlcpy(ctl->mod_modname, kmod->name, sizeof(ctl->mod_modname));
+ ctl->mod_loadcnt = kmod->id;
+ ctl->mod_nenabled = 0;
+ ctl->mod_address = kmod->address;
+ ctl->mod_size = kmod->size;
+ ctl->mod_id = kmod->id;
+ ctl->mod_loaded = 1;
+ ctl->mod_flags = 0;
+ ctl->mod_user_symbols = NULL;
+
+ /*
+ * Find the UUID for this module, if it has one
+ */
+ kernel_mach_header_t* header = (kernel_mach_header_t *)ctl->mod_address;
+ struct load_command* load_cmd = (struct load_command *)&header[1];
+ uint32_t i;
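+		/* Walk the Mach-O load commands looking for LC_UUID. */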
+ for (i = 0; i < header->ncmds; i++) {
+ if (load_cmd->cmd == LC_UUID) {
+ struct uuid_command* uuid_cmd = (struct uuid_command *)load_cmd;
+ memcpy(ctl->mod_uuid, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));
+ ctl->mod_flags |= MODCTL_HAS_UUID;
+ break;
+ }
+ load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
+ }
+
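+		/*
+		 * If this module is the kernel itself, record its UUID for
+		 * use by static kexts below.
+		 */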
+ if (ctl->mod_address == g_kernel_kmod_info.address) {
+ ctl->mod_flags |= MODCTL_IS_MACH_KERNEL;
+ memcpy(dtrace_kerneluuid, ctl->mod_uuid, sizeof(dtrace_kerneluuid));
+ }
+		/*
+		 * Static kexts have a UUID that is not used for symbolication,
+		 * as all their symbols are in the kernel.
+		 */
+ else if ((flag & KMOD_DTRACE_STATIC_KEXT) == KMOD_DTRACE_STATIC_KEXT) {
+ memcpy(ctl->mod_uuid, dtrace_kerneluuid, sizeof(dtrace_kerneluuid));
+ ctl->mod_flags |= MODCTL_IS_STATIC_KEXT;
+ }
+ }
+ dtrace_modctl_add(ctl);
+
+	/*
+	 * We must hold dtrace_lock to safely test the non-permanent
+	 * dtrace_kernel_symbol_mode values.
+	 */
+ lck_mtx_lock(&dtrace_lock);
+
+ /*
+	 * DTrace must decide if it will instrument modules lazily via
+	 * userspace symbols (default mode), or instrument immediately via
+	 * kernel symbols (non-default mode).
+ *
+ * When in default/lazy mode, DTrace will only support modules
+ * built with a valid UUID.
+ *
+ * Overriding the default can be done explicitly in one of
+ * the following two ways.
+ *
+	 * A module can force symbols from kernel space using the plist key
+	 * OSBundleForceDTraceInit (see kmod.h). If this per-kext state is set,
+	 * we fall through and instrument this module now.
+ *
+ * Or, the boot-arg, dtrace_kernel_symbol_mode, can be set to force symbols
+ * from kernel space (see dtrace_impl.h). If this system state is set
+ * to a non-userspace mode, we fall through and instrument the module now.
+ */