/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Portions Copyright (c) 2013, 2016, Joyent, Inc. All rights reserved.
 * Portions Copyright (c) 2013 by Delphix. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace). The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file. The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Process functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/random.h>
#include <sys/systm.h>
#include <sys/dtrace_impl.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>
#include <sys/malloc.h>
#include <sys/kernel_types.h>
#include <sys/uio_internal.h>
#include <sys/kauth.h>
#include <vm/pmap.h>
#include <sys/user.h>
#include <mach/exception_types.h>
#include <sys/signalvar.h>
#include <mach/task.h>
#include <kern/zalloc.h>
#include <kern/ast.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <netinet/in.h>
#include <libkern/sysctl.h>
#include <sys/kdebug.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include "dtrace_xoroshiro128_plus.h"

#include <IOKit/IOPlatformExpert.h>

#include <kern/cpu_data.h>

extern addr64_t kvtophys(vm_offset_t va);

extern uint32_t pmap_find_phys(void *, uint64_t);
extern boolean_t pmap_valid_page(uint32_t);
extern void OSKextRegisterKextsWithDTrace(void);
extern kmod_info_t g_kernel_kmod_info;
extern void commpage_update_dof(boolean_t enabled);

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

#define t_predcache t_dtrace_predcache /* Cosmetic. Helps readability of thread.h */

extern void dtrace_suspend(void);
extern void dtrace_resume(void);
extern void dtrace_early_init(void);
extern int dtrace_keep_kernel_symbols(void);
extern void dtrace_init(void);
extern void helper_init(void);
extern void fasttrap_init(void);

static int dtrace_lazy_dofs_duplicate(proc_t *, proc_t *);
extern void dtrace_lazy_dofs_destroy(proc_t *);
extern void dtrace_postinit(void);

extern void dtrace_proc_fork(proc_t*, proc_t*, int);
extern void dtrace_proc_exec(proc_t*);
extern void dtrace_proc_exit(proc_t*);

/*
 * DTrace Tunable Variables
 *
 * The following variables may be dynamically tuned by using sysctl(8), the
 * variables being stored in the kern.dtrace namespace. For example:
 *     sysctl kern.dtrace.dof_maxsize=1048575   # 1M
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable. Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.
 */
uint64_t dtrace_buffer_memory_maxsize = 0;	/* initialized in dtrace_init */
uint64_t dtrace_buffer_memory_inuse = 0;
int dtrace_destructive_disallow = 1;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t dtrace_dof_maxsize = (512 * 1024);
dtrace_optval_t dtrace_statvar_maxsize = (16 * 1024);
dtrace_optval_t dtrace_statvar_maxsize_max = (16 * 10 * 1024);
size_t dtrace_actions_max = (16 * 1024);
size_t dtrace_retain_max = 1024;
dtrace_optval_t dtrace_helper_actions_max = 32;
dtrace_optval_t dtrace_helper_providers_max = 64;
dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t dtrace_strsize_default = 256;
dtrace_optval_t dtrace_strsize_min = 8;
dtrace_optval_t dtrace_strsize_max = 65536;
dtrace_optval_t dtrace_cleanrate_default = 990099000;		/* 1.01 hz */
dtrace_optval_t dtrace_cleanrate_min = 20000000;		/* 50 hz */
dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
dtrace_optval_t dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_nspec_default = 1;
dtrace_optval_t dtrace_specsize_default = 32 * 1024;
dtrace_optval_t dtrace_stackframes_default = 20;
dtrace_optval_t dtrace_ustackframes_default = 20;
dtrace_optval_t dtrace_jstackframes_default = 50;
dtrace_optval_t dtrace_jstackstrsize_default = 512;
dtrace_optval_t dtrace_buflimit_default = 75;
dtrace_optval_t dtrace_buflimit_min = 1;
dtrace_optval_t dtrace_buflimit_max = 99;
size_t dtrace_nprobes_default = 4;
int dtrace_msgdsize_max = 128;
hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC);		/* 500 ms */
hrtime_t dtrace_chill_interval = NANOSEC;			/* 1000 ms */
int dtrace_devdepth_max = 32;
int dtrace_err_verbose;
hrtime_t dtrace_deadman_interval = NANOSEC;
hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax. One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory. While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char dtrace_zero[256] = { 0 };	/* zero-filled memory */
unsigned int dtrace_max_cpus = 0;	/* number of enabled cpus */
/*
 * DTrace Internal Variables
 */
static dev_info_t *dtrace_devi;		/* device info */
static vmem_t *dtrace_arena;		/* probe ID arena */
static dtrace_probe_t **dtrace_probes;	/* array of all probes */
static int dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t *dtrace_meta_pid;	/* user-land meta provider */
static int dtrace_opens;		/* number of opens */
static int dtrace_helpers;		/* number of helpers */
static dtrace_hash_t *dtrace_strings;
static dtrace_hash_t *dtrace_byprov;	/* probes hashed by provider */
static dtrace_hash_t *dtrace_bymod;	/* probes hashed by module */
static dtrace_hash_t *dtrace_byfunc;	/* probes hashed by function */
static dtrace_hash_t *dtrace_byname;	/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int dtrace_toxranges;		/* number of toxic ranges */
static int dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t dtrace_anon;	/* anonymous enabling */
static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t *dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_genid_t dtrace_retained_gen;	/* current retained enab gen */
static dtrace_dynvar_t dtrace_dynhash_sink;	/* end of dynamic hash chains */

static int dtrace_dof_mode;		/* See dtrace_impl.h for a description of Darwin's dof modes. */

/*
 * This doesn't quite fit as an internal variable, as it must be accessed in
 * fbt_provide and sdt_provide. It's clearly not a DTrace tunable variable either...
 */
int dtrace_kernel_symbol_mode;		/* See dtrace_impl.h for a description of Darwin's kernel symbol modes. */
static uint32_t dtrace_wake_clients;
static uint8_t dtrace_kerneluuid[16];	/* the 128-bit uuid */

/*
 * To save memory, some common memory allocations are given a
 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved (roughly 20,000 x 56
 * bytes, or about 1 MB) is substantial.
 */

static ZONE_DECLARE(dtrace_probe_t_zone, "dtrace.dtrace_probe_t",
    sizeof(dtrace_probe_t), ZC_NONE);

static int dtrace_module_unloaded(struct kmod_info *kmod);

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc. Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock. (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */

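/*
 * To illustrate the ordering (a sketch only, not an actual code path):
 * a hypothetical operation that needed all three DTrace locks would
 * nest them as follows:
 *
 *	lck_mtx_lock(&dtrace_meta_lock);
 *	lck_mtx_lock(&dtrace_provider_lock);
 *	lck_mtx_lock(&dtrace_lock);
 *	... manipulate meta provider, provider, and core state ...
 *	lck_mtx_unlock(&dtrace_lock);
 *	lck_mtx_unlock(&dtrace_provider_lock);
 *	lck_mtx_unlock(&dtrace_meta_lock);
 */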

/*
 * APPLE NOTE:
 *
 * For porting purposes, all kmutex_t vars have been changed
 * to lck_mtx_t, which requires explicit initialization.
 *
 * kmutex_t becomes lck_mtx_t
 * mutex_enter() becomes lck_mtx_lock()
 * mutex_exit() becomes lck_mtx_unlock()
 *
 * Lock asserts are changed like this:
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 * becomes:
 *	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
 *
 */
static LCK_MTX_DECLARE_ATTR(dtrace_lock,
    &dtrace_lck_grp, &dtrace_lck_attr);		/* probe state lock */
static LCK_MTX_DECLARE_ATTR(dtrace_provider_lock,
    &dtrace_lck_grp, &dtrace_lck_attr);		/* provider state lock */
static LCK_MTX_DECLARE_ATTR(dtrace_meta_lock,
    &dtrace_lck_grp, &dtrace_lck_attr);		/* meta-provider state lock */
static LCK_RW_DECLARE_ATTR(dtrace_dof_mode_lock,
    &dtrace_lck_grp, &dtrace_lck_attr);		/* dof mode lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_provide_nullop(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
}

static void
dtrace_provide_module_nullop(void *arg, struct modctl *ctl)
{
#pragma unused(arg, ctl)
}

static int
dtrace_enable_nullop(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
    return (0);
}

static void
dtrace_disable_nullop(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
}

static void
dtrace_suspend_nullop(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
}

static void
dtrace_resume_nullop(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
}

static void
dtrace_destroy_nullop(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id, parg)
}


static dtrace_pops_t dtrace_provider_ops = {
    .dtps_provide = dtrace_provide_nullop,
    .dtps_provide_module = dtrace_provide_module_nullop,
    .dtps_enable = dtrace_enable_nullop,
    .dtps_disable = dtrace_disable_nullop,
    .dtps_suspend = dtrace_suspend_nullop,
    .dtps_resume = dtrace_resume_nullop,
    .dtps_getargdesc = NULL,
    .dtps_getargval = NULL,
    .dtps_usermode = NULL,
    .dtps_destroy = dtrace_destroy_nullop,
};

static dtrace_id_t dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t dtrace_probeid_end;		/* special END probe */
dtrace_id_t dtrace_probeid_error;		/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char *dtrace_helptrace_buffer;
size_t dtrace_helptrace_bufsize = 512 * 1024;

#if DEBUG
int dtrace_helptrace_enabled = 1;
#else
int dtrace_helptrace_enabled = 0;
#endif

#if defined (__arm64__)
/*
 * The ioctl for adding helper DOF is based on the
 * size of a user_addr_t. We need to recognize both
 * U32 and U64 as the same action.
 */
#define DTRACEHIOC_ADDDOF_U32	_IOW('h', 4, user32_addr_t)
#define DTRACEHIOC_ADDDOF_U64	_IOW('h', 4, user64_addr_t)
#endif /* __arm64__ */

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table. This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation. The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#if DEBUG
static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static LCK_MTX_DECLARE_ATTR(dtrace_errlock, &dtrace_lck_grp, &dtrace_lck_attr);
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation. There is no real structure to this cpp
 * mishmash -- but is there ever?
 */

#define DTRACE_GETSTR(hash, elm) \
	(hash->dth_getstr(elm, hash->dth_stroffs))

#define DTRACE_HASHSTR(hash, elm) \
	dtrace_hash_str(DTRACE_GETSTR(hash, elm))

#define DTRACE_HASHNEXT(hash, elm) \
	(void**)((uintptr_t)(elm) + (hash)->dth_nextoffs)

#define DTRACE_HASHPREV(hash, elm) \
	(void**)((uintptr_t)(elm) + (hash)->dth_prevoffs)

#define DTRACE_HASHEQ(hash, lhs, rhs) \
	(strcmp(DTRACE_GETSTR(hash, lhs), \
	    DTRACE_GETSTR(hash, rhs)) == 0)

#define DTRACE_AGGHASHSIZE_SLEW	17

#define DTRACE_V4MAPPED_OFFSET	(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * current_thread(), plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier. This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables. To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables. That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined (__x86_64__)
/* FIXME: two function calls!! */
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
	uint64_t thr = (uintptr_t)current_thread(); \
	ASSERT(intr < (1 << 3)); \
	(where) = ((thr + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#elif defined(__arm__)
/* FIXME: three function calls!!! */
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
	uint64_t thr = (uintptr_t)current_thread(); \
	uint_t pid = (uint_t)dtrace_proc_selfpid(); \
	ASSERT(intr < (1 << 3)); \
	(where) = (((thr << 32 | pid) + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#elif defined (__arm64__)
/* FIXME: two function calls!! */
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
	uint64_t thr = (uintptr_t)current_thread(); \
	ASSERT(intr < (1 << 3)); \
	(where) = ((thr + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#error Unknown architecture
#endif
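
/*
 * Usage sketch (illustrative only): callers build the TLS key into a
 * local, e.g.
 *
 *	uint64_t key;
 *	DTRACE_TLS_THRKEY(key);
 *
 * leaving the interrupt-context indicator in the top 3 bits and the
 * biased thread identity in the low 61 bits.
 */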

#define DT_BSWAP_8(x)	((x) & 0xff)
#define DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
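
/*
 * For example, DT_BSWAP_16(0x1234) yields 0x3412: the low byte (0x34)
 * moves to the high position and the high byte (0x12) to the low
 * position. The wider variants recurse on the narrower ones.
 */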

#define DT_MASK_LO 0x00000000FFFFFFFFULL

#define DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);


#define DTRACE_ALIGNCHECK(addr, size, flags) \
	if (addr & (MIN(size,4) - 1)) { \
		*flags |= CPU_DTRACE_BADALIGN; \
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
		return (0); \
	}

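/*
 * For illustration: DTRACE_ALIGNCHECK(0x1003, 4, flags) records a
 * CPU_DTRACE_BADALIGN fault because 0x1003 & 3 != 0. Note that the
 * MIN(size, 4) mask means 8-byte loads are only required to be 4-byte
 * aligned.
 */
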
#define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \
do { \
	if ((remp) != NULL) { \
		*(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \
	} \
} while (0)


/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz. We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes. Ranges of size 0 are allowed.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))

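/*
 * Worked example (hypothetical values): with baseaddr = 0x1000 and
 * basesz = 0x100, probing testaddr = 0xfffffffffffffff0 with testsz =
 * 0x20 would pass a naive "testaddr + testsz <= baseaddr + basesz"
 * check only because the sum wraps; here the third clause rejects it,
 * since testaddr + testsz overflows to a value below testaddr.
 */
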
/*
 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it. This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range. Allocations of size zero are allowed.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define RECOVER_LABEL(bits) dtraceLoadRecover##bits:

#if defined (__x86_64__) || (defined (__arm__) || defined (__arm64__))
#define DTRACE_LOADFUNC(bits) \
/*CSTYLED*/ \
uint##bits##_t dtrace_load##bits(uintptr_t addr); \
 \
uint##bits##_t \
dtrace_load##bits(uintptr_t addr) \
{ \
	size_t size = bits / NBBY; \
	/*CSTYLED*/ \
	uint##bits##_t rval = 0; \
	int i; \
	volatile uint16_t *flags = (volatile uint16_t *) \
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
 \
	DTRACE_ALIGNCHECK(addr, size, flags); \
 \
	for (i = 0; i < dtrace_toxranges; i++) { \
		if (addr >= dtrace_toxrange[i].dtt_limit) \
			continue; \
 \
		if (addr + size <= dtrace_toxrange[i].dtt_base) \
			continue; \
 \
		/* \
		 * This address falls within a toxic region; return 0. \
		 */ \
		*flags |= CPU_DTRACE_BADADDR; \
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
		return (0); \
	} \
 \
	{ \
		volatile vm_offset_t recover = (vm_offset_t)&&dtraceLoadRecover##bits; \
		*flags |= CPU_DTRACE_NOFAULT; \
		recover = dtrace_sign_and_set_thread_recover(current_thread(), recover); \
		/*CSTYLED*/ \
		/* \
		 * PR6394061 - avoid device memory that is unpredictably \
		 * mapped and unmapped \
		 */ \
		if (pmap_valid_page(pmap_find_phys(kernel_pmap, addr))) \
			rval = *((volatile uint##bits##_t *)addr); \
		else { \
			*flags |= CPU_DTRACE_BADADDR; \
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
			return (0); \
		} \
 \
		RECOVER_LABEL(bits); \
		(void)dtrace_set_thread_recover(current_thread(), recover); \
		*flags &= ~CPU_DTRACE_NOFAULT; \
	} \
 \
	return (rval); \
}
#else /* all other architectures */
#error Unknown Architecture
#endif

#ifdef __LP64__
#define dtrace_loadptr	dtrace_load64
#else
#define dtrace_loadptr	dtrace_load32
#endif

#define DTRACE_DYNHASH_FREE	0
#define DTRACE_DYNHASH_SINK	1
#define DTRACE_DYNHASH_VALID	2

#define DTRACE_MATCH_FAIL	-1
#define DTRACE_MATCH_NEXT	0
#define DTRACE_MATCH_DONE	1
#define DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define DTRACE_STATE_ALIGN	64

#define DTRACE_FLAGS2FLT(flags) \
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
	DTRACEFLT_UNKNOWN)

#define DTRACEACT_ISSTRING(act) \
	((act)->dta_kind == DTRACEACT_DIFEXPR && \
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)


2d21ac55
A
656static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
657static void dtrace_enabling_provide(dtrace_provider_t *);
39037602
A
658static int dtrace_enabling_match(dtrace_enabling_t *, int *, dtrace_match_cond_t *cond);
659static void dtrace_enabling_matchall_with_cond(dtrace_match_cond_t *cond);
2d21ac55
A
660static void dtrace_enabling_matchall(void);
661static dtrace_state_t *dtrace_anon_grab(void);
662static uint64_t dtrace_helper(int, dtrace_mstate_t *,
663 dtrace_state_t *, uint64_t, uint64_t);
664static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
665static void dtrace_buffer_drop(dtrace_buffer_t *);
666static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
667 dtrace_state_t *, dtrace_mstate_t *);
668static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
669 dtrace_optval_t);
d190cdc3 670static int dtrace_ecb_create_enable(dtrace_probe_t *, void *, void *);
2d21ac55 671static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
39037602
A
672static int dtrace_canload_remains(uint64_t, size_t, size_t *,
673 dtrace_mstate_t *, dtrace_vstate_t *);
674static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
675 dtrace_mstate_t *, dtrace_vstate_t *);
2d21ac55 676
fe8ab488
A
677
/*
 * DTrace sysctl handlers
 *
 * These declarations and functions are used for a deeper DTrace configuration.
 * Most of them are not set on a per-consumer basis and may impact the other
 * DTrace consumers. Correctness is not verified for all of these variables,
 * so be careful about the values you use.
 */

SYSCTL_DECL(_kern_dtrace);
SYSCTL_NODE(_kern, OID_AUTO, dtrace, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "dtrace");

static int
sysctl_dtrace_err_verbose SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int changed, error;
	int value = *(int *) arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error || !changed)
		return (error);

	if (value != 0 && value != 1)
		return (ERANGE);

	lck_mtx_lock(&dtrace_lock);
	dtrace_err_verbose = value;
	lck_mtx_unlock(&dtrace_lock);

	return (0);
}

/*
 * kern.dtrace.err_verbose
 *
 * Set DTrace verbosity when an error occurred (0 = disabled, 1 = enabled).
 * Errors are reported when a DIFO or a DOF has been rejected by the kernel.
 */
SYSCTL_PROC(_kern_dtrace, OID_AUTO, err_verbose,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &dtrace_err_verbose, 0,
    sysctl_dtrace_err_verbose, "I", "dtrace error verbose");

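/*
 * For example (illustrative), from user space:
 *
 *	sysctl kern.dtrace.err_verbose		# read the current setting
 *	sudo sysctl kern.dtrace.err_verbose=1	# report DIF/DOF rejections
 */
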
static int
sysctl_dtrace_buffer_memory_maxsize SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2, req)
	int changed, error;
	uint64_t value = *(uint64_t *) arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error || !changed)
		return (error);

	if (value <= dtrace_buffer_memory_inuse)
		return (ERANGE);

	lck_mtx_lock(&dtrace_lock);
	dtrace_buffer_memory_maxsize = value;
	lck_mtx_unlock(&dtrace_lock);

	return (0);
}

/*
 * kern.dtrace.buffer_memory_maxsize
 *
 * Set the maximum size, in bytes, that may be used by all the consumers'
 * state buffers. By default the limit is PHYS_MEM / 3 for *all* consumers.
 * Attempting to set zero, a negative value, or a value less than or equal
 * to dtrace_buffer_memory_inuse will fail.
 */
SYSCTL_PROC(_kern_dtrace, OID_AUTO, buffer_memory_maxsize,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &dtrace_buffer_memory_maxsize, 0,
    sysctl_dtrace_buffer_memory_maxsize, "Q", "dtrace state buffer memory maxsize");

/*
 * kern.dtrace.buffer_memory_inuse
 *
 * Current state buffer memory used, in bytes, by all the DTrace consumers.
 * This value is read-only.
 */
SYSCTL_QUAD(_kern_dtrace, OID_AUTO, buffer_memory_inuse, CTLFLAG_RD | CTLFLAG_LOCKED,
    &dtrace_buffer_memory_inuse, "dtrace state buffer memory in-use");

static int
sysctl_dtrace_difo_maxsize SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2, req)
	int changed, error;
	size_t value = *(size_t*) arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error || !changed)
		return (error);

	if (value <= 0)
		return (ERANGE);

	lck_mtx_lock(&dtrace_lock);
	dtrace_difo_maxsize = value;
	lck_mtx_unlock(&dtrace_lock);

	return (0);
}

/*
 * kern.dtrace.difo_maxsize
 *
 * Set the DIFO maximum size, in bytes; see the definition of
 * dtrace_difo_maxsize for the default value. Attempting to set a zero or
 * negative size will fail.
 */
SYSCTL_PROC(_kern_dtrace, OID_AUTO, difo_maxsize,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &dtrace_difo_maxsize, 0,
    sysctl_dtrace_difo_maxsize, "Q", "dtrace difo maxsize");

static int
sysctl_dtrace_dof_maxsize SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2, req)
	int changed, error;
	dtrace_optval_t value = *(dtrace_optval_t *) arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error || !changed)
		return (error);

	if (value <= 0)
		return (ERANGE);

	if (value >= dtrace_copy_maxsize())
		return (ERANGE);

	lck_mtx_lock(&dtrace_lock);
	dtrace_dof_maxsize = value;
	lck_mtx_unlock(&dtrace_lock);

	return (0);
}

/*
 * kern.dtrace.dof_maxsize
 *
 * Set the DOF maximum size, in bytes; see the definition of
 * dtrace_dof_maxsize for the default value. Attempting to set a zero or
 * negative size will fail.
 */
SYSCTL_PROC(_kern_dtrace, OID_AUTO, dof_maxsize,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &dtrace_dof_maxsize, 0,
    sysctl_dtrace_dof_maxsize, "Q", "dtrace dof maxsize");

static int
sysctl_dtrace_statvar_maxsize SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2, req)
	int changed, error;
	dtrace_optval_t value = *(dtrace_optval_t*) arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error || !changed)
		return (error);

	if (value <= 0)
		return (ERANGE);
	if (value > dtrace_statvar_maxsize_max)
		return (ERANGE);

	lck_mtx_lock(&dtrace_lock);
	dtrace_statvar_maxsize = value;
	lck_mtx_unlock(&dtrace_lock);

	return (0);
}

/*
 * kern.dtrace.global_maxsize
 *
 * Set the statically-scoped variable maximum size, in bytes; see the
 * definition of dtrace_statvar_maxsize for the default value. Attempting
 * to set a zero, negative, or too-high size will fail.
 */
SYSCTL_PROC(_kern_dtrace, OID_AUTO, global_maxsize,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &dtrace_statvar_maxsize, 0,
    sysctl_dtrace_statvar_maxsize, "Q", "dtrace statvar maxsize");


/*
 * kern.dtrace.provide_private_probes
 *
 * Set whether the providers must provide the private probes. This is
 * kept for compatibility; private probes are always provided.
 */
SYSCTL_INT(_kern_dtrace, OID_AUTO, provide_private_probes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    (int *)NULL, 1, "provider must provide the private probes");

/*
 * kern.dtrace.dof_mode
 *
 * Returns the current DOF mode.
 * This value is read-only.
 */
SYSCTL_INT(_kern_dtrace, OID_AUTO, dof_mode, CTLFLAG_RD | CTLFLAG_LOCKED,
    &dtrace_dof_mode, 0, "dtrace dof mode");

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context. Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */

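/*
 * Sketch (illustrative, hypothetical helper): probe-context code never
 * dereferences an arbitrary kernel pointer directly; it goes through the
 * safe loads, which absorb faults rather than take them:
 *
 *	uint32_t
 *	example_peek32(uintptr_t addr)
 *	{
 *		return (dtrace_load32(addr));
 *	}
 */
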
int
dtrace_assfail(const char *a, const char *f, int l)
{
	panic("dtrace: assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage. If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors. (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	size_t maxglobalsize, maxlocalsize;

	maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
	maxlocalsize = (maxglobalsize) * NCPU;

	if (nsvars == 0)
		return (0);

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];
		uint8_t scope;
		size_t size;

		if (svar == NULL || (size = svar->dtsv_size) == 0)
			continue;

		scope = svar->dtsv_var.dtdv_scope;

		/**
		 * We verify that our size is valid in the spirit of providing
		 * defense in depth: we want to prevent attackers from using
		 * DTrace to escalate an orthogonal kernel heap corruption bug
		 * into the ability to store to arbitrary locations in memory.
		 */
		VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
		    (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) {
			DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
			    svar->dtsv_size);
			return (1);
		}
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued. This includes the DTrace scratch areas, and any DTrace variable
 * region. The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
}
/*
 * Implementation of dtrace_canstore which communicates the upper bound of the
 * allowed memory region.
 */
static int
dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size)) {
		DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
		    mstate->dtms_scratch_size);
		return (1);
	}
	/*
	 * Now check to see if it's a dynamic variable. This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;
		dtrace_dynvar_t *dvar;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state. For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 *	(4) Not be in the tuple space of a dynamic variable
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);

		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
			return (0);

		if (chunkoffs < sizeof (dtrace_dynvar_t) +
		    ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables. These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}

A
1122
1123/*
1124 * Convenience routine to check to see if the address is within a memory
1125 * region in which a load may be issued given the user's privilege level;
1126 * if not, it sets the appropriate error flags and loads 'addr' into the
1127 * illegal value slot.
1128 *
1129 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
1130 * appropriate memory access protection.
1131 */
5ba3f43e 1132int
b0d623f7
A
1133dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
1134 dtrace_vstate_t *vstate)
39037602
A
1135{
1136 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
1137}
1138
1139/*
1140 * Implementation of dtrace_canload which communicates the upper bound of the
1141 * allowed memory region.
1142 */
1143static int
1144dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
1145 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
b0d623f7 1146{
b0d623f7 1147 volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
b0d623f7
A
1148
1149 /*
1150 * If we hold the privilege to read from kernel memory, then
1151 * everything is readable.
1152 */
39037602
A
1153 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1154 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
b0d623f7 1155 return (1);
39037602 1156 }
b0d623f7
A
1157
1158 /*
1159 * You can obviously read that which you can store.
1160 */
39037602 1161 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
b0d623f7
A
1162 return (1);
1163
1164 /*
1165 * We're allowed to read from our own string table.
1166 */
1167 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
39037602
A
1168 mstate->dtms_difo->dtdo_strlen)) {
1169 DTRACE_RANGE_REMAIN(remain, addr,
1170 mstate->dtms_difo->dtdo_strtab,
1171 mstate->dtms_difo->dtdo_strlen);
b0d623f7 1172 return (1);
39037602 1173 }
b0d623f7
A
1174
1175 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
1176 *illval = addr;
1177 return (0);
1178}
1179
/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	size_t rsize;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
		return (1);
	}

	/*
	 * Even if the caller is uninterested in querying the remaining valid
	 * range, it is required to ensure that the access is allowed.
	 */
	if (remain == NULL) {
		remain = &rsize;
	}
	if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
		size_t strsz;
		/*
		 * Perform the strlen after determining the length of the
		 * memory region which is accessible. This prevents timing
		 * information from being used to find NULs in memory which is
		 * not accessible to the caller.
		 */
		strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
		    MIN(sz, *remain));
		if (strsz <= *remain) {
			return (1);
		}
	}

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * Calculate the max size before performing any checks since even
	 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
	 * return the max length via 'remain'.
	 */
	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_state_t *state = vstate->dtvs_state;

		if (state != NULL) {
			sz = state->dts_options[DTRACEOPT_STRSIZE];
		} else {
			/*
			 * In helper context, we have a NULL state; fall back
			 * to using the system-wide default for the string size
			 * in this case.
			 */
			sz = dtrace_strsize_default;
		}
	} else {
		sz = type->dtdt_size;
	}

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
		return (1);
	}

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
		    vstate));
	}
	return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
	    vstate));
}

#define isdigit(ch)	((ch) >= '0' && (ch) <= '9')
#define islower(ch)	((ch) >= 'a' && (ch) <= 'z')
#define isspace(ch)	(((ch) == ' ') || ((ch) == '\r') || ((ch) == '\n') || \
			((ch) == '\t') || ((ch) == '\f'))
#define isxdigit(ch)	(isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
			((ch) >= 'A' && (ch) <= 'F'))
#define lisalnum(x) \
	(isdigit(x) || ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z'))

#define DIGIT(x) \
	(isdigit(x) ? (x) - '0' : islower(x) ? (x) + 10 - 'a' : (x) + 10 - 'A')

/*
 * Convert a string to a signed integer using safe loads.
 */
static int64_t
dtrace_strtoll(char *input, int base, size_t limit)
{
	uintptr_t pos = (uintptr_t)input;
	int64_t val = 0;
	int x;
	boolean_t neg = B_FALSE;
	char c, cc, ccc;
	uintptr_t end = pos + limit;

	/*
	 * Consume any whitespace preceding digits.
	 */
	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
		pos++;

	/*
	 * Handle an explicit sign if one is present.
	 */
	if (c == '-' || c == '+') {
		if (c == '-')
			neg = B_TRUE;
		c = dtrace_load8(++pos);
	}

	/*
	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
	 * if present.
	 */
	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
		pos += 2;
		c = ccc;
	}

	/*
	 * Read in contiguous digits until the first non-digit character.
	 */
	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
	    c = dtrace_load8(++pos))
		val = val * base + x;

	return (neg ? -val : val);
}
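
/*
 * For example (illustrative): dtrace_strtoll("0x1f", 16, 8) skips the
 * "0x" prefix and returns 31, while dtrace_strtoll(" -42", 10, 8)
 * consumes the leading space and sign and returns -42.
 */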

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(const char *s1, const char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses. The additional
 * lim parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
 * memory specified by the DIF program. The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace. As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered. The src is assumed to
 * be unsafe memory specified by the DIF program. The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type. The src is assumed to be unsafe memory specified by the DIF
 * program. The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
	} else {
		dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
	}
}

/*
 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
 * unsafe memory specified by the DIF program. The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop. Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
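
/*
 * Worked example (illustrative): multiplying (2^32 + 3) by (2^32 + 5)
 * gives hi1 = hi2 = 1, lo1 = 3, lo2 = 5. The partial products are
 * hi1*hi2 = 1 (the high word), lo1*lo2 = 15, and the cross terms 5 and
 * 3 each shifted left 32 bits; summing yields product[0] = 0x80000000f
 * and product[1] = 0x1, i.e. 2^64 + 8*2^32 + 15.
 */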
1612
2d21ac55
A
1613/*
1614 * This privilege check should be used by actions and subroutines to
1615 * verify that the user credentials of the process that enabled the
 1616 * invoking ECB match the target credentials.
1617 */
1618static int
1619dtrace_priv_proc_common_user(dtrace_state_t *state)
1620{
1621 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1622
1623 /*
1624 * We should always have a non-NULL state cred here, since if cred
1625 * is null (anonymous tracing), we fast-path bypass this routine.
1626 */
1627 ASSERT(s_cr != NULL);
1628
2d21ac55 1629 if ((cr = dtrace_CRED()) != NULL &&
1630 posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_uid &&
1631 posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_ruid &&
1632 posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_suid &&
1633 posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_gid &&
1634 posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_rgid &&
1635 posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_sgid)
1636 return (1);
1637
1638 return (0);
1639}
1640
1641/*
1642 * This privilege check should be used by actions and subroutines to
1643 * verify that the zone of the process that enabled the invoking ECB
 1644 * matches the target credentials.
1645 */
1646static int
1647dtrace_priv_proc_common_zone(dtrace_state_t *state)
1648{
1649 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
fe8ab488 1650#pragma unused(cr, s_cr, state) /* __APPLE__ */
1651
1652 /*
1653 * We should always have a non-NULL state cred here, since if cred
1654 * is null (anonymous tracing), we fast-path bypass this routine.
1655 */
1656 ASSERT(s_cr != NULL);
1657
fe8ab488 1658 return 1; /* APPLE NOTE: Darwin doesn't do zones. */
1659}
1660
1661/*
1662 * This privilege check should be used by actions and subroutines to
 1663 * verify that the process has not performed a setuid or otherwise changed credentials.
1664 */
1665static int
1666dtrace_priv_proc_common_nocd(void)
1667{
1668 return 1; /* Darwin omits "No Core Dump" flag. */
1669}
1670
1671static int
1672dtrace_priv_proc_destructive(dtrace_state_t *state)
1673{
1674 int action = state->dts_cred.dcr_action;
1675
1676 if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1677 goto bad;
1678
1679 if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc()))
1680 goto bad;
cf7d32b8 1681
1682 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1683 dtrace_priv_proc_common_zone(state) == 0)
1684 goto bad;
1685
1686 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1687 dtrace_priv_proc_common_user(state) == 0)
1688 goto bad;
1689
1690 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1691 dtrace_priv_proc_common_nocd() == 0)
1692 goto bad;
1693
1694 return (1);
1695
1696bad:
1697 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1698
1699 return (0);
1700}
1701
1702static int
1703dtrace_priv_proc_control(dtrace_state_t *state)
1704{
1705 if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1706 goto bad;
1707
1708 if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc()))
1709 goto bad;
cf7d32b8 1710
1711 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1712 return (1);
1713
1714 if (dtrace_priv_proc_common_zone(state) &&
1715 dtrace_priv_proc_common_user(state) &&
1716 dtrace_priv_proc_common_nocd())
1717 return (1);
1718
cf7d32b8 1719bad:
1720 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1721
1722 return (0);
1723}
1724
1725static int
1726dtrace_priv_proc(dtrace_state_t *state)
1727{
1728 if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1729 goto bad;
fe8ab488 1730
39037602 1731 if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed() && !dtrace_can_attach_to_proc(current_proc()))
fe8ab488 1732 goto bad;
cf7d32b8 1733
1734 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1735 return (1);
1736
cf7d32b8 1737bad:
1738 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1739
1740 return (0);
1741}
1742
1743/*
 1744 * The P_LNOATTACH check is an Apple-specific check.
 1745 * We need a version of dtrace_priv_proc() that omits
 1746 * that check for PID and EXECNAME accesses.
1747 */
1748static int
1749dtrace_priv_proc_relaxed(dtrace_state_t *state)
1750{
1751
1752 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1753 return (1);
1754
1755 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1756
1757 return (0);
1758}
935ed37a 1759
1760static int
1761dtrace_priv_kernel(dtrace_state_t *state)
1762{
39037602 1763 if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed())
1764 goto bad;
1765
1766 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1767 return (1);
1768
fe8ab488 1769bad:
1770 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1771
1772 return (0);
1773}
1774
1775static int
1776dtrace_priv_kernel_destructive(dtrace_state_t *state)
1777{
1778 if (dtrace_is_restricted())
1779 goto bad;
1780
1781 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1782 return (1);
1783
fe8ab488 1784bad:
1785 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1786
1787 return (0);
1788}
1789
1790/*
1791 * Note: not called from probe context. This function is called
1792 * asynchronously (and at a regular interval) from outside of probe context to
1793 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1794 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1795 */
fe8ab488 1796static void
1797dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1798{
1799 dtrace_dynvar_t *dirty;
1800 dtrace_dstate_percpu_t *dcpu;
1801 int i, work = 0;
1802
c910b4d9 1803 for (i = 0; i < (int)NCPU; i++) {
1804 dcpu = &dstate->dtds_percpu[i];
1805
1806 ASSERT(dcpu->dtdsc_rinsing == NULL);
1807
1808 /*
1809 * If the dirty list is NULL, there is no dirty work to do.
1810 */
1811 if (dcpu->dtdsc_dirty == NULL)
1812 continue;
1813
1814 /*
1815 * If the clean list is non-NULL, then we're not going to do
1816 * any work for this CPU -- it means that there has not been
1817 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1818 * since the last time we cleaned house.
1819 */
1820 if (dcpu->dtdsc_clean != NULL)
1821 continue;
1822
1823 work = 1;
1824
1825 /*
1826 * Atomically move the dirty list aside.
1827 */
1828 do {
1829 dirty = dcpu->dtdsc_dirty;
1830
1831 /*
1832 * Before we zap the dirty list, set the rinsing list.
1833 * (This allows for a potential assertion in
1834 * dtrace_dynvar(): if a free dynamic variable appears
1835 * on a hash chain, either the dirty list or the
1836 * rinsing list for some CPU must be non-NULL.)
1837 */
1838 dcpu->dtdsc_rinsing = dirty;
1839 dtrace_membar_producer();
1840 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1841 dirty, NULL) != dirty);
1842 }
1843
1844 if (!work) {
1845 /*
1846 * We have no work to do; we can simply return.
1847 */
1848 return;
1849 }
1850
1851 dtrace_sync();
1852
c910b4d9 1853 for (i = 0; i < (int)NCPU; i++) {
1854 dcpu = &dstate->dtds_percpu[i];
1855
1856 if (dcpu->dtdsc_rinsing == NULL)
1857 continue;
1858
1859 /*
1860 * We are now guaranteed that no hash chain contains a pointer
1861 * into this dirty list; we can make it clean.
1862 */
1863 ASSERT(dcpu->dtdsc_clean == NULL);
1864 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1865 dcpu->dtdsc_rinsing = NULL;
1866 }
1867
1868 /*
1869 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1870 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1871 * This prevents a race whereby a CPU incorrectly decides that
1872 * the state should be something other than DTRACE_DSTATE_CLEAN
1873 * after dtrace_dynvar_clean() has completed.
1874 */
1875 dtrace_sync();
1876
1877 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1878}
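
/*
 * An illustrative sketch (hypothetical helper) of the list-stealing idiom
 * used above: a singly-linked list that other CPUs may push onto is
 * detached in one atomic step by compare-and-swapping its head to NULL.
 * If another CPU pushed an element after the head was read, the CAS fails
 * and the steal is retried against the new head.
 */
static dtrace_dynvar_t *
dtrace_list_steal_example(dtrace_dynvar_t **headp)
{
	dtrace_dynvar_t *head;

	do {
		head = *headp;
	} while (dtrace_casptr(headp, head, NULL) != head);

	/* The caller now owns the entire detached chain. */
	return (head);
}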
1879
1880/*
1881 * Depending on the value of the op parameter, this function looks-up,
1882 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1883 * allocation is requested, this function will return a pointer to a
1884 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1885 * variable can be allocated. If NULL is returned, the appropriate counter
1886 * will be incremented.
1887 */
fe8ab488 1888static dtrace_dynvar_t *
2d21ac55 1889dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1890 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1891 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1892{
1893 uint64_t hashval = DTRACE_DYNHASH_VALID;
1894 dtrace_dynhash_t *hash = dstate->dtds_hash;
1895 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1896 processorid_t me = CPU->cpu_id, cpu = me;
1897 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1898 size_t bucket, ksize;
1899 size_t chunksize = dstate->dtds_chunksize;
1900 uintptr_t kdata, lock, nstate;
1901 uint_t i;
1902
1903 ASSERT(nkeys != 0);
1904
1905 /*
1906 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1907 * algorithm. For the by-value portions, we perform the algorithm in
1908 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1909 * bit, and seems to have only a minute effect on distribution. For
1910 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1911 * over each referenced byte. It's painful to do this, but it's much
1912 * better than pathological hash distribution. The efficacy of the
1913 * hashing algorithm (and a comparison with other algorithms) may be
1914 * found by running the ::dtrace_dynstat MDB dcmd.
1915 */
1916 for (i = 0; i < nkeys; i++) {
1917 if (key[i].dttk_size == 0) {
1918 uint64_t val = key[i].dttk_value;
1919
1920 hashval += (val >> 48) & 0xffff;
1921 hashval += (hashval << 10);
1922 hashval ^= (hashval >> 6);
1923
1924 hashval += (val >> 32) & 0xffff;
1925 hashval += (hashval << 10);
1926 hashval ^= (hashval >> 6);
1927
1928 hashval += (val >> 16) & 0xffff;
1929 hashval += (hashval << 10);
1930 hashval ^= (hashval >> 6);
1931
1932 hashval += val & 0xffff;
1933 hashval += (hashval << 10);
1934 hashval ^= (hashval >> 6);
1935 } else {
1936 /*
1937 * This is incredibly painful, but it beats the hell
1938 * out of the alternative.
1939 */
1940 uint64_t j, size = key[i].dttk_size;
1941 uintptr_t base = (uintptr_t)key[i].dttk_value;
1942
1943 if (!dtrace_canload(base, size, mstate, vstate))
1944 break;
1945
1946 for (j = 0; j < size; j++) {
1947 hashval += dtrace_load8(base + j);
1948 hashval += (hashval << 10);
1949 hashval ^= (hashval >> 6);
1950 }
1951 }
1952 }
1953
1954 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1955 return (NULL);
1956
1957 hashval += (hashval << 3);
1958 hashval ^= (hashval >> 11);
1959 hashval += (hashval << 15);
1960
1961 /*
1962 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1963 * comes out to be one of our two sentinel hash values. If this
1964 * actually happens, we set the hashval to be a value known to be a
1965 * non-sentinel value.
1966 */
1967 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1968 hashval = DTRACE_DYNHASH_VALID;
1969
1970 /*
1971 * Yes, it's painful to do a divide here. If the cycle count becomes
1972 * important here, tricks can be pulled to reduce it. (However, it's
1973 * critical that hash collisions be kept to an absolute minimum;
1974 * they're much more painful than a divide.) It's better to have a
1975 * solution that generates few collisions and still keeps things
1976 * relatively simple.
1977 */
1978 bucket = hashval % dstate->dtds_hashsize;
1979
1980 if (op == DTRACE_DYNVAR_DEALLOC) {
1981 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1982
1983 for (;;) {
1984 while ((lock = *lockp) & 1)
1985 continue;
1986
1987 if (dtrace_casptr((void *)(uintptr_t)lockp,
1988 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1989 break;
1990 }
1991
1992 dtrace_membar_producer();
1993 }
1994
1995top:
1996 prev = NULL;
1997 lock = hash[bucket].dtdh_lock;
1998
1999 dtrace_membar_consumer();
2000
2001 start = hash[bucket].dtdh_chain;
2002 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
2003 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
2004 op != DTRACE_DYNVAR_DEALLOC));
2005
2006 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
2007 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
2008 dtrace_key_t *dkey = &dtuple->dtt_key[0];
2009
2010 if (dvar->dtdv_hashval != hashval) {
2011 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
2012 /*
2013 * We've reached the sink, and therefore the
2014 * end of the hash chain; we can kick out of
2015 * the loop knowing that we have seen a valid
2016 * snapshot of state.
2017 */
2018 ASSERT(dvar->dtdv_next == NULL);
2019 ASSERT(dvar == &dtrace_dynhash_sink);
2020 break;
2021 }
2022
2023 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
2024 /*
2025 * We've gone off the rails: somewhere along
2026 * the line, one of the members of this hash
2027 * chain was deleted. Note that we could also
2028 * detect this by simply letting this loop run
2029 * to completion, as we would eventually hit
2030 * the end of the dirty list. However, we
2031 * want to avoid running the length of the
2032 * dirty list unnecessarily (it might be quite
2033 * long), so we catch this as early as
2034 * possible by detecting the hash marker. In
2035 * this case, we simply set dvar to NULL and
2036 * break; the conditional after the loop will
2037 * send us back to top.
2038 */
2039 dvar = NULL;
2040 break;
2041 }
2042
2043 goto next;
2044 }
2045
2046 if (dtuple->dtt_nkeys != nkeys)
2047 goto next;
2048
2049 for (i = 0; i < nkeys; i++, dkey++) {
2050 if (dkey->dttk_size != key[i].dttk_size)
2051 goto next; /* size or type mismatch */
2052
2053 if (dkey->dttk_size != 0) {
2054 if (dtrace_bcmp(
2055 (void *)(uintptr_t)key[i].dttk_value,
2056 (void *)(uintptr_t)dkey->dttk_value,
2057 dkey->dttk_size))
2058 goto next;
2059 } else {
2060 if (dkey->dttk_value != key[i].dttk_value)
2061 goto next;
2062 }
2063 }
2064
2065 if (op != DTRACE_DYNVAR_DEALLOC)
2066 return (dvar);
2067
2068 ASSERT(dvar->dtdv_next == NULL ||
2069 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
2070
2071 if (prev != NULL) {
2072 ASSERT(hash[bucket].dtdh_chain != dvar);
2073 ASSERT(start != dvar);
2074 ASSERT(prev->dtdv_next == dvar);
2075 prev->dtdv_next = dvar->dtdv_next;
2076 } else {
2077 if (dtrace_casptr(&hash[bucket].dtdh_chain,
2078 start, dvar->dtdv_next) != start) {
2079 /*
2080 * We have failed to atomically swing the
2081 * hash table head pointer, presumably because
2082 * of a conflicting allocation on another CPU.
2083 * We need to reread the hash chain and try
2084 * again.
2085 */
2086 goto top;
2087 }
2088 }
2089
2090 dtrace_membar_producer();
2091
2092 /*
2093 * Now set the hash value to indicate that it's free.
2094 */
2095 ASSERT(hash[bucket].dtdh_chain != dvar);
2096 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2097
2098 dtrace_membar_producer();
2099
2100 /*
2101 * Set the next pointer to point at the dirty list, and
2102 * atomically swing the dirty pointer to the newly freed dvar.
2103 */
2104 do {
2105 next = dcpu->dtdsc_dirty;
2106 dvar->dtdv_next = next;
2107 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
2108
2109 /*
2110 * Finally, unlock this hash bucket.
2111 */
2112 ASSERT(hash[bucket].dtdh_lock == lock);
2113 ASSERT(lock & 1);
2114 hash[bucket].dtdh_lock++;
2115
2116 return (NULL);
2117next:
2118 prev = dvar;
2119 continue;
2120 }
2121
2122 if (dvar == NULL) {
2123 /*
2124 * If dvar is NULL, it is because we went off the rails:
2125 * one of the elements that we traversed in the hash chain
2126 * was deleted while we were traversing it. In this case,
2127 * we assert that we aren't doing a dealloc (deallocs lock
2128 * the hash bucket to prevent themselves from racing with
2129 * one another), and retry the hash chain traversal.
2130 */
2131 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
2132 goto top;
2133 }
2134
2135 if (op != DTRACE_DYNVAR_ALLOC) {
2136 /*
2137 * If we are not to allocate a new variable, we want to
2138 * return NULL now. Before we return, check that the value
2139 * of the lock word hasn't changed. If it has, we may have
2140 * seen an inconsistent snapshot.
2141 */
2142 if (op == DTRACE_DYNVAR_NOALLOC) {
2143 if (hash[bucket].dtdh_lock != lock)
2144 goto top;
2145 } else {
2146 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2147 ASSERT(hash[bucket].dtdh_lock == lock);
2148 ASSERT(lock & 1);
2149 hash[bucket].dtdh_lock++;
2150 }
2151
2152 return (NULL);
2153 }
2154
2155 /*
2156 * We need to allocate a new dynamic variable. The size we need is the
2157 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2158 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2159 * the size of any referred-to data (dsize). We then round the final
2160 * size up to the chunksize for allocation.
2161 */
2162 for (ksize = 0, i = 0; i < nkeys; i++)
2163 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2164
2165 /*
2166 * This should be pretty much impossible, but could happen if, say,
2167 * strange DIF specified the tuple. Ideally, this should be an
2168 * assertion and not an error condition -- but that requires that the
2169 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2170 * bullet-proof. (That is, it must not be able to be fooled by
2171 * malicious DIF.) Given the lack of backwards branches in DIF,
2172 * solving this would presumably not amount to solving the Halting
2173 * Problem -- but it still seems awfully hard.
2174 */
2175 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2176 ksize + dsize > chunksize) {
2177 dcpu->dtdsc_drops++;
2178 return (NULL);
2179 }
2180
2181 nstate = DTRACE_DSTATE_EMPTY;
2182
2183 do {
2184retry:
2185 free = dcpu->dtdsc_free;
2186
2187 if (free == NULL) {
2188 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2189 void *rval;
2190
2191 if (clean == NULL) {
2192 /*
2193 * We're out of dynamic variable space on
2194 * this CPU. Unless we have tried all CPUs,
2195 * we'll try to allocate from a different
2196 * CPU.
2197 */
2198 switch (dstate->dtds_state) {
2199 case DTRACE_DSTATE_CLEAN: {
2200 void *sp = &dstate->dtds_state;
2201
c910b4d9 2202 if (++cpu >= (int)NCPU)
2203 cpu = 0;
2204
2205 if (dcpu->dtdsc_dirty != NULL &&
2206 nstate == DTRACE_DSTATE_EMPTY)
2207 nstate = DTRACE_DSTATE_DIRTY;
2208
2209 if (dcpu->dtdsc_rinsing != NULL)
2210 nstate = DTRACE_DSTATE_RINSING;
2211
2212 dcpu = &dstate->dtds_percpu[cpu];
2213
2214 if (cpu != me)
2215 goto retry;
2216
2217 (void) dtrace_cas32(sp,
2218 DTRACE_DSTATE_CLEAN, nstate);
2219
2220 /*
2221 * To increment the correct bean
2222 * counter, take another lap.
2223 */
2224 goto retry;
2225 }
2226
2227 case DTRACE_DSTATE_DIRTY:
2228 dcpu->dtdsc_dirty_drops++;
2229 break;
2230
2231 case DTRACE_DSTATE_RINSING:
2232 dcpu->dtdsc_rinsing_drops++;
2233 break;
2234
2235 case DTRACE_DSTATE_EMPTY:
2236 dcpu->dtdsc_drops++;
2237 break;
2238 }
2239
2240 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2241 return (NULL);
2242 }
2243
2244 /*
2245 * The clean list appears to be non-empty. We want to
2246 * move the clean list to the free list; we start by
2247 * moving the clean pointer aside.
2248 */
2249 if (dtrace_casptr(&dcpu->dtdsc_clean,
2250 clean, NULL) != clean) {
2251 /*
2252 * We are in one of two situations:
2253 *
2254 * (a) The clean list was switched to the
2255 * free list by another CPU.
2256 *
2257 * (b) The clean list was added to by the
2258 * cleansing cyclic.
2259 *
2260 * In either of these situations, we can
2261 * just reattempt the free list allocation.
2262 */
2263 goto retry;
2264 }
2265
2266 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2267
2268 /*
2269 * Now we'll move the clean list to the free list.
2270 * It's impossible for this to fail: the only way
2271 * the free list can be updated is through this
2272 * code path, and only one CPU can own the clean list.
2273 * Thus, it would only be possible for this to fail if
2274 * this code were racing with dtrace_dynvar_clean().
2275 * (That is, if dtrace_dynvar_clean() updated the clean
2276 * list, and we ended up racing to update the free
2277 * list.) This race is prevented by the dtrace_sync()
2278 * in dtrace_dynvar_clean() -- which flushes the
2279 * owners of the clean lists out before resetting
2280 * the clean lists.
2281 */
2282 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2283 ASSERT(rval == NULL);
2284 goto retry;
2285 }
2286
2287 dvar = free;
2288 new_free = dvar->dtdv_next;
2289 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2290
2291 /*
2292 * We have now allocated a new chunk. We copy the tuple keys into the
2293 * tuple array and copy any referenced key data into the data space
2294 * following the tuple array. As we do this, we relocate dttk_value
2295 * in the final tuple to point to the key data address in the chunk.
2296 */
2297 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2298 dvar->dtdv_data = (void *)(kdata + ksize);
2299 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2300
2301 for (i = 0; i < nkeys; i++) {
2302 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2303 size_t kesize = key[i].dttk_size;
2304
2305 if (kesize != 0) {
2306 dtrace_bcopy(
2307 (const void *)(uintptr_t)key[i].dttk_value,
2308 (void *)kdata, kesize);
2309 dkey->dttk_value = kdata;
2310 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2311 } else {
2312 dkey->dttk_value = key[i].dttk_value;
2313 }
2314
2315 dkey->dttk_size = kesize;
2316 }
2317
2318 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2319 dvar->dtdv_hashval = hashval;
2320 dvar->dtdv_next = start;
2321
2322 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2323 return (dvar);
2324
2325 /*
2326 * The cas has failed. Either another CPU is adding an element to
2327 * this hash chain, or another CPU is deleting an element from this
2328 * hash chain. The simplest way to deal with both of these cases
2329 * (though not necessarily the most efficient) is to free our
2330 * allocated block and tail-call ourselves. Note that the free is
2331 * to the dirty list and _not_ to the free list. This is to prevent
2332 * races with allocators, above.
2333 */
2334 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2335
2336 dtrace_membar_producer();
2337
2338 do {
2339 free = dcpu->dtdsc_dirty;
2340 dvar->dtdv_next = free;
2341 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2342
b0d623f7 2343 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
2344}
2345
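/*
 * An illustrative sketch (hypothetical helper): the Jenkins "One-at-a-time"
 * hash that dtrace_dynvar() above and dtrace_aggregate() below inline, in
 * its plain byte-at-a-time form -- the same per-byte mixing step followed
 * by the same final avalanche.
 */
static uint64_t
dtrace_oneatatime_example(const uint8_t *data, size_t len)
{
	uint64_t hashval = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	return (hashval);
}
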
2346/*ARGSUSED*/
2347static void
2348dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2349{
2350#pragma unused(arg) /* __APPLE__ */
2351 if ((int64_t)nval < (int64_t)*oval)
2352 *oval = nval;
2353}
2354
2355/*ARGSUSED*/
2356static void
2357dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2358{
2359#pragma unused(arg) /* __APPLE__ */
2360 if ((int64_t)nval > (int64_t)*oval)
2361 *oval = nval;
2362}
2363
2364static void
2365dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2366{
2367 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2368 int64_t val = (int64_t)nval;
2369
2370 if (val < 0) {
2371 for (i = 0; i < zero; i++) {
2372 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2373 quanta[i] += incr;
2374 return;
2375 }
2376 }
2377 } else {
2378 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2379 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2380 quanta[i - 1] += incr;
2381 return;
2382 }
2383 }
2384
2385 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2386 return;
2387 }
2388
2389 ASSERT(0);
2390}
2391
2392static void
2393dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2394{
2395 uint64_t arg = *lquanta++;
2396 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2397 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2398 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2399 int32_t val = (int32_t)nval, level;
2400
2401 ASSERT(step != 0);
2402 ASSERT(levels != 0);
2403
2404 if (val < base) {
2405 /*
2406 * This is an underflow.
2407 */
2408 lquanta[0] += incr;
2409 return;
2410 }
2411
2412 level = (val - base) / step;
2413
2414 if (level < levels) {
2415 lquanta[level + 1] += incr;
2416 return;
2417 }
2418
2419 /*
2420 * This is an overflow.
2421 */
2422 lquanta[levels + 1] += incr;
2423}
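
/*
 * A worked example (hypothetical parameters): for a linear quantization
 * with base = 0, step = 10 and levels = 10 -- e.g. lquantize(x, 0, 100, 10)
 * in D -- a value of 37 computes level = (37 - 0) / 10 = 3 and increments
 * lquanta[3 + 1]. Values below the base land in the underflow bucket
 * lquanta[0]; values of 100 and above land in the overflow bucket
 * lquanta[levels + 1].
 */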
2424
2425static int
2426dtrace_aggregate_llquantize_bucket(int16_t factor, int16_t low, int16_t high,
2427 int16_t nsteps, int64_t value)
2428{
2429 int64_t this = 1, last, next;
2430 int base = 1, order;
2431
2432 for (order = 0; order < low; ++order)
2433 this *= factor;
2434
2435 /*
2436 * If our value is less than our factor taken to the power of the
2437 * low order of magnitude, it goes into the zeroth bucket.
2438 */
2439 if (value < this)
2440 return 0;
2441 else
2442 last = this;
2443
2444 for (this *= factor; order <= high; ++order) {
2445 int nbuckets = this > nsteps ? nsteps : this;
2446
2447 /*
2448 * We should not generally get log/linear quantizations
2449 * with a high magnitude that allows 64-bits to
2450 * overflow, but we nonetheless protect against this
2451 * by explicitly checking for overflow, and clamping
2452 * our value accordingly.
2453 */
2454 next = this * factor;
2455 if (next < this) {
2456 value = this - 1;
2457 }
2458
2459 /*
2460 * If our value lies within this order of magnitude,
2461 * determine its position by taking the offset within
2462 * the order of magnitude, dividing by the bucket
2463 * width, and adding to our (accumulated) base.
2464 */
2465 if (value < this) {
2466 return (base + (value - last) / (this / nbuckets));
2467 }
2468
2469 base += nbuckets - (nbuckets / factor);
2470 last = this;
2471 this = next;
2472 }
2473
2474 /*
2475 * Our value is greater than or equal to our factor taken to the
2476 * power of one plus the high magnitude -- return the top bucket.
2477 */
2478 return base;
2479}
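
/*
 * A worked example (hypothetical parameters): with factor = 10, low = 0,
 * high = 2 and nsteps = 10, a value of 42 is not below 10^0 (so it clears
 * bucket 0), advances base by 10 - 10/10 = 9 buckets for the magnitude
 * [1, 10), and then falls within [10, 100):
 *
 *	bucket = 1 + 9 + (42 - 10) / (100 / 10) = 13
 */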
2480
2481static void
2482dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2483{
2484 uint64_t arg = *llquanta++;
2485 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2486 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2487 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
15129b1c 2488 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2489
2490 llquanta[dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, nval)] += incr;
2491}
2492
2493/*ARGSUSED*/
2494static void
2495dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2496{
b0d623f7 2497#pragma unused(arg) /* __APPLE__ */
2498 data[0]++;
2499 data[1] += nval;
2500}
2501
2502/*ARGSUSED*/
2503static void
b0d623f7 2504dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2d21ac55 2505{
2506#pragma unused(arg) /* __APPLE__ */
2507 int64_t snval = (int64_t)nval;
2508 uint64_t tmp[2];
2509
2510 data[0]++;
2511 data[1] += nval;
2512
2513 /*
2514 * What we want to say here is:
2515 *
2516 * data[2] += nval * nval;
2517 *
2518 * But given that nval is 64-bit, we could easily overflow, so
2519 * we do this as 128-bit arithmetic.
2520 */
2521 if (snval < 0)
2522 snval = -snval;
2523
2524 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2525 dtrace_add_128(data + 2, tmp, data + 2);
2526}
2527
2528/*ARGSUSED*/
2529static void
b0d623f7 2530dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2d21ac55 2531{
2532#pragma unused(nval, arg) /* __APPLE__ */
2533 *oval = *oval + 1;
2534}
2535
2536/*ARGSUSED*/
2537static void
2538dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2539{
2540#pragma unused(arg) /* __APPLE__ */
2541 *oval += nval;
2542}
2543
2544/*
2545 * Aggregate given the tuple in the principal data buffer, and the aggregating
2546 * action denoted by the specified dtrace_aggregation_t. The aggregation
2547 * buffer is specified as the buf parameter. This routine does not return
2548 * failure; if there is no space in the aggregation buffer, the data will be
2549 * dropped, and a corresponding counter incremented.
2550 */
f427ee49 2551__attribute__((noinline))
2552static void
2553dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2554 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2555{
c910b4d9 2556#pragma unused(arg)
2557 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2558 uint32_t i, ndx, size, fsize;
2559 uint32_t align = sizeof (uint64_t) - 1;
2560 dtrace_aggbuffer_t *agb;
2561 dtrace_aggkey_t *key;
2562 uint32_t hashval = 0, limit, isstr;
2563 caddr_t tomax, data, kdata;
2564 dtrace_actkind_t action;
2565 dtrace_action_t *act;
2566 uintptr_t offs;
2567
2568 if (buf == NULL)
2569 return;
2570
2571 if (!agg->dtag_hasarg) {
2572 /*
2573 * Currently, only quantize() and lquantize() take additional
2574 * arguments, and they have the same semantics: an increment
2575 * value that defaults to 1 when not present. If additional
2576 * aggregating actions take arguments, the setting of the
2577 * default argument value will presumably have to become more
2578 * sophisticated...
2579 */
2580 arg = 1;
2581 }
2582
2583 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2584 size = rec->dtrd_offset - agg->dtag_base;
2585 fsize = size + rec->dtrd_size;
2586
2587 ASSERT(dbuf->dtb_tomax != NULL);
2588 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2589
2590 if ((tomax = buf->dtb_tomax) == NULL) {
2591 dtrace_buffer_drop(buf);
2592 return;
2593 }
2594
2595 /*
2596 * The metastructure is always at the bottom of the buffer.
2597 */
2598 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2599 sizeof (dtrace_aggbuffer_t));
2600
2601 if (buf->dtb_offset == 0) {
2602 /*
2603 * We just kludge up approximately 1/8th of the size to be
2604 * buckets. If this guess ends up being routinely
2605 * off-the-mark, we may need to dynamically readjust this
2606 * based on past performance.
2607 */
2608 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2609
2610 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2611 (uintptr_t)tomax || hashsize == 0) {
2612 /*
2613 * We've been given a ludicrously small buffer;
2614 * increment our drop count and leave.
2615 */
2616 dtrace_buffer_drop(buf);
2617 return;
2618 }
2619
2620 /*
 2621 * And now, a pathetic attempt to try to get an odd (or
2622 * perchance, a prime) hash size for better hash distribution.
2623 */
2624 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2625 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2626
2627 agb->dtagb_hashsize = hashsize;
2628 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2629 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2630 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2631
2632 for (i = 0; i < agb->dtagb_hashsize; i++)
2633 agb->dtagb_hash[i] = NULL;
2634 }
2635
2636 ASSERT(agg->dtag_first != NULL);
2637 ASSERT(agg->dtag_first->dta_intuple);
2638
2639 /*
2640 * Calculate the hash value based on the key. Note that we _don't_
2641 * include the aggid in the hashing (but we will store it as part of
2642 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2643 * algorithm: a simple, quick algorithm that has no known funnels, and
2644 * gets good distribution in practice. The efficacy of the hashing
2645 * algorithm (and a comparison with other algorithms) may be found by
2646 * running the ::dtrace_aggstat MDB dcmd.
2647 */
2648 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2649 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2650 limit = i + act->dta_rec.dtrd_size;
2651 ASSERT(limit <= size);
2652 isstr = DTRACEACT_ISSTRING(act);
2653
2654 for (; i < limit; i++) {
2655 hashval += data[i];
2656 hashval += (hashval << 10);
2657 hashval ^= (hashval >> 6);
2658
2659 if (isstr && data[i] == '\0')
2660 break;
2661 }
2662 }
2663
2664 hashval += (hashval << 3);
2665 hashval ^= (hashval >> 11);
2666 hashval += (hashval << 15);
2667
2668 /*
2669 * Yes, the divide here is expensive -- but it's generally the least
2670 * of the performance issues given the amount of data that we iterate
2671 * over to compute hash values, compare data, etc.
2672 */
2673 ndx = hashval % agb->dtagb_hashsize;
2674
2675 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2676 ASSERT((caddr_t)key >= tomax);
2677 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2678
2679 if (hashval != key->dtak_hashval || key->dtak_size != size)
2680 continue;
2681
2682 kdata = key->dtak_data;
2683 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2684
2685 for (act = agg->dtag_first; act->dta_intuple;
2686 act = act->dta_next) {
2687 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2688 limit = i + act->dta_rec.dtrd_size;
2689 ASSERT(limit <= size);
2690 isstr = DTRACEACT_ISSTRING(act);
2691
2692 for (; i < limit; i++) {
2693 if (kdata[i] != data[i])
2694 goto next;
2695
2696 if (isstr && data[i] == '\0')
2697 break;
2698 }
2699 }
2700
2701 if (action != key->dtak_action) {
2702 /*
2703 * We are aggregating on the same value in the same
2704 * aggregation with two different aggregating actions.
2705 * (This should have been picked up in the compiler,
2706 * so we may be dealing with errant or devious DIF.)
2707 * This is an error condition; we indicate as much,
2708 * and return.
2709 */
2710 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2711 return;
2712 }
2713
2714 /*
2715 * This is a hit: we need to apply the aggregator to
2716 * the value at this key.
2717 */
2718 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2719 return;
2720next:
2721 continue;
2722 }
2723
2724 /*
2725 * We didn't find it. We need to allocate some zero-filled space,
2726 * link it into the hash table appropriately, and apply the aggregator
2727 * to the (zero-filled) value.
2728 */
2729 offs = buf->dtb_offset;
2730 while (offs & (align - 1))
2731 offs += sizeof (uint32_t);
2732
2733 /*
2734 * If we don't have enough room to both allocate a new key _and_
2735 * its associated data, increment the drop count and return.
2736 */
2737 if ((uintptr_t)tomax + offs + fsize >
2738 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2739 dtrace_buffer_drop(buf);
2740 return;
2741 }
2742
2743 /*CONSTCOND*/
2744 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2745 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2746 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2747
2748 key->dtak_data = kdata = tomax + offs;
2749 buf->dtb_offset = offs + fsize;
2750
2751 /*
2752 * Now copy the data across.
2753 */
2754 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2755
2756 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2757 kdata[i] = data[i];
2758
2759 /*
2760 * Because strings are not zeroed out by default, we need to iterate
2761 * looking for actions that store strings, and we need to explicitly
2762 * pad these strings out with zeroes.
2763 */
2764 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2765 int nul;
2766
2767 if (!DTRACEACT_ISSTRING(act))
2768 continue;
2769
2770 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2771 limit = i + act->dta_rec.dtrd_size;
2772 ASSERT(limit <= size);
2773
2774 for (nul = 0; i < limit; i++) {
2775 if (nul) {
2776 kdata[i] = '\0';
2777 continue;
2778 }
2779
2780 if (data[i] != '\0')
2781 continue;
2782
2783 nul = 1;
2784 }
2785 }
2786
2787 for (i = size; i < fsize; i++)
2788 kdata[i] = 0;
2789
2790 key->dtak_hashval = hashval;
2791 key->dtak_size = size;
2792 key->dtak_action = action;
2793 key->dtak_next = agb->dtagb_hash[ndx];
2794 agb->dtagb_hash[ndx] = key;
2795
2796 /*
2797 * Finally, apply the aggregator.
2798 */
2799 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2800 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2801}
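
/*
 * A sketch of the buffer geometry implied above. Record data is allocated
 * from dtb_tomax toward higher addresses (tracked by dtb_offset), while
 * dtrace_aggkey_t entries are carved from dtagb_free toward lower
 * addresses; the two meet in the middle, and a drop is counted when they
 * would collide:
 *
 *	dtb_tomax            -> +----------------------------------+
 *	                        | record keys and data (grows ->)  |
 *	                        |               ...                |
 *	                        |            free space            |
 *	                        |               ...                |
 *	dtagb_free           -> | (<- grows) dtrace_aggkey_t slots |
 *	dtagb_hash           -> | hash bucket array                |
 *	agb                  -> | dtrace_aggbuffer_t               |
 *	dtb_tomax + dtb_size -> +----------------------------------+
 */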
2802
2803/*
2804 * Given consumer state, this routine finds a speculation in the INACTIVE
2805 * state and transitions it into the ACTIVE state. If there is no speculation
2806 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2807 * incremented -- it is up to the caller to take appropriate action.
2808 */
2809static int
2810dtrace_speculation(dtrace_state_t *state)
2811{
2812 int i = 0;
2813 dtrace_speculation_state_t current;
2814 uint32_t *stat = &state->dts_speculations_unavail, count;
2815
2816 while (i < state->dts_nspeculations) {
2817 dtrace_speculation_t *spec = &state->dts_speculations[i];
2818
2819 current = spec->dtsp_state;
2820
2821 if (current != DTRACESPEC_INACTIVE) {
2822 if (current == DTRACESPEC_COMMITTINGMANY ||
2823 current == DTRACESPEC_COMMITTING ||
2824 current == DTRACESPEC_DISCARDING)
2825 stat = &state->dts_speculations_busy;
2826 i++;
2827 continue;
2828 }
2829
2830 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2831 current, DTRACESPEC_ACTIVE) == current)
2832 return (i + 1);
2833 }
2834
2835 /*
2836 * We couldn't find a speculation. If we found as much as a single
2837 * busy speculation buffer, we'll attribute this failure as "busy"
2838 * instead of "unavail".
2839 */
2840 do {
2841 count = *stat;
2842 } while (dtrace_cas32(stat, count, count + 1) != count);
2843
2844 return (0);
2845}
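
/*
 * An illustrative sketch (hypothetical helper) of the lock-free state
 * transition idiom shared by the speculation routines in this file: the
 * state is re-read and re-evaluated until a compare-and-swap installs the
 * new state without interference from another CPU.
 */
static int
dtrace_spec_transition_example(dtrace_speculation_t *spec,
    dtrace_speculation_state_t from, dtrace_speculation_state_t to)
{
	dtrace_speculation_state_t current;

	do {
		current = spec->dtsp_state;

		if (current != from)
			return (0);	/* another CPU moved it; give up */
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, to) != current);

	return (1);	/* we performed the from -> to transition */
}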
2846
2847/*
2848 * This routine commits an active speculation. If the specified speculation
2849 * is not in a valid state to perform a commit(), this routine will silently do
2850 * nothing. The state of the specified speculation is transitioned according
2851 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2852 */
2853static void
2854dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2855 dtrace_specid_t which)
2856{
2857 dtrace_speculation_t *spec;
2858 dtrace_buffer_t *src, *dest;
04b8595b 2859 uintptr_t daddr, saddr, dlimit, slimit;
b0d623f7 2860 dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE;
2d21ac55 2861 intptr_t offs;
04b8595b 2862 uint64_t timestamp;
2863
2864 if (which == 0)
2865 return;
2866
2867 if (which > (dtrace_specid_t)state->dts_nspeculations) {
2868 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2869 return;
2870 }
b0d623f7 2871
2872 spec = &state->dts_speculations[which - 1];
2873 src = &spec->dtsp_buffer[cpu];
2874 dest = &state->dts_buffer[cpu];
2875
2876 do {
2877 current = spec->dtsp_state;
2878
2879 if (current == DTRACESPEC_COMMITTINGMANY)
2880 break;
2881
2882 switch (current) {
2883 case DTRACESPEC_INACTIVE:
2884 case DTRACESPEC_DISCARDING:
2885 return;
2886
2887 case DTRACESPEC_COMMITTING:
2888 /*
2889 * This is only possible if we are (a) commit()'ing
2890 * without having done a prior speculate() on this CPU
2891 * and (b) racing with another commit() on a different
2892 * CPU. There's nothing to do -- we just assert that
2893 * our offset is 0.
2894 */
2895 ASSERT(src->dtb_offset == 0);
2896 return;
2897
2898 case DTRACESPEC_ACTIVE:
2899 new = DTRACESPEC_COMMITTING;
2900 break;
2901
2902 case DTRACESPEC_ACTIVEONE:
2903 /*
2904 * This speculation is active on one CPU. If our
2905 * buffer offset is non-zero, we know that the one CPU
2906 * must be us. Otherwise, we are committing on a
2907 * different CPU from the speculate(), and we must
2908 * rely on being asynchronously cleaned.
2909 */
2910 if (src->dtb_offset != 0) {
2911 new = DTRACESPEC_COMMITTING;
2912 break;
2913 }
f427ee49 2914 OS_FALLTHROUGH;
2915
2916 case DTRACESPEC_ACTIVEMANY:
2917 new = DTRACESPEC_COMMITTINGMANY;
2918 break;
2919
2920 default:
2921 ASSERT(0);
2922 }
2923 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2924 current, new) != current);
2925
2926 /*
2927 * We have set the state to indicate that we are committing this
2928 * speculation. Now reserve the necessary space in the destination
2929 * buffer.
2930 */
2931 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2932 sizeof (uint64_t), state, NULL)) < 0) {
2933 dtrace_buffer_drop(dest);
2934 goto out;
2935 }
2936
2937 /*
2938 * We have sufficient space to copy the speculative buffer into the
2939 * primary buffer. First, modify the speculative buffer, filling
2940 * in the timestamp of all entries with the current time. The data
2941 * must have the commit() time rather than the time it was traced,
2942 * so that all entries in the primary buffer are in timestamp order.
2943 */
2944 timestamp = dtrace_gethrtime();
2945 saddr = (uintptr_t)src->dtb_tomax;
2946 slimit = saddr + src->dtb_offset;
2947 while (saddr < slimit) {
2948 size_t size;
2949 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2950
2951 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2952 saddr += sizeof (dtrace_epid_t);
2953 continue;
2954 }
2955
2956 ASSERT(dtrh->dtrh_epid <= ((dtrace_epid_t) state->dts_necbs));
2957 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2958
2959 ASSERT(saddr + size <= slimit);
2960 ASSERT(size >= sizeof(dtrace_rechdr_t));
2961 ASSERT(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh) == UINT64_MAX);
2962
2963 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2964
2965 saddr += size;
2966 }
2967
2968 /*
2969 * Copy the buffer across. (Note that this is a
 2970 * highly suboptimal bcopy(); in the unlikely event that this becomes
2971 * a serious performance issue, a high-performance DTrace-specific
2972 * bcopy() should obviously be invented.)
2973 */
2974 daddr = (uintptr_t)dest->dtb_tomax + offs;
2975 dlimit = daddr + src->dtb_offset;
2976 saddr = (uintptr_t)src->dtb_tomax;
2977
2978 /*
2979 * First, the aligned portion.
2980 */
2981 while (dlimit - daddr >= sizeof (uint64_t)) {
2982 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2983
2984 daddr += sizeof (uint64_t);
2985 saddr += sizeof (uint64_t);
2986 }
2987
2988 /*
2989 * Now any left-over bit...
2990 */
2991 while (dlimit - daddr)
2992 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2993
2994 /*
2995 * Finally, commit the reserved space in the destination buffer.
2996 */
2997 dest->dtb_offset = offs + src->dtb_offset;
2998
2999out:
3000 /*
3001 * If we're lucky enough to be the only active CPU on this speculation
3002 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
3003 */
3004 if (current == DTRACESPEC_ACTIVE ||
3005 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
3006 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
3007 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
b0d623f7 3008#pragma unused(rval) /* __APPLE__ */
3009
3010 ASSERT(rval == DTRACESPEC_COMMITTING);
3011 }
3012
3013 src->dtb_offset = 0;
3014 src->dtb_xamot_drops += src->dtb_drops;
3015 src->dtb_drops = 0;
3016}
3017
3018/*
3019 * This routine discards an active speculation. If the specified speculation
3020 * is not in a valid state to perform a discard(), this routine will silently
3021 * do nothing. The state of the specified speculation is transitioned
3022 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
3023 */
f427ee49 3024__attribute__((noinline))
3025static void
3026dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
3027 dtrace_specid_t which)
3028{
3029 dtrace_speculation_t *spec;
b0d623f7 3030 dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE;
3031 dtrace_buffer_t *buf;
3032
3033 if (which == 0)
3034 return;
3035
3036 if (which > (dtrace_specid_t)state->dts_nspeculations) {
3037 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3038 return;
3039 }
3040
3041 spec = &state->dts_speculations[which - 1];
3042 buf = &spec->dtsp_buffer[cpu];
3043
3044 do {
3045 current = spec->dtsp_state;
3046
3047 switch (current) {
3048 case DTRACESPEC_INACTIVE:
3049 case DTRACESPEC_COMMITTINGMANY:
3050 case DTRACESPEC_COMMITTING:
3051 case DTRACESPEC_DISCARDING:
3052 return;
3053
3054 case DTRACESPEC_ACTIVE:
3055 case DTRACESPEC_ACTIVEMANY:
3056 new = DTRACESPEC_DISCARDING;
3057 break;
3058
3059 case DTRACESPEC_ACTIVEONE:
3060 if (buf->dtb_offset != 0) {
3061 new = DTRACESPEC_INACTIVE;
3062 } else {
3063 new = DTRACESPEC_DISCARDING;
3064 }
3065 break;
3066
3067 default:
3068 ASSERT(0);
3069 }
3070 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3071 current, new) != current);
3072
3073 buf->dtb_offset = 0;
3074 buf->dtb_drops = 0;
3075}
3076
3077/*
3078 * Note: not called from probe context. This function is called
3079 * asynchronously from cross call context to clean any speculations that are
3080 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
3081 * transitioned back to the INACTIVE state until all CPUs have cleaned the
3082 * speculation.
3083 */
3084static void
3085dtrace_speculation_clean_here(dtrace_state_t *state)
3086{
3087 dtrace_icookie_t cookie;
3088 processorid_t cpu = CPU->cpu_id;
3089 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
3090 dtrace_specid_t i;
3091
3092 cookie = dtrace_interrupt_disable();
3093
3094 if (dest->dtb_tomax == NULL) {
3095 dtrace_interrupt_enable(cookie);
3096 return;
3097 }
3098
b0d623f7 3099 for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3100 dtrace_speculation_t *spec = &state->dts_speculations[i];
3101 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
3102
3103 if (src->dtb_tomax == NULL)
3104 continue;
3105
3106 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
3107 src->dtb_offset = 0;
3108 continue;
3109 }
3110
3111 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3112 continue;
3113
3114 if (src->dtb_offset == 0)
3115 continue;
3116
3117 dtrace_speculation_commit(state, cpu, i + 1);
3118 }
3119
3120 dtrace_interrupt_enable(cookie);
3121}
3122
3123/*
3124 * Note: not called from probe context. This function is called
3125 * asynchronously (and at a regular interval) to clean any speculations that
3126 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
3127 * is work to be done, it cross calls all CPUs to perform that work;
 3128 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
3129 * INACTIVE state until they have been cleaned by all CPUs.
3130 */
3131static void
3132dtrace_speculation_clean(dtrace_state_t *state)
3133{
3134 int work = 0;
3135 uint32_t rv;
3136 dtrace_specid_t i;
3137
b0d623f7 3138 for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3139 dtrace_speculation_t *spec = &state->dts_speculations[i];
3140
3141 ASSERT(!spec->dtsp_cleaning);
3142
3143 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
3144 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3145 continue;
3146
3147 work++;
3148 spec->dtsp_cleaning = 1;
3149 }
3150
3151 if (!work)
3152 return;
3153
3154 dtrace_xcall(DTRACE_CPUALL,
3155 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3156
3157 /*
3158 * We now know that all CPUs have committed or discarded their
3159 * speculation buffers, as appropriate. We can now set the state
3160 * to inactive.
3161 */
b0d623f7 3162 for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3163 dtrace_speculation_t *spec = &state->dts_speculations[i];
3164 dtrace_speculation_state_t current, new;
3165
3166 if (!spec->dtsp_cleaning)
3167 continue;
3168
3169 current = spec->dtsp_state;
3170 ASSERT(current == DTRACESPEC_DISCARDING ||
3171 current == DTRACESPEC_COMMITTINGMANY);
3172
3173 new = DTRACESPEC_INACTIVE;
3174
3175 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3176 ASSERT(rv == current);
3177 spec->dtsp_cleaning = 0;
3178 }
3179}
3180
3181/*
3182 * Called as part of a speculate() to get the speculative buffer associated
3183 * with a given speculation. Returns NULL if the specified speculation is not
3184 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
3185 * the active CPU is not the specified CPU -- the speculation will be
3186 * atomically transitioned into the ACTIVEMANY state.
3187 */
f427ee49 3188__attribute__((noinline))
3189static dtrace_buffer_t *
3190dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3191 dtrace_specid_t which)
3192{
3193 dtrace_speculation_t *spec;
b0d623f7 3194 dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE;
3195 dtrace_buffer_t *buf;
3196
3197 if (which == 0)
3198 return (NULL);
3199
b0d623f7 3200 if (which > (dtrace_specid_t)state->dts_nspeculations) {
3201 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3202 return (NULL);
3203 }
3204
3205 spec = &state->dts_speculations[which - 1];
3206 buf = &spec->dtsp_buffer[cpuid];
3207
3208 do {
3209 current = spec->dtsp_state;
3210
3211 switch (current) {
3212 case DTRACESPEC_INACTIVE:
3213 case DTRACESPEC_COMMITTINGMANY:
3214 case DTRACESPEC_DISCARDING:
3215 return (NULL);
3216
3217 case DTRACESPEC_COMMITTING:
3218 ASSERT(buf->dtb_offset == 0);
3219 return (NULL);
3220
3221 case DTRACESPEC_ACTIVEONE:
3222 /*
3223 * This speculation is currently active on one CPU.
3224 * Check the offset in the buffer; if it's non-zero,
3225 * that CPU must be us (and we leave the state alone).
3226 * If it's zero, assume that we're starting on a new
3227 * CPU -- and change the state to indicate that the
3228 * speculation is active on more than one CPU.
3229 */
3230 if (buf->dtb_offset != 0)
3231 return (buf);
3232
3233 new = DTRACESPEC_ACTIVEMANY;
3234 break;
3235
3236 case DTRACESPEC_ACTIVEMANY:
3237 return (buf);
3238
3239 case DTRACESPEC_ACTIVE:
3240 new = DTRACESPEC_ACTIVEONE;
3241 break;
3242
3243 default:
3244 ASSERT(0);
3245 }
3246 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3247 current, new) != current);
3248
3249 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3250 return (buf);
3251}
3252
3253/*
3254 * Return a string. In the event that the user lacks the privilege to access
3255 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3256 * don't fail access checking.
3257 *
3258 * dtrace_dif_variable() uses this routine as a helper for various
3259 * builtin values such as 'execname' and 'probefunc.'
3260 */
b0d623f7 3261static
3262uintptr_t
3263dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3264 dtrace_mstate_t *mstate)
3265{
3266 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3267 uintptr_t ret;
3268 size_t strsz;
3269
3270 /*
3271 * The easy case: this probe is allowed to read all of memory, so
3272 * we can just return this as a vanilla pointer.
3273 */
3274 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3275 return (addr);
3276
3277 /*
3278 * This is the tougher case: we copy the string in question from
3279 * kernel memory into scratch memory and return it that way: this
3280 * ensures that we won't trip up when access checking tests the
3281 * BYREF return value.
3282 */
3283 strsz = dtrace_strlen((char *)addr, size) + 1;
3284
3285 if (mstate->dtms_scratch_ptr + strsz >
3286 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3287 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 3288 return (0);
3289 }
3290
3291 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3292 strsz);
3293 ret = mstate->dtms_scratch_ptr;
3294 mstate->dtms_scratch_ptr += strsz;
3295 return (ret);
3296}
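
/*
 * An illustrative sketch (hypothetical helper) of the scratch-space
 * discipline used above: scratch memory is a simple per-probe bump
 * allocator. A consumer checks that the region between dtms_scratch_ptr
 * and the end of scratch can hold its result, advances the pointer, and
 * sets CPU_DTRACE_NOSCRATCH on exhaustion instead of failing later access
 * checks.
 */
static uintptr_t
dtrace_scratch_alloc_example(dtrace_mstate_t *mstate, size_t size)
{
	uintptr_t ret;

	if (mstate->dtms_scratch_ptr + size >
	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
		return (0);
	}

	ret = mstate->dtms_scratch_ptr;
	mstate->dtms_scratch_ptr += size;
	return (ret);
}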
3297
3298/*
3299 * This function implements the DIF emulator's variable lookups. The emulator
3300 * passes a reserved variable identifier and optional built-in array index.
3301 */
3302static uint64_t
3303dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3304 uint64_t ndx)
3305{
3306 /*
3307 * If we're accessing one of the uncached arguments, we'll turn this
3308 * into a reference in the args array.
3309 */
3310 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3311 ndx = v - DIF_VAR_ARG0;
3312 v = DIF_VAR_ARGS;
3313 }
3314
3315 switch (v) {
3316 case DIF_VAR_ARGS:
3317 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3318 if (ndx >= sizeof (mstate->dtms_arg) /
3319 sizeof (mstate->dtms_arg[0])) {
cb323159 3320 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
5ba3f43e 3321 dtrace_vstate_t *vstate = &state->dts_vstate;
3322 dtrace_provider_t *pv;
3323 uint64_t val;
3324
3325 pv = mstate->dtms_probe->dtpr_provider;
3326 if (pv->dtpv_pops.dtps_getargval != NULL)
3327 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3328 mstate->dtms_probe->dtpr_id,
3329 mstate->dtms_probe->dtpr_arg, ndx, aframes);
b0d623f7 3330 /* Special case access of arg5 as passed to dtrace_probe_error() (which see.) */
2d21ac55 3331 else if (mstate->dtms_probe->dtpr_id == dtrace_probeid_error && ndx == 5) {
b0d623f7 3332 return ((dtrace_state_t *)(uintptr_t)(mstate->dtms_arg[0]))->dts_arg_error_illval;
2d21ac55 3333 }
fe8ab488 3334
2d21ac55 3335 else
5ba3f43e 3336 val = dtrace_getarg(ndx, aframes, mstate, vstate);
3337
3338 /*
3339 * This is regrettably required to keep the compiler
3340 * from tail-optimizing the call to dtrace_getarg().
3341 * The condition always evaluates to true, but the
3342 * compiler has no way of figuring that out a priori.
3343 * (None of this would be necessary if the compiler
3344 * could be relied upon to _always_ tail-optimize
3345 * the call to dtrace_getarg() -- but it can't.)
3346 */
3347 if (mstate->dtms_probe != NULL)
3348 return (val);
3349
3350 ASSERT(0);
3351 }
3352
3353 return (mstate->dtms_arg[ndx]);
3354
3355 case DIF_VAR_UREGS: {
3356 thread_t thread;
3357
3358 if (!dtrace_priv_proc(state))
3359 return (0);
3360
3361 if ((thread = current_thread()) == NULL) {
3362 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3363 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = 0;
3364 return (0);
3365 }
3366
3367 return (dtrace_getreg(find_user_regs(thread), ndx));
3368 }
2d21ac55 3369
3370 case DIF_VAR_VMREGS: {
3371 uint64_t rval;
3372
3373 if (!dtrace_priv_kernel(state))
3374 return (0);
3375
3376 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3377
3378 rval = dtrace_getvmreg(ndx);
3379
3380 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3381
3382 return (rval);
3383 }
fe8ab488 3384
3385 case DIF_VAR_CURTHREAD:
3386 if (!dtrace_priv_kernel(state))
3387 return (0);
3388
3389 return ((uint64_t)(uintptr_t)current_thread());
3390
3391 case DIF_VAR_TIMESTAMP:
3392 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3393 mstate->dtms_timestamp = dtrace_gethrtime();
3394 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3395 }
3396 return (mstate->dtms_timestamp);
3397
3398 case DIF_VAR_VTIMESTAMP:
3399 ASSERT(dtrace_vtime_references != 0);
3400 return (dtrace_get_thread_vtime(current_thread()));
3401
3402 case DIF_VAR_WALLTIMESTAMP:
3403 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3404 mstate->dtms_walltimestamp = dtrace_gethrestime();
3405 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3406 }
3407 return (mstate->dtms_walltimestamp);
3408
3409 case DIF_VAR_MACHTIMESTAMP:
3410 if (!(mstate->dtms_present & DTRACE_MSTATE_MACHTIMESTAMP)) {
3411 mstate->dtms_machtimestamp = mach_absolute_time();
3412 mstate->dtms_present |= DTRACE_MSTATE_MACHTIMESTAMP;
3413 }
3414 return (mstate->dtms_machtimestamp);
3415
3416 case DIF_VAR_MACHCTIMESTAMP:
3417 if (!(mstate->dtms_present & DTRACE_MSTATE_MACHCTIMESTAMP)) {
3418 mstate->dtms_machctimestamp = mach_continuous_time();
3419 mstate->dtms_present |= DTRACE_MSTATE_MACHCTIMESTAMP;
3420 }
3421 return (mstate->dtms_machctimestamp);
3422
3423
3424 case DIF_VAR_CPU:
3425 return ((uint64_t) dtrace_get_thread_last_cpu_id(current_thread()));
3426
3427 case DIF_VAR_IPL:
3428 if (!dtrace_priv_kernel(state))
3429 return (0);
3430 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3431 mstate->dtms_ipl = dtrace_getipl();
3432 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3433 }
3434 return (mstate->dtms_ipl);
3435
3436 case DIF_VAR_EPID:
3437 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3438 return (mstate->dtms_epid);
3439
3440 case DIF_VAR_ID:
3441 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3442 return (mstate->dtms_probe->dtpr_id);
3443
3444 case DIF_VAR_STACKDEPTH:
3445 if (!dtrace_priv_kernel(state))
3446 return (0);
3447 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
cb323159 3448 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3449
3450 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3451 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3452 }
3453 return (mstate->dtms_stackdepth);
3454
3455 case DIF_VAR_USTACKDEPTH:
3456 if (!dtrace_priv_proc(state))
3457 return (0);
3458 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3459 /*
3460 * See comment in DIF_VAR_PID.
3461 */
3462 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3463 CPU_ON_INTR(CPU)) {
3464 mstate->dtms_ustackdepth = 0;
3465 } else {
3466 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3467 mstate->dtms_ustackdepth =
3468 dtrace_getustackdepth();
3469 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3470 }
3471 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3472 }
3473 return (mstate->dtms_ustackdepth);
3474
3475 case DIF_VAR_CALLER:
3476 if (!dtrace_priv_kernel(state))
3477 return (0);
3478 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
cb323159 3479 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2d21ac55
A
3480
3481 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3482 /*
3483 * If this is an unanchored probe, we are
3484 * required to go through the slow path:
3485 * dtrace_caller() only guarantees correct
3486 * results for anchored probes.
3487 */
3488 pc_t caller[2];
3489
3490 dtrace_getpcstack(caller, 2, aframes,
3491 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3492 mstate->dtms_caller = caller[1];
3493 } else if ((mstate->dtms_caller =
fe8ab488 3494 dtrace_caller(aframes)) == (uintptr_t)-1) {
2d21ac55
A
3495 /*
3496 * We have failed to do this the quick way;
3497 * we must resort to the slower approach of
3498 * calling dtrace_getpcstack().
3499 */
3500 pc_t caller;
3501
3502 dtrace_getpcstack(&caller, 1, aframes, NULL);
3503 mstate->dtms_caller = caller;
3504 }
3505
3506 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3507 }
3508 return (mstate->dtms_caller);
3509
3510 case DIF_VAR_UCALLER:
3511 if (!dtrace_priv_proc(state))
3512 return (0);
3513
3514 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3515 uint64_t ustack[3];
3516
3517 /*
3518 * dtrace_getupcstack() fills in the first uint64_t
3519 * with the current PID. The second uint64_t will
3520 * be the program counter at user-level. The third
3521 * uint64_t will contain the caller, which is what
3522 * we're after.
3523 */
fe8ab488 3524 ustack[2] = 0;
b0d623f7 3525 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2d21ac55 3526 dtrace_getupcstack(ustack, 3);
b0d623f7 3527 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2d21ac55
A
3528 mstate->dtms_ucaller = ustack[2];
3529 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3530 }
3531
3532 return (mstate->dtms_ucaller);
3533
3534 case DIF_VAR_PROBEPROV:
3535 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
b0d623f7
A
3536 return (dtrace_dif_varstr(
3537 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3538 state, mstate));
2d21ac55
A
3539
3540 case DIF_VAR_PROBEMOD:
3541 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
b0d623f7
A
3542 return (dtrace_dif_varstr(
3543 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3544 state, mstate));
2d21ac55
A
3545
3546 case DIF_VAR_PROBEFUNC:
3547 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
b0d623f7
A
3548 return (dtrace_dif_varstr(
3549 (uintptr_t)mstate->dtms_probe->dtpr_func,
3550 state, mstate));
2d21ac55
A
3551
3552 case DIF_VAR_PROBENAME:
3553 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
b0d623f7
A
3554 return (dtrace_dif_varstr(
3555 (uintptr_t)mstate->dtms_probe->dtpr_name,
3556 state, mstate));
2d21ac55 3557
2d21ac55 3558 case DIF_VAR_PID:
935ed37a 3559 if (!dtrace_priv_proc_relaxed(state))
2d21ac55
A
3560 return (0);
3561
3562 /*
3563 * Note that we are assuming that an unanchored probe is
3564 * always due to a high-level interrupt. (And we're assuming
3565 * that there is only a single high level interrupt.)
3566 */
3567 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3568 /* Anchored probe that fires while on an interrupt accrues to process 0 */
3569 return 0;
3570
39236c6e 3571 return ((uint64_t)dtrace_proc_selfpid());
2d21ac55 3572
2d21ac55 3573 case DIF_VAR_PPID:
935ed37a 3574 if (!dtrace_priv_proc_relaxed(state))
2d21ac55
A
3575 return (0);
3576
3577 /*
3578 * See comment in DIF_VAR_PID.
3579 */
3580 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3581 return (0);
3582
39236c6e 3583 return ((uint64_t)dtrace_proc_selfppid());
2d21ac55 3584
2d21ac55 3585 case DIF_VAR_TID:
b0d623f7
A
3586 /* We do not need to check for null current_thread() */
3587 return thread_tid(current_thread()); /* globally unique */
3588
3589 case DIF_VAR_PTHREAD_SELF:
3590 if (!dtrace_priv_proc(state))
3591 return (0);
3592
 3593 /* Not currently supported, but pthread_self should be recoverable by applying dispatchqoffset as a delta to dispatchqaddr */
3594 return 0;
3595
3596 case DIF_VAR_DISPATCHQADDR:
3597 if (!dtrace_priv_proc(state))
2d21ac55
A
3598 return (0);
3599
b0d623f7
A
3600 /* We do not need to check for null current_thread() */
3601 return thread_dispatchqaddr(current_thread());
2d21ac55 3602
2d21ac55
A
3603 case DIF_VAR_EXECNAME:
3604 {
3605 char *xname = (char *)mstate->dtms_scratch_ptr;
f427ee49
A
3606 char *pname = proc_best_name(curproc);
3607 size_t scratch_size = sizeof(proc_name_t);
2d21ac55
A
3608
3609 /* The scratch allocation's lifetime is that of the clause. */
b0d623f7
A
3610 if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
3611 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2d21ac55 3612 return 0;
b0d623f7 3613 }
2d21ac55 3614
935ed37a 3615 if (!dtrace_priv_proc_relaxed(state))
2d21ac55
A
3616 return (0);
3617
3618 mstate->dtms_scratch_ptr += scratch_size;
f427ee49 3619 strlcpy(xname, pname, scratch_size);
2d21ac55
A
3620
3621 return ((uint64_t)(uintptr_t)xname);
3622 }
2d21ac55 3623
2d21ac55 3624
2d21ac55 3625 case DIF_VAR_ZONENAME:
5ba3f43e
A
3626 {
3627 /* scratch_size is equal to length('global') + 1 for the null-terminator. */
3628 char *zname = (char *)mstate->dtms_scratch_ptr;
3629 size_t scratch_size = 6 + 1;
39236c6e 3630
2d21ac55
A
3631 if (!dtrace_priv_proc(state))
3632 return (0);
39236c6e 3633
5ba3f43e
A
3634 /* The scratch allocation's lifetime is that of the clause. */
3635 if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
3636 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3637 return 0;
3638 }
3639
3640 mstate->dtms_scratch_ptr += scratch_size;
39236c6e 3641
5ba3f43e
A
 3642 /* The kernel does not provide zonename; this will always return 'global'. */
3643 strlcpy(zname, "global", scratch_size);
39236c6e 3644
5ba3f43e
A
3645 return ((uint64_t)(uintptr_t)zname);
3646 }
39236c6e 3647
5ba3f43e
A
3648#if MONOTONIC
3649 case DIF_VAR_CPUINSTRS:
3650 return mt_cur_cpu_instrs();
3651
3652 case DIF_VAR_CPUCYCLES:
3653 return mt_cur_cpu_cycles();
3654
3655 case DIF_VAR_VINSTRS:
3656 return mt_cur_thread_instrs();
3657
3658 case DIF_VAR_VCYCLES:
3659 return mt_cur_thread_cycles();
3660#else /* MONOTONIC */
3661 case DIF_VAR_CPUINSTRS: /* FALLTHROUGH */
3662 case DIF_VAR_CPUCYCLES: /* FALLTHROUGH */
3663 case DIF_VAR_VINSTRS: /* FALLTHROUGH */
3664 case DIF_VAR_VCYCLES: /* FALLTHROUGH */
3665 return 0;
3666#endif /* !MONOTONIC */
2d21ac55 3667
2d21ac55 3668 case DIF_VAR_UID:
39236c6e 3669 if (!dtrace_priv_proc_relaxed(state))
2d21ac55
A
3670 return (0);
3671
3672 /*
3673 * See comment in DIF_VAR_PID.
3674 */
3675 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3676 return (0);
3677
39236c6e 3678 return ((uint64_t) dtrace_proc_selfruid());
2d21ac55 3679
2d21ac55
A
3680 case DIF_VAR_GID:
3681 if (!dtrace_priv_proc(state))
3682 return (0);
3683
3684 /*
3685 * See comment in DIF_VAR_PID.
3686 */
3687 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3688 return (0);
3689
3690 if (dtrace_CRED() != NULL)
b0d623f7 3691 /* Credential does not require lazy initialization. */
2d21ac55 3692 return ((uint64_t)kauth_getgid());
b0d623f7
A
3693 else {
3694 /* proc_lock would be taken under kauth_cred_proc_ref() in kauth_cred_get(). */
3695 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3696 return -1ULL;
3697 }
2d21ac55 3698
2d21ac55
A
3699 case DIF_VAR_ERRNO: {
3700 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
3701 if (!dtrace_priv_proc(state))
3702 return (0);
3703
3704 /*
3705 * See comment in DIF_VAR_PID.
3706 */
3707 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3708 return (0);
3709
b0d623f7
A
3710 if (uthread)
3711 return (uint64_t)uthread->t_dtrace_errno;
3712 else {
3713 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3714 return -1ULL;
3715 }
2d21ac55 3716 }
2d21ac55
A
3717
3718 default:
3719 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3720 return (0);
3721 }
3722}
3723
cb323159
A
3724typedef enum dtrace_json_state {
3725 DTRACE_JSON_REST = 1,
3726 DTRACE_JSON_OBJECT,
3727 DTRACE_JSON_STRING,
3728 DTRACE_JSON_STRING_ESCAPE,
3729 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3730 DTRACE_JSON_COLON,
3731 DTRACE_JSON_COMMA,
3732 DTRACE_JSON_VALUE,
3733 DTRACE_JSON_IDENTIFIER,
3734 DTRACE_JSON_NUMBER,
3735 DTRACE_JSON_NUMBER_FRAC,
3736 DTRACE_JSON_NUMBER_EXP,
3737 DTRACE_JSON_COLLECT_OBJECT
3738} dtrace_json_state_t;
3739
3740/*
3741 * This function possesses just enough knowledge about JSON to extract a single
3742 * value from a JSON string and store it in the scratch buffer. It is able
3743 * to extract nested object values, and members of arrays by index.
3744 *
3745 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3746 * be looked up as we descend into the object tree. e.g.
3747 *
3748 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3749 * with nelems = 5.
3750 *
3751 * The run time of this function must be bounded above by strsize to limit the
3752 * amount of work done in probe context. As such, it is implemented as a
3753 * simple state machine, reading one character at a time using safe loads
3754 * until we find the requested element, hit a parsing error or run off the
3755 * end of the object or string.
3756 *
3757 * As there is no way for a subroutine to return an error without interrupting
3758 * clause execution, we simply return NULL in the event of a missing key or any
3759 * other error condition. Each NULL return in this function is commented with
3760 * the error condition it represents -- parsing or otherwise.
3761 *
3762 * The set of states for the state machine closely matches the JSON
3763 * specification (http://json.org/). Briefly:
3764 *
3765 * DTRACE_JSON_REST:
3766 * Skip whitespace until we find either a top-level Object, moving
3767 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3768 *
3769 * DTRACE_JSON_OBJECT:
3770 * Locate the next key String in an Object. Sets a flag to denote
3771 * the next String as a key string and moves to DTRACE_JSON_STRING.
3772 *
3773 * DTRACE_JSON_COLON:
3774 * Skip whitespace until we find the colon that separates key Strings
3775 * from their values. Once found, move to DTRACE_JSON_VALUE.
3776 *
3777 * DTRACE_JSON_VALUE:
3778 * Detects the type of the next value (String, Number, Identifier, Object
3779 * or Array) and routes to the states that process that type. Here we also
3780 * deal with the element selector list if we are requested to traverse down
3781 * into the object tree.
3782 *
3783 * DTRACE_JSON_COMMA:
3784 * Skip whitespace until we find the comma that separates key-value pairs
3785 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3786 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3787 * states return to this state at the end of their value, unless otherwise
3788 * noted.
3789 *
3790 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3791 * Processes a Number literal from the JSON, including any exponent
3792 * component that may be present. Numbers are returned as strings, which
3793 * may be passed to strtoll() if an integer is required.
3794 *
3795 * DTRACE_JSON_IDENTIFIER:
3796 * Processes a "true", "false" or "null" literal in the JSON.
3797 *
3798 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3799 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3800 * Processes a String literal from the JSON, whether the String denotes
3801 * a key, a value or part of a larger Object. Handles all escape sequences
3802 * present in the specification, including four-digit unicode characters,
3803 * but merely includes the escape sequence without converting it to the
3804 * actual escaped character. If the String is flagged as a key, we
3805 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3806 *
3807 * DTRACE_JSON_COLLECT_OBJECT:
3808 * This state collects an entire Object (or Array), correctly handling
3809 * embedded strings. If the full element selector list matches this nested
3810 * object, we return the Object in full as a string. If not, we use this
3811 * state to skip to the next value at this level and continue processing.
3812 */
3813static char *
3814dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3815 char *dest)
3816{
3817 dtrace_json_state_t state = DTRACE_JSON_REST;
3818 int64_t array_elem = INT64_MIN;
3819 int64_t array_pos = 0;
3820 uint8_t escape_unicount = 0;
3821 boolean_t string_is_key = B_FALSE;
3822 boolean_t collect_object = B_FALSE;
3823 boolean_t found_key = B_FALSE;
3824 boolean_t in_array = B_FALSE;
3825 uint32_t braces = 0, brackets = 0;
3826 char *elem = elemlist;
3827 char *dd = dest;
3828 uintptr_t cur;
3829
3830 for (cur = json; cur < json + size; cur++) {
3831 char cc = dtrace_load8(cur);
3832 if (cc == '\0')
3833 return (NULL);
3834
3835 switch (state) {
3836 case DTRACE_JSON_REST:
3837 if (isspace(cc))
3838 break;
3839
3840 if (cc == '{') {
3841 state = DTRACE_JSON_OBJECT;
3842 break;
3843 }
3844
3845 if (cc == '[') {
3846 in_array = B_TRUE;
3847 array_pos = 0;
3848 array_elem = dtrace_strtoll(elem, 10, size);
3849 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3850 state = DTRACE_JSON_VALUE;
3851 break;
3852 }
3853
3854 /*
3855 * ERROR: expected to find a top-level object or array.
3856 */
3857 return (NULL);
3858 case DTRACE_JSON_OBJECT:
3859 if (isspace(cc))
3860 break;
3861
3862 if (cc == '"') {
3863 state = DTRACE_JSON_STRING;
3864 string_is_key = B_TRUE;
3865 break;
3866 }
3867
3868 /*
3869 * ERROR: either the object did not start with a key
3870 * string, or we've run off the end of the object
3871 * without finding the requested key.
3872 */
3873 return (NULL);
3874 case DTRACE_JSON_STRING:
3875 if (cc == '\\') {
3876 *dd++ = '\\';
3877 state = DTRACE_JSON_STRING_ESCAPE;
3878 break;
3879 }
3880
3881 if (cc == '"') {
3882 if (collect_object) {
3883 /*
3884 * We don't reset the dest here, as
3885 * the string is part of a larger
3886 * object being collected.
3887 */
3888 *dd++ = cc;
3889 collect_object = B_FALSE;
3890 state = DTRACE_JSON_COLLECT_OBJECT;
3891 break;
3892 }
3893 *dd = '\0';
3894 dd = dest; /* reset string buffer */
3895 if (string_is_key) {
3896 if (dtrace_strncmp(dest, elem,
3897 size) == 0)
3898 found_key = B_TRUE;
3899 } else if (found_key) {
3900 if (nelems > 1) {
3901 /*
3902 * We expected an object, not
3903 * this string.
3904 */
3905 return (NULL);
3906 }
3907 return (dest);
3908 }
3909 state = string_is_key ? DTRACE_JSON_COLON :
3910 DTRACE_JSON_COMMA;
3911 string_is_key = B_FALSE;
3912 break;
3913 }
3914
3915 *dd++ = cc;
3916 break;
3917 case DTRACE_JSON_STRING_ESCAPE:
3918 *dd++ = cc;
3919 if (cc == 'u') {
3920 escape_unicount = 0;
3921 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3922 } else {
3923 state = DTRACE_JSON_STRING;
3924 }
3925 break;
3926 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3927 if (!isxdigit(cc)) {
3928 /*
 3929 * ERROR: invalid Unicode escape, expected
 3930 * four valid hexadecimal digits.
3931 */
3932 return (NULL);
3933 }
3934
3935 *dd++ = cc;
3936 if (++escape_unicount == 4)
3937 state = DTRACE_JSON_STRING;
3938 break;
3939 case DTRACE_JSON_COLON:
3940 if (isspace(cc))
3941 break;
3942
3943 if (cc == ':') {
3944 state = DTRACE_JSON_VALUE;
3945 break;
3946 }
3947
3948 /*
3949 * ERROR: expected a colon.
3950 */
3951 return (NULL);
3952 case DTRACE_JSON_COMMA:
3953 if (isspace(cc))
3954 break;
3955
3956 if (cc == ',') {
3957 if (in_array) {
3958 state = DTRACE_JSON_VALUE;
3959 if (++array_pos == array_elem)
3960 found_key = B_TRUE;
3961 } else {
3962 state = DTRACE_JSON_OBJECT;
3963 }
3964 break;
3965 }
3966
3967 /*
3968 * ERROR: either we hit an unexpected character, or
3969 * we reached the end of the object or array without
3970 * finding the requested key.
3971 */
3972 return (NULL);
3973 case DTRACE_JSON_IDENTIFIER:
3974 if (islower(cc)) {
3975 *dd++ = cc;
3976 break;
3977 }
3978
3979 *dd = '\0';
3980 dd = dest; /* reset string buffer */
3981
3982 if (dtrace_strncmp(dest, "true", 5) == 0 ||
3983 dtrace_strncmp(dest, "false", 6) == 0 ||
3984 dtrace_strncmp(dest, "null", 5) == 0) {
3985 if (found_key) {
3986 if (nelems > 1) {
3987 /*
3988 * ERROR: We expected an object,
3989 * not this identifier.
3990 */
3991 return (NULL);
3992 }
3993 return (dest);
3994 } else {
3995 cur--;
3996 state = DTRACE_JSON_COMMA;
3997 break;
3998 }
3999 }
4000
4001 /*
4002 * ERROR: we did not recognise the identifier as one
4003 * of those in the JSON specification.
4004 */
4005 return (NULL);
4006 case DTRACE_JSON_NUMBER:
4007 if (cc == '.') {
4008 *dd++ = cc;
4009 state = DTRACE_JSON_NUMBER_FRAC;
4010 break;
4011 }
4012
4013 if (cc == 'x' || cc == 'X') {
4014 /*
4015 * ERROR: specification explicitly excludes
 4016 * hexadecimal or octal numbers.
4017 */
4018 return (NULL);
4019 }
4020
f427ee49 4021 OS_FALLTHROUGH;
cb323159
A
4022 case DTRACE_JSON_NUMBER_FRAC:
4023 if (cc == 'e' || cc == 'E') {
4024 *dd++ = cc;
4025 state = DTRACE_JSON_NUMBER_EXP;
4026 break;
4027 }
4028
4029 if (cc == '+' || cc == '-') {
4030 /*
4031 * ERROR: expect sign as part of exponent only.
4032 */
4033 return (NULL);
4034 }
f427ee49 4035 OS_FALLTHROUGH;
cb323159
A
4036 case DTRACE_JSON_NUMBER_EXP:
4037 if (isdigit(cc) || cc == '+' || cc == '-') {
4038 *dd++ = cc;
4039 break;
4040 }
4041
4042 *dd = '\0';
4043 dd = dest; /* reset string buffer */
4044 if (found_key) {
4045 if (nelems > 1) {
4046 /*
4047 * ERROR: We expected an object, not
4048 * this number.
4049 */
4050 return (NULL);
4051 }
4052 return (dest);
4053 }
4054
4055 cur--;
4056 state = DTRACE_JSON_COMMA;
4057 break;
4058 case DTRACE_JSON_VALUE:
4059 if (isspace(cc))
4060 break;
4061
4062 if (cc == '{' || cc == '[') {
4063 if (nelems > 1 && found_key) {
4064 in_array = cc == '[' ? B_TRUE : B_FALSE;
4065 /*
4066 * If our element selector directs us
4067 * to descend into this nested object,
4068 * then move to the next selector
4069 * element in the list and restart the
4070 * state machine.
4071 */
4072 while (*elem != '\0')
4073 elem++;
4074 elem++; /* skip the inter-element NUL */
4075 nelems--;
4076 dd = dest;
4077 if (in_array) {
4078 state = DTRACE_JSON_VALUE;
4079 array_pos = 0;
4080 array_elem = dtrace_strtoll(
4081 elem, 10, size);
4082 found_key = array_elem == 0 ?
4083 B_TRUE : B_FALSE;
4084 } else {
4085 found_key = B_FALSE;
4086 state = DTRACE_JSON_OBJECT;
4087 }
4088 break;
4089 }
4090
4091 /*
4092 * Otherwise, we wish to either skip this
4093 * nested object or return it in full.
4094 */
4095 if (cc == '[')
4096 brackets = 1;
4097 else
4098 braces = 1;
4099 *dd++ = cc;
4100 state = DTRACE_JSON_COLLECT_OBJECT;
4101 break;
4102 }
4103
4104 if (cc == '"') {
4105 state = DTRACE_JSON_STRING;
4106 break;
4107 }
4108
4109 if (islower(cc)) {
4110 /*
4111 * Here we deal with true, false and null.
4112 */
4113 *dd++ = cc;
4114 state = DTRACE_JSON_IDENTIFIER;
4115 break;
4116 }
4117
4118 if (cc == '-' || isdigit(cc)) {
4119 *dd++ = cc;
4120 state = DTRACE_JSON_NUMBER;
4121 break;
4122 }
4123
4124 /*
4125 * ERROR: unexpected character at start of value.
4126 */
4127 return (NULL);
4128 case DTRACE_JSON_COLLECT_OBJECT:
4129 if (cc == '\0')
4130 /*
4131 * ERROR: unexpected end of input.
4132 */
4133 return (NULL);
4134
4135 *dd++ = cc;
4136 if (cc == '"') {
4137 collect_object = B_TRUE;
4138 state = DTRACE_JSON_STRING;
4139 break;
4140 }
4141
4142 if (cc == ']') {
4143 if (brackets-- == 0) {
4144 /*
4145 * ERROR: unbalanced brackets.
4146 */
4147 return (NULL);
4148 }
4149 } else if (cc == '}') {
4150 if (braces-- == 0) {
4151 /*
4152 * ERROR: unbalanced braces.
4153 */
4154 return (NULL);
4155 }
4156 } else if (cc == '{') {
4157 braces++;
4158 } else if (cc == '[') {
4159 brackets++;
4160 }
4161
4162 if (brackets == 0 && braces == 0) {
4163 if (found_key) {
4164 *dd = '\0';
4165 return (dest);
4166 }
4167 dd = dest; /* reset string buffer */
4168 state = DTRACE_JSON_COMMA;
4169 }
4170 break;
4171 }
4172 }
4173 return (NULL);
4174}
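
/*
 * Illustrative sketch (not part of the original source): how a packed
 * element selector reaches dtrace_json(). The splitting itself is done
 * by the json() subroutine (see DIF_SUBR_JSON below); the helper name
 * and arguments here are hypothetical.
 */
static char *
dtrace_json_example(uint64_t strsize, uintptr_t json_str, char *scratch)
{
	/* "foo" NUL "0" NUL "bar" NUL -- i.e. the selector "foo[0].bar" */
	char elemlist[] = "foo\0" "0\0" "bar";

	return (dtrace_json(strsize, json_str, elemlist, 3, scratch));
}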
4175
2d21ac55
A
4176/*
4177 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4178 * Notice that we don't bother validating the proper number of arguments or
4179 * their types in the tuple stack. This isn't needed because all argument
4180 * interpretation is safe because of our load safety -- the worst that can
4181 * happen is that a bogus program can obtain bogus results.
4182 */
4183static void
4184dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4185 dtrace_key_t *tupregs, int nargs,
4186 dtrace_mstate_t *mstate, dtrace_state_t *state)
4187{
4188 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
2d21ac55 4189 volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
b0d623f7 4190 dtrace_vstate_t *vstate = &state->dts_vstate;
2d21ac55
A
4191
4192#if !defined(__APPLE__)
4193 union {
4194 mutex_impl_t mi;
4195 uint64_t mx;
4196 } m;
4197
4198 union {
4199 krwlock_t ri;
4200 uintptr_t rw;
4201 } r;
4202#else
b0d623f7 4203/* FIXME: awaits lock/mutex work */
2d21ac55
A
4204#endif /* __APPLE__ */
4205
4206 switch (subr) {
4207 case DIF_SUBR_RAND:
cb323159
A
4208 regs[rd] = dtrace_xoroshiro128_plus_next(
4209 state->dts_rstate[CPU->cpu_id]);
2d21ac55
A
4210 break;
4211
4212#if !defined(__APPLE__)
4213 case DIF_SUBR_MUTEX_OWNED:
b0d623f7
A
4214 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4215 mstate, vstate)) {
fe8ab488 4216 regs[rd] = 0;
b0d623f7
A
4217 break;
4218 }
4219
2d21ac55
A
4220 m.mx = dtrace_load64(tupregs[0].dttk_value);
4221 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4222 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4223 else
4224 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4225 break;
4226
4227 case DIF_SUBR_MUTEX_OWNER:
b0d623f7
A
4228 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4229 mstate, vstate)) {
fe8ab488 4230 regs[rd] = 0;
b0d623f7
A
4231 break;
4232 }
4233
2d21ac55
A
4234 m.mx = dtrace_load64(tupregs[0].dttk_value);
4235 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4236 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4237 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4238 else
4239 regs[rd] = 0;
4240 break;
4241
4242 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
b0d623f7
A
4243 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4244 mstate, vstate)) {
fe8ab488 4245 regs[rd] = 0;
b0d623f7
A
4246 break;
4247 }
4248
2d21ac55
A
4249 m.mx = dtrace_load64(tupregs[0].dttk_value);
4250 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4251 break;
4252
4253 case DIF_SUBR_MUTEX_TYPE_SPIN:
b0d623f7
A
4254 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4255 mstate, vstate)) {
fe8ab488 4256 regs[rd] = 0;
b0d623f7
A
4257 break;
4258 }
4259
2d21ac55
A
4260 m.mx = dtrace_load64(tupregs[0].dttk_value);
4261 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4262 break;
4263
4264 case DIF_SUBR_RW_READ_HELD: {
4265 uintptr_t tmp;
4266
b0d623f7
A
4267 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4268 mstate, vstate)) {
fe8ab488 4269 regs[rd] = 0;
b0d623f7
A
4270 break;
4271 }
4272
2d21ac55
A
4273 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4274 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4275 break;
4276 }
4277
4278 case DIF_SUBR_RW_WRITE_HELD:
b0d623f7
A
4279 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4280 mstate, vstate)) {
fe8ab488 4281 regs[rd] = 0;
b0d623f7
A
4282 break;
4283 }
4284
2d21ac55
A
4285 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4286 regs[rd] = _RW_WRITE_HELD(&r.ri);
4287 break;
4288
4289 case DIF_SUBR_RW_ISWRITER:
b0d623f7
A
4290 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4291 mstate, vstate)) {
fe8ab488 4292 regs[rd] = 0;
b0d623f7
A
4293 break;
4294 }
4295
2d21ac55
A
4296 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4297 regs[rd] = _RW_ISWRITER(&r.ri);
4298 break;
4299#else
b0d623f7 4300/* FIXME: awaits lock/mutex work */
2d21ac55
A
4301#endif /* __APPLE__ */
4302
4303 case DIF_SUBR_BCOPY: {
4304 /*
4305 * We need to be sure that the destination is in the scratch
4306 * region -- no other region is allowed.
4307 */
4308 uintptr_t src = tupregs[0].dttk_value;
4309 uintptr_t dest = tupregs[1].dttk_value;
4310 size_t size = tupregs[2].dttk_value;
4311
4312 if (!dtrace_inscratch(dest, size, mstate)) {
4313 *flags |= CPU_DTRACE_BADADDR;
4314 *illval = regs[rd];
4315 break;
4316 }
4317
b0d623f7 4318 if (!dtrace_canload(src, size, mstate, vstate)) {
fe8ab488 4319 regs[rd] = 0;
b0d623f7
A
4320 break;
4321 }
4322
2d21ac55
A
4323 dtrace_bcopy((void *)src, (void *)dest, size);
4324 break;
4325 }
4326
4327 case DIF_SUBR_ALLOCA:
4328 case DIF_SUBR_COPYIN: {
4329 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4330 uint64_t size =
4331 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4332 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4333
39037602
A
4334 /*
4335 * Check whether the user can access kernel memory
4336 */
4337 if (dtrace_priv_kernel(state) == 0) {
4338 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
4339 regs[rd] = 0;
4340 break;
4341 }
2d21ac55
A
4342 /*
4343 * This action doesn't require any credential checks since
4344 * probes will not activate in user contexts to which the
4345 * enabling user does not have permissions.
4346 */
b0d623f7
A
4347
4348 /*
4349 * Rounding up the user allocation size could have overflowed
4350 * a large, bogus allocation (like -1ULL) to 0.
4351 */
4352 if (scratch_size < size ||
4353 !DTRACE_INSCRATCH(mstate, scratch_size)) {
2d21ac55 4354 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4355 regs[rd] = 0;
2d21ac55
A
4356 break;
4357 }
4358
4359 if (subr == DIF_SUBR_COPYIN) {
4360 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
cf7d32b8 4361 if (dtrace_priv_proc(state))
b0d623f7 4362 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
2d21ac55
A
4363 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4364 }
4365
4366 mstate->dtms_scratch_ptr += scratch_size;
4367 regs[rd] = dest;
4368 break;
4369 }
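	/*
	 * Illustrative note (not in the original source): the
	 * "scratch_size < size" test above catches a wrapped sum.
	 * With 4 bytes of alignment slack and a bogus size of -1ULL:
	 *
	 *	dest = P2ROUNDUP(ptr, 8);		// ptr + 4
	 *	scratch_size = (dest - ptr) + size;	// 4 + (-1ULL) == 3
	 *
	 * 3 < -1ULL, so the request is refused rather than silently
	 * truncated to a tiny allocation.
	 */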
4370
4371 case DIF_SUBR_COPYINTO: {
4372 uint64_t size = tupregs[1].dttk_value;
4373 uintptr_t dest = tupregs[2].dttk_value;
4374
4375 /*
4376 * This action doesn't require any credential checks since
4377 * probes will not activate in user contexts to which the
4378 * enabling user does not have permissions.
4379 */
4380 if (!dtrace_inscratch(dest, size, mstate)) {
4381 *flags |= CPU_DTRACE_BADADDR;
4382 *illval = regs[rd];
4383 break;
4384 }
4385
4386 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
cf7d32b8 4387 if (dtrace_priv_proc(state))
b0d623f7 4388 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
2d21ac55
A
4389 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4390 break;
4391 }
4392
4393 case DIF_SUBR_COPYINSTR: {
4394 uintptr_t dest = mstate->dtms_scratch_ptr;
4395 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4396
4397 if (nargs > 1 && tupregs[1].dttk_value < size)
4398 size = tupregs[1].dttk_value + 1;
4399
4400 /*
4401 * This action doesn't require any credential checks since
4402 * probes will not activate in user contexts to which the
4403 * enabling user does not have permissions.
4404 */
b0d623f7 4405 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 4406 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4407 regs[rd] = 0;
2d21ac55
A
4408 break;
4409 }
4410
4411 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
cf7d32b8 4412 if (dtrace_priv_proc(state))
b0d623f7 4413 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
2d21ac55
A
4414 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4415
4416 ((char *)dest)[size - 1] = '\0';
4417 mstate->dtms_scratch_ptr += size;
4418 regs[rd] = dest;
4419 break;
4420 }
4421
2d21ac55
A
4422 case DIF_SUBR_MSGSIZE:
4423 case DIF_SUBR_MSGDSIZE: {
 4424 /* Darwin does not implement SysV STREAMS messages */
b0d623f7 4425 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2d21ac55
A
4426 regs[rd] = 0;
4427 break;
4428 }
2d21ac55 4429
2d21ac55
A
4430 case DIF_SUBR_PROGENYOF: {
4431 pid_t pid = tupregs[0].dttk_value;
4432 struct proc *p = current_proc();
4433 int rval = 0, lim = nprocs;
4434
4435 while(p && (lim-- > 0)) {
4436 pid_t ppid;
4437
4438 ppid = (pid_t)dtrace_load32((uintptr_t)&(p->p_pid));
4439 if (*flags & CPU_DTRACE_FAULT)
4440 break;
4441
4442 if (ppid == pid) {
4443 rval = 1;
4444 break;
4445 }
4446
4447 if (ppid == 0)
4448 break; /* Can't climb process tree any further. */
4449
4450 p = (struct proc *)dtrace_loadptr((uintptr_t)&(p->p_pptr));
f427ee49
A
4451#if __has_feature(ptrauth_calls)
4452 p = ptrauth_strip(p, ptrauth_key_process_independent_data);
4453#endif
2d21ac55
A
4454 if (*flags & CPU_DTRACE_FAULT)
4455 break;
4456 }
4457
4458 regs[rd] = rval;
4459 break;
4460 }
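	/*
	 * Illustrative restatement (not in the original source) of the
	 * bounded ancestor walk above, minus the safe-load machinery:
	 *
	 *	while (p != NULL && lim-- > 0) {
	 *		if (p->p_pid == pid)
	 *			return (1);	// pid is self or an ancestor
	 *		if (p->p_pid == 0)
	 *			break;		// reached the root
	 *		p = p->p_pptr;
	 *	}
	 *	return (0);
	 *
	 * The lim counter (nprocs) guarantees termination even if a
	 * stale parent chain loops.
	 */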
2d21ac55
A
4461
4462 case DIF_SUBR_SPECULATION:
4463 regs[rd] = dtrace_speculation(state);
4464 break;
4465
fe8ab488 4466
2d21ac55
A
4467 case DIF_SUBR_COPYOUT: {
4468 uintptr_t kaddr = tupregs[0].dttk_value;
fe8ab488 4469 user_addr_t uaddr = tupregs[1].dttk_value;
2d21ac55
A
4470 uint64_t size = tupregs[2].dttk_value;
4471
4472 if (!dtrace_destructive_disallow &&
4473 dtrace_priv_proc_control(state) &&
ecc0ceb4
A
4474 !dtrace_istoxic(kaddr, size) &&
4475 dtrace_canload(kaddr, size, mstate, vstate)) {
2d21ac55 4476 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
b0d623f7 4477 dtrace_copyout(kaddr, uaddr, size, flags);
2d21ac55
A
4478 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4479 }
4480 break;
4481 }
4482
4483 case DIF_SUBR_COPYOUTSTR: {
4484 uintptr_t kaddr = tupregs[0].dttk_value;
fe8ab488 4485 user_addr_t uaddr = tupregs[1].dttk_value;
2d21ac55 4486 uint64_t size = tupregs[2].dttk_value;
39037602 4487 size_t lim;
2d21ac55
A
4488
4489 if (!dtrace_destructive_disallow &&
4490 dtrace_priv_proc_control(state) &&
ecc0ceb4 4491 !dtrace_istoxic(kaddr, size) &&
39037602 4492 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
2d21ac55 4493 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
39037602 4494 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
2d21ac55
A
4495 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4496 }
4497 break;
4498 }
2d21ac55 4499
b0d623f7 4500 case DIF_SUBR_STRLEN: {
39037602 4501 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
b0d623f7 4502 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
39037602 4503 size_t lim;
b0d623f7 4504
39037602 4505 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
fe8ab488 4506 regs[rd] = 0;
b0d623f7
A
4507 break;
4508 }
4509
39037602 4510 regs[rd] = dtrace_strlen((char *)addr, lim);
b0d623f7 4511
2d21ac55 4512 break;
b0d623f7 4513 }
2d21ac55
A
4514
4515 case DIF_SUBR_STRCHR:
4516 case DIF_SUBR_STRRCHR: {
4517 /*
4518 * We're going to iterate over the string looking for the
4519 * specified character. We will iterate until we have reached
4520 * the string length or we have found the character. If this
4521 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4522 * of the specified character instead of the first.
4523 */
4524 uintptr_t addr = tupregs[0].dttk_value;
39037602
A
4525 uintptr_t addr_limit;
4526 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4527 size_t lim;
2d21ac55
A
4528 char c, target = (char)tupregs[1].dttk_value;
4529
39037602 4530 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
5ba3f43e 4531 regs[rd] = 0;
39037602
A
4532 break;
4533 }
4534 addr_limit = addr + lim;
4535
4536 for (regs[rd] = 0; addr < addr_limit; addr++) {
2d21ac55
A
4537 if ((c = dtrace_load8(addr)) == target) {
4538 regs[rd] = addr;
4539
4540 if (subr == DIF_SUBR_STRCHR)
4541 break;
4542 }
4543
4544 if (c == '\0')
4545 break;
4546 }
4547
4548 break;
4549 }
4550
4551 case DIF_SUBR_STRSTR:
4552 case DIF_SUBR_INDEX:
4553 case DIF_SUBR_RINDEX: {
4554 /*
4555 * We're going to iterate over the string looking for the
4556 * specified string. We will iterate until we have reached
4557 * the string length or we have found the string. (Yes, this
4558 * is done in the most naive way possible -- but considering
4559 * that the string we're searching for is likely to be
4560 * relatively short, the complexity of Rabin-Karp or similar
4561 * hardly seems merited.)
4562 */
4563 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4564 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4565 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4566 size_t len = dtrace_strlen(addr, size);
4567 size_t sublen = dtrace_strlen(substr, size);
4568 char *limit = addr + len, *orig = addr;
4569 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4570 int inc = 1;
4571
4572 regs[rd] = notfound;
4573
b0d623f7 4574 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
fe8ab488 4575 regs[rd] = 0;
b0d623f7
A
4576 break;
4577 }
4578
4579 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4580 vstate)) {
fe8ab488 4581 regs[rd] = 0;
b0d623f7
A
4582 break;
4583 }
4584
2d21ac55
A
4585 /*
4586 * strstr() and index()/rindex() have similar semantics if
4587 * both strings are the empty string: strstr() returns a
4588 * pointer to the (empty) string, and index() and rindex()
4589 * both return index 0 (regardless of any position argument).
4590 */
4591 if (sublen == 0 && len == 0) {
4592 if (subr == DIF_SUBR_STRSTR)
4593 regs[rd] = (uintptr_t)addr;
4594 else
4595 regs[rd] = 0;
4596 break;
4597 }
4598
4599 if (subr != DIF_SUBR_STRSTR) {
4600 if (subr == DIF_SUBR_RINDEX) {
4601 limit = orig - 1;
4602 addr += len;
4603 inc = -1;
4604 }
4605
4606 /*
4607 * Both index() and rindex() take an optional position
4608 * argument that denotes the starting position.
4609 */
4610 if (nargs == 3) {
4611 int64_t pos = (int64_t)tupregs[2].dttk_value;
4612
4613 /*
4614 * If the position argument to index() is
4615 * negative, Perl implicitly clamps it at
4616 * zero. This semantic is a little surprising
4617 * given the special meaning of negative
4618 * positions to similar Perl functions like
4619 * substr(), but it appears to reflect a
4620 * notion that index() can start from a
4621 * negative index and increment its way up to
4622 * the string. Given this notion, Perl's
4623 * rindex() is at least self-consistent in
4624 * that it implicitly clamps positions greater
4625 * than the string length to be the string
4626 * length. Where Perl completely loses
4627 * coherence, however, is when the specified
4628 * substring is the empty string (""). In
4629 * this case, even if the position is
4630 * negative, rindex() returns 0 -- and even if
4631 * the position is greater than the length,
4632 * index() returns the string length. These
4633 * semantics violate the notion that index()
4634 * should never return a value less than the
4635 * specified position and that rindex() should
4636 * never return a value greater than the
4637 * specified position. (One assumes that
4638 * these semantics are artifacts of Perl's
4639 * implementation and not the results of
4640 * deliberate design -- it beggars belief that
4641 * even Larry Wall could desire such oddness.)
4642 * While in the abstract one would wish for
4643 * consistent position semantics across
4644 * substr(), index() and rindex() -- or at the
4645 * very least self-consistent position
4646 * semantics for index() and rindex() -- we
4647 * instead opt to keep with the extant Perl
4648 * semantics, in all their broken glory. (Do
4649 * we have more desire to maintain Perl's
4650 * semantics than Perl does? Probably.)
4651 */
4652 if (subr == DIF_SUBR_RINDEX) {
4653 if (pos < 0) {
4654 if (sublen == 0)
4655 regs[rd] = 0;
4656 break;
4657 }
4658
b0d623f7 4659 if ((size_t)pos > len)
2d21ac55
A
4660 pos = len;
4661 } else {
4662 if (pos < 0)
4663 pos = 0;
4664
b0d623f7 4665 if ((size_t)pos >= len) {
2d21ac55
A
4666 if (sublen == 0)
4667 regs[rd] = len;
4668 break;
4669 }
4670 }
4671
4672 addr = orig + pos;
4673 }
4674 }
4675
4676 for (regs[rd] = notfound; addr != limit; addr += inc) {
4677 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4678 if (subr != DIF_SUBR_STRSTR) {
4679 /*
4680 * As D index() and rindex() are
4681 * modeled on Perl (and not on awk),
4682 * we return a zero-based (and not a
4683 * one-based) index. (For you Perl
4684 * weenies: no, we're not going to add
4685 * $[ -- and shouldn't you be at a con
4686 * or something?)
4687 */
4688 regs[rd] = (uintptr_t)(addr - orig);
4689 break;
4690 }
4691
4692 ASSERT(subr == DIF_SUBR_STRSTR);
4693 regs[rd] = (uintptr_t)addr;
4694 break;
4695 }
4696 }
4697
4698 break;
4699 }
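	/*
	 * Illustrative examples (not in the original source) of the Perl
	 * position semantics preserved above, as expected D results:
	 *
	 *	index("coconut", "co")		=> 0
	 *	index("coconut", "co", 1)	=> 2
	 *	index("coconut", "", 100)	=> 7	(clamped to strlen)
	 *	rindex("coconut", "co")		=> 2
	 *	rindex("coconut", "", -5)	=> 0	(negative pos, empty substr)
	 */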
4700
4701 case DIF_SUBR_STRTOK: {
4702 uintptr_t addr = tupregs[0].dttk_value;
4703 uintptr_t tokaddr = tupregs[1].dttk_value;
4704 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
39037602
A
4705 uintptr_t limit, toklimit;
4706 size_t clim;
2d21ac55 4707 char *dest = (char *)mstate->dtms_scratch_ptr;
b0d623f7
A
4708 uint8_t c='\0', tokmap[32]; /* 256 / 8 */
4709 uint64_t i = 0;
b0d623f7
A
4710
4711 /*
4712 * Check both the token buffer and (later) the input buffer,
4713 * since both could be non-scratch addresses.
4714 */
39037602 4715 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
fe8ab488 4716 regs[rd] = 0;
b0d623f7
A
4717 break;
4718 }
39037602 4719 toklimit = tokaddr + clim;
2d21ac55 4720
b0d623f7 4721 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 4722 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4723 regs[rd] = 0;
2d21ac55
A
4724 break;
4725 }
4726
fe8ab488 4727 if (addr == 0) {
2d21ac55
A
4728 /*
4729 * If the address specified is NULL, we use our saved
4730 * strtok pointer from the mstate. Note that this
4731 * means that the saved strtok pointer is _only_
4732 * valid within multiple enablings of the same probe --
4733 * it behaves like an implicit clause-local variable.
4734 */
4735 addr = mstate->dtms_strtok;
39037602 4736 limit = mstate->dtms_strtok_limit;
b0d623f7
A
4737 } else {
4738 /*
4739 * If the user-specified address is non-NULL we must
4740 * access check it. This is the only time we have
4741 * a chance to do so, since this address may reside
 4742 * in the string table of this clause -- future calls
4743 * (when we fetch addr from mstate->dtms_strtok)
4744 * would fail this access check.
4745 */
39037602
A
4746 if (!dtrace_strcanload(addr, size, &clim, mstate,
4747 vstate)) {
fe8ab488 4748 regs[rd] = 0;
b0d623f7 4749 break;
fe8ab488 4750 }
39037602 4751 limit = addr + clim;
2d21ac55
A
4752 }
4753
4754 /*
4755 * First, zero the token map, and then process the token
4756 * string -- setting a bit in the map for every character
4757 * found in the token string.
4758 */
c910b4d9 4759 for (i = 0; i < (int)sizeof (tokmap); i++)
2d21ac55
A
4760 tokmap[i] = 0;
4761
4762 for (; tokaddr < toklimit; tokaddr++) {
4763 if ((c = dtrace_load8(tokaddr)) == '\0')
4764 break;
4765
4766 ASSERT((c >> 3) < sizeof (tokmap));
4767 tokmap[c >> 3] |= (1 << (c & 0x7));
4768 }
4769
39037602 4770 for (; addr < limit; addr++) {
2d21ac55 4771 /*
39037602
A
4772 * We're looking for a character that is _not_
4773 * contained in the token string.
2d21ac55
A
4774 */
4775 if ((c = dtrace_load8(addr)) == '\0')
4776 break;
4777
4778 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4779 break;
4780 }
4781
4782 if (c == '\0') {
4783 /*
4784 * We reached the end of the string without finding
4785 * any character that was not in the token string.
4786 * We return NULL in this case, and we set the saved
4787 * address to NULL as well.
4788 */
fe8ab488
A
4789 regs[rd] = 0;
4790 mstate->dtms_strtok = 0;
5ba3f43e 4791 mstate->dtms_strtok_limit = 0;
2d21ac55
A
4792 break;
4793 }
4794
4795 /*
4796 * From here on, we're copying into the destination string.
4797 */
4798 for (i = 0; addr < limit && i < size - 1; addr++) {
4799 if ((c = dtrace_load8(addr)) == '\0')
4800 break;
4801
4802 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4803 break;
4804
4805 ASSERT(i < size);
4806 dest[i++] = c;
4807 }
4808
4809 ASSERT(i < size);
4810 dest[i] = '\0';
4811 regs[rd] = (uintptr_t)dest;
4812 mstate->dtms_scratch_ptr += size;
4813 mstate->dtms_strtok = addr;
39037602 4814 mstate->dtms_strtok_limit = limit;
2d21ac55
A
4815 break;
4816 }
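	/*
	 * Illustrative note (not in the original source): tokmap is a
	 * 256-bit membership bitmap. A character c is a delimiter iff
	 *
	 *	tokmap[c >> 3] & (1 << (c & 0x7))
	 *
	 * so each test costs one load and one mask against only 32
	 * bytes of scratch, regardless of how many delimiters the
	 * token string names.
	 */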
4817
4818 case DIF_SUBR_SUBSTR: {
4819 uintptr_t s = tupregs[0].dttk_value;
4820 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4821 char *d = (char *)mstate->dtms_scratch_ptr;
4822 int64_t index = (int64_t)tupregs[1].dttk_value;
4823 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4824 size_t len = dtrace_strlen((char *)s, size);
4825 int64_t i = 0;
4826
b0d623f7 4827 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
fe8ab488 4828 regs[rd] = 0;
b0d623f7
A
4829 break;
4830 }
2d21ac55 4831
b0d623f7 4832 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 4833 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4834 regs[rd] = 0;
2d21ac55
A
4835 break;
4836 }
4837
b0d623f7
A
4838 if (nargs <= 2)
4839 remaining = (int64_t)size;
4840
2d21ac55
A
4841 if (index < 0) {
4842 index += len;
4843
4844 if (index < 0 && index + remaining > 0) {
4845 remaining += index;
4846 index = 0;
4847 }
4848 }
4849
b0d623f7
A
4850 if ((size_t)index >= len || index < 0) {
4851 remaining = 0;
4852 } else if (remaining < 0) {
4853 remaining += len - index;
4854 } else if ((uint64_t)index + (uint64_t)remaining > size) {
4855 remaining = size - index;
4856 }
fe8ab488 4857
b0d623f7
A
4858 for (i = 0; i < remaining; i++) {
4859 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
2d21ac55
A
4860 break;
4861 }
b0d623f7
A
4862
4863 d[i] = '\0';
2d21ac55
A
4864
4865 mstate->dtms_scratch_ptr += size;
4866 regs[rd] = (uintptr_t)d;
4867 break;
4868 }
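	/*
	 * Illustrative examples (not in the original source) of the
	 * index/remaining normalization above:
	 *
	 *	substr("coconut", 4)		=> "nut"
	 *	substr("coconut", -3)		=> "nut"	(negative index counts from the end)
	 *	substr("coconut", 1, -2)	=> "ocon"	(negative length trims the tail)
	 *	substr("coconut", -10, 5)	=> "co"		(overhang clamped at index 0)
	 */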
4869
2d21ac55
A
4870 case DIF_SUBR_GETMAJOR:
4871 regs[rd] = (uintptr_t)major( (dev_t)tupregs[0].dttk_value );
4872 break;
2d21ac55 4873
2d21ac55
A
4874 case DIF_SUBR_GETMINOR:
4875 regs[rd] = (uintptr_t)minor( (dev_t)tupregs[0].dttk_value );
4876 break;
2d21ac55 4877
2d21ac55 4878 case DIF_SUBR_DDI_PATHNAME: {
fe8ab488 4879 /* APPLE NOTE: currently unsupported on Darwin */
b0d623f7 4880 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
fe8ab488 4881 regs[rd] = 0;
2d21ac55
A
4882 break;
4883 }
2d21ac55
A
4884
4885 case DIF_SUBR_STRJOIN: {
4886 char *d = (char *)mstate->dtms_scratch_ptr;
4887 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4888 uintptr_t s1 = tupregs[0].dttk_value;
4889 uintptr_t s2 = tupregs[1].dttk_value;
39037602
A
4890 uint64_t i = 0, j = 0;
4891 size_t lim1, lim2;
4892 char c;
b0d623f7 4893
39037602
A
4894 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
4895 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
fe8ab488 4896 regs[rd] = 0;
b0d623f7
A
4897 break;
4898 }
2d21ac55 4899
b0d623f7 4900 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 4901 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4902 regs[rd] = 0;
2d21ac55
A
4903 break;
4904 }
4905
4906 for (;;) {
4907 if (i >= size) {
4908 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4909 regs[rd] = 0;
2d21ac55
A
4910 break;
4911 }
39037602
A
4912 c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
4913 if ((d[i++] = c) == '\0') {
2d21ac55
A
4914 i--;
4915 break;
4916 }
4917 }
4918
4919 for (;;) {
4920 if (i >= size) {
4921 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4922 regs[rd] = 0;
2d21ac55
A
4923 break;
4924 }
39037602
A
4925 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
4926 if ((d[i++] = c) == '\0')
2d21ac55
A
4927 break;
4928 }
4929
4930 if (i < size) {
4931 mstate->dtms_scratch_ptr += i;
4932 regs[rd] = (uintptr_t)d;
4933 }
4934
4935 break;
4936 }
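	/*
	 * Illustrative note (not in the original source): both copy loops
	 * above clamp at lim1/lim2 -- the loadable lengths established by
	 * dtrace_strcanload() -- as well as at the scratch size, so
	 * strjoin() never reads past either source even if a string is
	 * unterminated within strsize. The first loop backs i up over
	 * s1's NUL so the two halves abut in the destination.
	 */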
4937
cb323159
A
4938 case DIF_SUBR_STRTOLL: {
4939 uintptr_t s = tupregs[0].dttk_value;
4940 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4941 size_t lim;
4942 int base = 10;
4943
4944 if (nargs > 1) {
4945 if ((base = tupregs[1].dttk_value) <= 1 ||
4946 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4947 *flags |= CPU_DTRACE_ILLOP;
4948 break;
4949 }
4950 }
4951
4952 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
4953 regs[rd] = INT64_MIN;
4954 break;
4955 }
4956
4957 regs[rd] = dtrace_strtoll((char *)s, base, lim);
4958 break;
4959 }
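	/*
	 * Illustrative note (not in the original source): the bound above,
	 * ('z' - 'a' + 1) + ('9' - '0' + 1), admits bases 2 through 36 --
	 * ten digits plus twenty-six letters. strtoll("ff", 16) thus
	 * yields 255, while base 1 or base 37 raises CPU_DTRACE_ILLOP.
	 */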
4960
2d21ac55
A
4961 case DIF_SUBR_LLTOSTR: {
4962 int64_t i = (int64_t)tupregs[0].dttk_value;
5ba3f43e
A
4963 uint64_t val, digit;
4964 uint64_t size = 65; /* enough room for 2^64 in binary */
2d21ac55 4965 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5ba3f43e
A
4966 int base = 10;
4967
4968 if (nargs > 1) {
4969 if ((base = tupregs[1].dttk_value) <= 1 ||
4970 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4971 *flags |= CPU_DTRACE_ILLOP;
4972 break;
4973 }
4974 }
4975
4976 val = (base == 10 && i < 0) ? i * -1 : i;
2d21ac55 4977
b0d623f7 4978 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 4979 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 4980 regs[rd] = 0;
2d21ac55
A
4981 break;
4982 }
4983
5ba3f43e
A
4984 for (*end-- = '\0'; val; val /= base) {
4985 if ((digit = val % base) <= '9' - '0') {
4986 *end-- = '0' + digit;
4987 } else {
4988 *end-- = 'a' + (digit - ('9' - '0') - 1);
4989 }
4990 }
2d21ac55 4991
5ba3f43e 4992 if (i == 0 && base == 16)
2d21ac55
A
4993 *end-- = '0';
4994
5ba3f43e
A
4995 if (base == 16)
4996 *end-- = 'x';
4997
4998 if (i == 0 || base == 8 || base == 16)
4999 *end-- = '0';
5000
5001 if (i < 0 && base == 10)
2d21ac55
A
5002 *end-- = '-';
5003
5004 regs[rd] = (uintptr_t)end + 1;
5005 mstate->dtms_scratch_ptr += size;
5006 break;
5007 }
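	/*
	 * Illustrative trace (not in the original source) of the reverse
	 * digit emission above: for lltostr(-255) the magnitude 255 is
	 * laid down right to left as "255", then '-' is prepended and
	 * end + 1 addresses "-255". For lltostr(255, 16) the digits
	 * become "ff" and the "0x" prefix is prepended, yielding "0xff".
	 */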
5008
b0d623f7
A
5009 case DIF_SUBR_HTONS:
5010 case DIF_SUBR_NTOHS:
5011#ifdef _BIG_ENDIAN
5012 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5013#else
5014 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5015#endif
5016 break;
5017
5018
5019 case DIF_SUBR_HTONL:
5020 case DIF_SUBR_NTOHL:
5021#ifdef _BIG_ENDIAN
5022 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5023#else
5024 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5025#endif
5026 break;
5027
5028
5029 case DIF_SUBR_HTONLL:
5030 case DIF_SUBR_NTOHLL:
5031#ifdef _BIG_ENDIAN
5032 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5033#else
5034 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5035#endif
5036 break;
5037
5038
2d21ac55
A
5039 case DIF_SUBR_DIRNAME:
5040 case DIF_SUBR_BASENAME: {
5041 char *dest = (char *)mstate->dtms_scratch_ptr;
5042 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5043 uintptr_t src = tupregs[0].dttk_value;
5044 int i, j, len = dtrace_strlen((char *)src, size);
5045 int lastbase = -1, firstbase = -1, lastdir = -1;
5046 int start, end;
5047
b0d623f7 5048 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
fe8ab488 5049 regs[rd] = 0;
b0d623f7
A
5050 break;
5051 }
5052
5053 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 5054 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 5055 regs[rd] = 0;
2d21ac55
A
5056 break;
5057 }
5058
5059 /*
5060 * The basename and dirname for a zero-length string is
5061 * defined to be "."
5062 */
5063 if (len == 0) {
5064 len = 1;
5065 src = (uintptr_t)".";
5066 }
5067
5068 /*
5069 * Start from the back of the string, moving back toward the
5070 * front until we see a character that isn't a slash. That
5071 * character is the last character in the basename.
5072 */
5073 for (i = len - 1; i >= 0; i--) {
5074 if (dtrace_load8(src + i) != '/')
5075 break;
5076 }
5077
5078 if (i >= 0)
5079 lastbase = i;
5080
5081 /*
5082 * Starting from the last character in the basename, move
5083 * towards the front until we find a slash. The character
5084 * that we processed immediately before that is the first
5085 * character in the basename.
5086 */
5087 for (; i >= 0; i--) {
5088 if (dtrace_load8(src + i) == '/')
5089 break;
5090 }
5091
5092 if (i >= 0)
5093 firstbase = i + 1;
5094
5095 /*
5096 * Now keep going until we find a non-slash character. That
5097 * character is the last character in the dirname.
5098 */
5099 for (; i >= 0; i--) {
5100 if (dtrace_load8(src + i) != '/')
5101 break;
5102 }
5103
5104 if (i >= 0)
5105 lastdir = i;
5106
5107 ASSERT(!(lastbase == -1 && firstbase != -1));
5108 ASSERT(!(firstbase == -1 && lastdir != -1));
5109
5110 if (lastbase == -1) {
5111 /*
5112 * We didn't find a non-slash character. We know that
5113 * the length is non-zero, so the whole string must be
5114 * slashes. In either the dirname or the basename
5115 * case, we return '/'.
5116 */
5117 ASSERT(firstbase == -1);
5118 firstbase = lastbase = lastdir = 0;
5119 }
5120
5121 if (firstbase == -1) {
5122 /*
5123 * The entire string consists only of a basename
5124 * component. If we're looking for dirname, we need
5125 * to change our string to be just "."; if we're
5126 * looking for a basename, we'll just set the first
5127 * character of the basename to be 0.
5128 */
5129 if (subr == DIF_SUBR_DIRNAME) {
5130 ASSERT(lastdir == -1);
5131 src = (uintptr_t)".";
5132 lastdir = 0;
5133 } else {
5134 firstbase = 0;
5135 }
5136 }
5137
5138 if (subr == DIF_SUBR_DIRNAME) {
5139 if (lastdir == -1) {
5140 /*
5141 * We know that we have a slash in the name --
5142 * or lastdir would be set to 0, above. And
5143 * because lastdir is -1, we know that this
5144 * slash must be the first character. (That
5145 * is, the full string must be of the form
5146 * "/basename".) In this case, the last
5147 * character of the directory name is 0.
5148 */
5149 lastdir = 0;
5150 }
5151
5152 start = 0;
5153 end = lastdir;
5154 } else {
5155 ASSERT(subr == DIF_SUBR_BASENAME);
5156 ASSERT(firstbase != -1 && lastbase != -1);
5157 start = firstbase;
5158 end = lastbase;
5159 }
5160
b0d623f7
A
5161 for (i = start, j = 0; i <= end && (uint64_t)j < size - 1; i++, j++)
5162 dest[j] = dtrace_load8(src + i);
2d21ac55
A
5163
5164 dest[j] = '\0';
5165 regs[rd] = (uintptr_t)dest;
5166 mstate->dtms_scratch_ptr += size;
5167 break;
5168 }
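	/*
	 * Illustrative walk-through (not in the original source) of the
	 * three backward scans above, for src = "/usr//lib/":
	 *
	 *	lastbase  = 8	('b', the last non-slash character)
	 *	firstbase = 6	('l', just past the preceding slash)
	 *	lastdir   = 3	('r', the last character of the dirname)
	 *
	 * so basename() yields "lib" and dirname() yields "/usr".
	 */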
5169
5170 case DIF_SUBR_CLEANPATH: {
5171 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5172 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5173 uintptr_t src = tupregs[0].dttk_value;
39037602
A
5174 size_t lim;
5175 size_t i = 0, j = 0;
2d21ac55 5176
39037602 5177 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
fe8ab488 5178 regs[rd] = 0;
b0d623f7
A
5179 break;
5180 }
5181
5182 if (!DTRACE_INSCRATCH(mstate, size)) {
2d21ac55 5183 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 5184 regs[rd] = 0;
2d21ac55
A
5185 break;
5186 }
5187
5188 /*
5189 * Move forward, loading each character.
5190 */
5191 do {
39037602 5192 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
2d21ac55 5193next:
b0d623f7
A
5194 if ((uint64_t)(j + 5) >= size) /* 5 = strlen("/..c\0") */
5195 break;
2d21ac55
A
5196
5197 if (c != '/') {
5198 dest[j++] = c;
5199 continue;
5200 }
5201
39037602 5202 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
2d21ac55
A
5203
5204 if (c == '/') {
5205 /*
5206 * We have two slashes -- we can just advance
5207 * to the next character.
5208 */
5209 goto next;
5210 }
5211
5212 if (c != '.') {
5213 /*
5214 * This is not "." and it's not ".." -- we can
5215 * just store the "/" and this character and
5216 * drive on.
5217 */
5218 dest[j++] = '/';
5219 dest[j++] = c;
5220 continue;
5221 }
5222
39037602 5223 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
2d21ac55
A
5224
5225 if (c == '/') {
5226 /*
5227 * This is a "/./" component. We're not going
5228 * to store anything in the destination buffer;
5229 * we're just going to go to the next component.
5230 */
5231 goto next;
5232 }
5233
5234 if (c != '.') {
5235 /*
5236 * This is not ".." -- we can just store the
5237 * "/." and this character and continue
5238 * processing.
5239 */
5240 dest[j++] = '/';
5241 dest[j++] = '.';
5242 dest[j++] = c;
5243 continue;
5244 }
5245
39037602 5246 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
2d21ac55
A
5247
5248 if (c != '/' && c != '\0') {
5249 /*
5250 * This is not ".." -- it's "..[mumble]".
5251 * We'll store the "/.." and this character
5252 * and continue processing.
5253 */
5254 dest[j++] = '/';
5255 dest[j++] = '.';
5256 dest[j++] = '.';
5257 dest[j++] = c;
5258 continue;
5259 }
5260
5261 /*
5262 * This is "/../" or "/..\0". We need to back up
5263 * our destination pointer until we find a "/".
5264 */
5265 i--;
5266 while (j != 0 && dest[--j] != '/')
5267 continue;
5268
5269 if (c == '\0')
5270 dest[++j] = '/';
5271 } while (c != '\0');
5272
5273 dest[j] = '\0';
5274 regs[rd] = (uintptr_t)dest;
5275 mstate->dtms_scratch_ptr += size;
5276 break;
5277 }
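	/*
	 * Illustrative example (not in the original source) of the
	 * normalization above:
	 *
	 *	cleanpath("/a//b/./c/../d") => "/a/b/d"
	 *
	 * Doubled slashes collapse, "." components vanish, and each ".."
	 * pops the destination buffer back to the previous slash. Note
	 * that this is purely lexical; symlinks are not consulted.
	 */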
2d21ac55 5278
b0d623f7
A
5279 case DIF_SUBR_INET_NTOA:
5280 case DIF_SUBR_INET_NTOA6:
5281 case DIF_SUBR_INET_NTOP: {
5282 size_t size;
5283 int af, argi, i;
5284 char *base, *end;
2d21ac55 5285
b0d623f7
A
5286 if (subr == DIF_SUBR_INET_NTOP) {
5287 af = (int)tupregs[0].dttk_value;
5288 argi = 1;
5289 } else {
5290 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5291 argi = 0;
2d21ac55
A
5292 }
5293
b0d623f7
A
5294 if (af == AF_INET) {
5295#if !defined(__APPLE__)
5296 ipaddr_t ip4;
5297#else
6d2010ae 5298 uint32_t ip4;
b0d623f7
A
5299#endif /* __APPLE__ */
5300 uint8_t *ptr8, val;
5301
5302 /*
5303 * Safely load the IPv4 address.
5304 */
6d2010ae 5305#if !defined(__APPLE__)
b0d623f7 5306 ip4 = dtrace_load32(tupregs[argi].dttk_value);
6d2010ae 5307#else
39037602
A
5308 if (!dtrace_canload(tupregs[argi].dttk_value, sizeof(ip4),
5309 mstate, vstate)) {
5310 regs[rd] = 0;
5311 break;
5312 }
5313
6d2010ae
A
5314 dtrace_bcopy(
5315 (void *)(uintptr_t)tupregs[argi].dttk_value,
5316 (void *)(uintptr_t)&ip4, sizeof (ip4));
5317#endif /* __APPLE__ */
b0d623f7
A
5318 /*
5319 * Check an IPv4 string will fit in scratch.
5320 */
5321#if !defined(__APPLE__)
5322 size = INET_ADDRSTRLEN;
5323#else
5324 size = MAX_IPv4_STR_LEN;
5325#endif /* __APPLE__ */
5326 if (!DTRACE_INSCRATCH(mstate, size)) {
5327 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 5328 regs[rd] = 0;
b0d623f7
A
5329 break;
5330 }
5331 base = (char *)mstate->dtms_scratch_ptr;
5332 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5333
5334 /*
5335 * Stringify as a dotted decimal quad.
5336 */
5337 *end-- = '\0';
5338 ptr8 = (uint8_t *)&ip4;
5339 for (i = 3; i >= 0; i--) {
5340 val = ptr8[i];
5341
5342 if (val == 0) {
5343 *end-- = '0';
5344 } else {
5345 for (; val; val /= 10) {
5346 *end-- = '0' + (val % 10);
5347 }
5348 }
5349
5350 if (i > 0)
5351 *end-- = '.';
5352 }
5353 ASSERT(end + 1 >= base);
5354
5355 } else if (af == AF_INET6) {
5356#if defined(__APPLE__)
5357#define _S6_un __u6_addr
5358#define _S6_u8 __u6_addr8
5359#endif /* __APPLE__ */
5360 struct in6_addr ip6;
5361 int firstzero, tryzero, numzero, v6end;
5362 uint16_t val;
5363 const char digits[] = "0123456789abcdef";
5364
5365 /*
5366 * Stringify using RFC 1884 convention 2 - 16 bit
5367 * hexadecimal values with a zero-run compression.
5368 * Lower case hexadecimal digits are used.
5369 * eg, fe80::214:4fff:fe0b:76c8.
5370 * The IPv4 embedded form is returned for inet_ntop,
5371 * just the IPv4 string is returned for inet_ntoa6.
5372 */
5373
39037602
A
5374 if (!dtrace_canload(tupregs[argi].dttk_value,
5375 sizeof(struct in6_addr), mstate, vstate)) {
5376 regs[rd] = 0;
5377 break;
5378 }
5379
b0d623f7
A
5380 /*
5381 * Safely load the IPv6 address.
5382 */
5383 dtrace_bcopy(
5384 (void *)(uintptr_t)tupregs[argi].dttk_value,
5385 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5386
5387 /*
5388 * Check an IPv6 string will fit in scratch.
5389 */
5390 size = INET6_ADDRSTRLEN;
5391 if (!DTRACE_INSCRATCH(mstate, size)) {
5392 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
fe8ab488 5393 regs[rd] = 0;
b0d623f7
A
5394 break;
5395 }
5396 base = (char *)mstate->dtms_scratch_ptr;
5397 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5398 *end-- = '\0';
5399
5400 /*
5401 * Find the longest run of 16 bit zero values
5402 * for the single allowed zero compression - "::".
5403 */
5404 firstzero = -1;
5405 tryzero = -1;
5406 numzero = 1;
b0d623f7 5407 for (i = 0; i < (int)sizeof (struct in6_addr); i++) {
b0d623f7
A
5408 if (ip6._S6_un._S6_u8[i] == 0 &&
5409 tryzero == -1 && i % 2 == 0) {
5410 tryzero = i;
5411 continue;
5412 }
5413
5414 if (tryzero != -1 &&
5415 (ip6._S6_un._S6_u8[i] != 0 ||
5416 i == sizeof (struct in6_addr) - 1)) {
5417
5418 if (i - tryzero <= numzero) {
5419 tryzero = -1;
5420 continue;
5421 }
5422
5423 firstzero = tryzero;
5424 numzero = i - i % 2 - tryzero;
5425 tryzero = -1;
5426
5427 if (ip6._S6_un._S6_u8[i] == 0 &&
5428 i == sizeof (struct in6_addr) - 1)
5429 numzero += 2;
5430 }
5431 }
b0d623f7 5432 ASSERT(firstzero + numzero <= (int)sizeof (struct in6_addr));
b0d623f7
A
5433
5434 /*
5435 * Check for an IPv4 embedded address.
5436 */
5437 v6end = sizeof (struct in6_addr) - 2;
5438 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5439 IN6_IS_ADDR_V4COMPAT(&ip6)) {
b0d623f7
A
5440 for (i = sizeof (struct in6_addr) - 1;
5441 i >= (int)DTRACE_V4MAPPED_OFFSET; i--) {
b0d623f7
A
5442 ASSERT(end >= base);
5443
5444 val = ip6._S6_un._S6_u8[i];
5445
5446 if (val == 0) {
5447 *end-- = '0';
5448 } else {
5449 for (; val; val /= 10) {
5450 *end-- = '0' + val % 10;
5451 }
5452 }
5453
b0d623f7
A
5454 if (i > (int)DTRACE_V4MAPPED_OFFSET)
5455 *end-- = '.';
b0d623f7
A
5456 }
5457
5458 if (subr == DIF_SUBR_INET_NTOA6)
5459 goto inetout;
5460
5461 /*
5462 * Set v6end to skip the IPv4 address that
5463 * we have already stringified.
5464 */
5465 v6end = 10;
5466 }
5467
5468 /*
5469 * Build the IPv6 string by working through the
5470 * address in reverse.
5471 */
5472 for (i = v6end; i >= 0; i -= 2) {
5473 ASSERT(end >= base);
5474
5475 if (i == firstzero + numzero - 2) {
5476 *end-- = ':';
5477 *end-- = ':';
5478 i -= numzero - 2;
5479 continue;
5480 }
5481
5482 if (i < 14 && i != firstzero - 2)
5483 *end-- = ':';
5484
5485 val = (ip6._S6_un._S6_u8[i] << 8) +
5486 ip6._S6_un._S6_u8[i + 1];
5487
5488 if (val == 0) {
5489 *end-- = '0';
5490 } else {
5491 for (; val; val /= 16) {
5492 *end-- = digits[val % 16];
5493 }
5494 }
5495 }
5496 ASSERT(end + 1 >= base);
5497
5498#if defined(__APPLE__)
5499#undef _S6_un
5500#undef _S6_u8
5501#endif /* __APPLE__ */
5502 } else {
5503 /*
5504 * The user didn't use AF_INET or AF_INET6.
5505 */
5506 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5507 regs[rd] = 0;
5508 break;
5509 }
5510
5511inetout: regs[rd] = (uintptr_t)end + 1;
5512 mstate->dtms_scratch_ptr += size;
5513 break;
5514 }
5515
5516 case DIF_SUBR_JSON: {
5517 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5518 uintptr_t json = tupregs[0].dttk_value;
5519 size_t jsonlen = dtrace_strlen((char *)json, size);
5520 uintptr_t elem = tupregs[1].dttk_value;
5521 size_t elemlen = dtrace_strlen((char *)elem, size);
5522
5523 char *dest = (char *)mstate->dtms_scratch_ptr;
5524 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
5525 char *ee = elemlist;
5526 int nelems = 1;
5527 uintptr_t cur;
5528
5529 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
5530 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
5531 regs[rd] = 0;
5532 break;
5533 }
5534
5535 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
5536 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5537 regs[rd] = 0;
5538 break;
5539 }
5540
5541 /*
5542 * Read the element selector and split it up into a packed list
5543 * of strings.
5544 */
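 /*
  * Added example: the selector "foo.bar[123]" is packed below as
  * "foo\0bar\0123\0" with nelems == 3 ('.' and '[' become NUL
  * separators, ']' is dropped, and a '[' at the very start is
  * ignored).
  */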
5545 for (cur = elem; cur < elem + elemlen; cur++) {
5546 char cc = dtrace_load8(cur);
5547
5548 if (cur == elem && cc == '[') {
5549 /*
5550 * If the first element selector key is
5551 * actually an array index then ignore the
5552 * bracket.
5553 */
5554 continue;
5555 }
5556
5557 if (cc == ']')
5558 continue;
5559
5560 if (cc == '.' || cc == '[') {
5561 nelems++;
5562 cc = '\0';
5563 }
5564
5565 *ee++ = cc;
5566 }
5567 *ee++ = '\0';
5568
5569 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
5570 nelems, dest)) != 0)
5571 mstate->dtms_scratch_ptr += jsonlen + 1;
5572 break;
5573 }
5574
5575 case DIF_SUBR_TOUPPER:
5576 case DIF_SUBR_TOLOWER: {
5577 uintptr_t src = tupregs[0].dttk_value;
5578 char *dest = (char *)mstate->dtms_scratch_ptr;
5579 char lower, upper, base, c;
5580 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5581 size_t len = dtrace_strlen((char*) src, size);
5582 size_t i = 0;
5583
5584 lower = (subr == DIF_SUBR_TOUPPER) ? 'a' : 'A';
5585 upper = (subr == DIF_SUBR_TOUPPER) ? 'z' : 'Z';
5586 base = (subr == DIF_SUBR_TOUPPER) ? 'A' : 'a';
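 /*
  * Added note: with these bounds, toupper() rebases bytes in
  * 'a'..'z' onto 'A'..'Z' (and tolower() the reverse); all other
  * bytes are copied through unchanged.
  */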
5587
5588 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5589 regs[rd] = 0;
5590 break;
5591 }
5592
5593 if (!DTRACE_INSCRATCH(mstate, size)) {
5594 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5595 regs[rd] = 0;
5596 break;
5597 }
5598
5599 for (i = 0; i < size - 1; ++i) {
5600 if ((c = dtrace_load8(src + i)) == '\0')
5601 break;
5602 if (c >= lower && c <= upper)
5603 c = base + (c - lower);
5604 dest[i] = c;
5605 }
5606
5607 ASSERT(i < size);
5608
5609 dest[i] = '\0';
5610 regs[rd] = (uintptr_t) dest;
5611 mstate->dtms_scratch_ptr += size;
5612
5613 break;
5614 }
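 /*
  * Added note: strip() removes the pointer-authentication code from
  * a signed (arm64e-style) pointer. The key in tupregs[1] must be a
  * valid ptrauth key selector (presumably the IA/IB/DA/DB keys
  * accepted by dtrace_is_valid_ptrauth_key()); anything else raises
  * an illegal-operation fault.
  */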
5615
5616 case DIF_SUBR_STRIP:
5617 if (!dtrace_is_valid_ptrauth_key(tupregs[1].dttk_value)) {
5618 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5619 break;
5620 }
5621 regs[rd] = (uint64_t)dtrace_ptrauth_strip(
5622 (void*)tupregs[0].dttk_value, tupregs[1].dttk_value);
5623 break;
5624
5625#if defined(__APPLE__)
5626 case DIF_SUBR_VM_KERNEL_ADDRPERM: {
5627 if (!dtrace_priv_kernel(state)) {
5628 regs[rd] = 0;
5629 } else {
5630 regs[rd] = VM_KERNEL_ADDRPERM((vm_offset_t) tupregs[0].dttk_value);
5631 }
5632
5633 break;
5634 }
5635
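 /*
  * Added note: Apple-specific subroutine that forwards a debugid and
  * up to four arguments to the kdebug facility -- e.g., from D,
  * kdebug_trace(debugid, arg1, arg2, arg3, arg4) (illustrative
  * usage, not from the original source).
  */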
5636 case DIF_SUBR_KDEBUG_TRACE: {
5637 uint32_t debugid;
5638 uintptr_t args[4] = {0};
5639 int i;
5640
5641 if (nargs < 2 || nargs > 5) {
5642 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5643 break;
5644 }
5645
5646 if (dtrace_destructive_disallow ||
5647 !dtrace_priv_kernel_destructive(state)) {
5648 return;
5649 }
5650
5651 debugid = tupregs[0].dttk_value;
5652 for (i = 0; i < nargs - 1; i++)
5653 args[i] = tupregs[i + 1].dttk_value;
5654
5655 kernel_debug(debugid, args[0], args[1], args[2], args[3], 0);
5656
5657 break;
5658 }
5659
5660 case DIF_SUBR_KDEBUG_TRACE_STRING: {
5661 if (nargs != 3) {
5662 break;
5663 }
5664
5665 if (dtrace_destructive_disallow ||
5666 !dtrace_priv_kernel_destructive(state)) {
5667 return;
5668 }
5669
5670 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5671 uint32_t debugid = tupregs[0].dttk_value;
5672 uint64_t str_id = tupregs[1].dttk_value;
5673 uintptr_t src = tupregs[2].dttk_value;
5674 size_t lim;
5675 char buf[size];
5676 char* str = NULL;
5677
5678 if (src != (uintptr_t)0) {
5679 str = buf;
5680 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5681 break;
5682 }
5683 dtrace_strcpy((void*)src, buf, size);
5684 }
5685
5686 (void)kernel_debug_string(debugid, &str_id, str);
5687 regs[rd] = str_id;
5688
5689 break;
5690 }
5691
5692 case DIF_SUBR_MTONS:
5693 absolutetime_to_nanoseconds(tupregs[0].dttk_value, &regs[rd]);
5694
5695 break;
5696 case DIF_SUBR_PHYSMEM_READ: {
5697#if DEBUG || DEVELOPMENT
5698 if (dtrace_destructive_disallow ||
5699 !dtrace_priv_kernel_destructive(state)) {
5700 return;
5701 }
5702 regs[rd] = dtrace_physmem_read(tupregs[0].dttk_value,
5703 tupregs[1].dttk_value);
5704#else
5705 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5706#endif /* DEBUG || DEVELOPMENT */
5707 break;
5708 }
5709 case DIF_SUBR_PHYSMEM_WRITE: {
5710#if DEBUG || DEVELOPMENT
5711 if (dtrace_destructive_disallow ||
5712 !dtrace_priv_kernel_destructive(state)) {
5713 return;
5714 }
5715
5716 dtrace_physmem_write(tupregs[0].dttk_value,
5717 tupregs[1].dttk_value, (size_t)tupregs[2].dttk_value);
5718#else
5719 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5720#endif /* DEBUG || DEVELOPMENT */
5721 break;
5722 }
5723
5724 case DIF_SUBR_KVTOPHYS: {
5725#if DEBUG || DEVELOPMENT
5726 regs[rd] = kvtophys(tupregs[0].dttk_value);
5727#else
5728 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5729#endif /* DEBUG || DEVELOPMENT */
5730 break;
5731 }
5732#endif /* defined(__APPLE__) */
5733
5734 }
5735}
5736
5737/*
5738 * Emulate the execution of DTrace IR instructions specified by the given
5739 * DIF object. This function is deliberately void of assertions as all of
5740 * the necessary checks are handled by a call to dtrace_difo_validate().
5741 */
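/*
 * Added note: each DIF instruction is a fixed-width 32-bit word with
 * the opcode in the top byte; the DIF_INSTR_R1/R2/RD macros used
 * below (defined in the DTrace headers) extract the three register
 * operands from the remaining bytes.
 */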
5742static uint64_t
5743dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5744 dtrace_vstate_t *vstate, dtrace_state_t *state)
5745{
5746 const dif_instr_t *text = difo->dtdo_buf;
5747 const uint_t textlen = difo->dtdo_len;
5748 const char *strtab = difo->dtdo_strtab;
5749 const uint64_t *inttab = difo->dtdo_inttab;
5750
5751 uint64_t rval = 0;
5752 dtrace_statvar_t *svar;
5753 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5754 dtrace_difv_t *v;
5755 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5756 volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5757
5758 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5759 uint64_t regs[DIF_DIR_NREGS];
5760 uint64_t *tmp;
5761
5762 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5763 int64_t cc_r;
5764 uint_t pc = 0, id, opc = 0;
5765 uint8_t ttop = 0;
5766 dif_instr_t instr;
5767 uint_t r1, r2, rd;
5768
5769 /*
5770 * We stash the current DIF object into the machine state: we need it
5771 * for subsequent access checking.
5772 */
5773 mstate->dtms_difo = difo;
5774
5775 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
5776
5777 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5778 opc = pc;
5779
5780 instr = text[pc++];
5781 r1 = DIF_INSTR_R1(instr);
5782 r2 = DIF_INSTR_R2(instr);
5783 rd = DIF_INSTR_RD(instr);
5784
5785 switch (DIF_INSTR_OP(instr)) {
5786 case DIF_OP_OR:
5787 regs[rd] = regs[r1] | regs[r2];
5788 break;
5789 case DIF_OP_XOR:
5790 regs[rd] = regs[r1] ^ regs[r2];
5791 break;
5792 case DIF_OP_AND:
5793 regs[rd] = regs[r1] & regs[r2];
5794 break;
5795 case DIF_OP_SLL:
5796 regs[rd] = regs[r1] << regs[r2];
5797 break;
5798 case DIF_OP_SRL:
5799 regs[rd] = regs[r1] >> regs[r2];
5800 break;
5801 case DIF_OP_SUB:
5802 regs[rd] = regs[r1] - regs[r2];
5803 break;
5804 case DIF_OP_ADD:
5805 regs[rd] = regs[r1] + regs[r2];
5806 break;
5807 case DIF_OP_MUL:
5808 regs[rd] = regs[r1] * regs[r2];
5809 break;
5810 case DIF_OP_SDIV:
5811 if (regs[r2] == 0) {
5812 regs[rd] = 0;
5813 *flags |= CPU_DTRACE_DIVZERO;
5814 } else {
5815 regs[rd] = (int64_t)regs[r1] /
5816 (int64_t)regs[r2];
5817 }
5818 break;
5819
5820 case DIF_OP_UDIV:
5821 if (regs[r2] == 0) {
5822 regs[rd] = 0;
5823 *flags |= CPU_DTRACE_DIVZERO;
5824 } else {
5825 regs[rd] = regs[r1] / regs[r2];
5826 }
5827 break;
5828
5829 case DIF_OP_SREM:
5830 if (regs[r2] == 0) {
5831 regs[rd] = 0;
5832 *flags |= CPU_DTRACE_DIVZERO;
5833 } else {
5834 regs[rd] = (int64_t)regs[r1] %
5835 (int64_t)regs[r2];
5836 }
5837 break;
5838
5839 case DIF_OP_UREM:
5840 if (regs[r2] == 0) {
5841 regs[rd] = 0;
5842 *flags |= CPU_DTRACE_DIVZERO;
5843 } else {
5844 regs[rd] = regs[r1] % regs[r2];
5845 }
5846 break;
5847
5848 case DIF_OP_NOT:
5849 regs[rd] = ~regs[r1];
5850 break;
5851 case DIF_OP_MOV:
5852 regs[rd] = regs[r1];
5853 break;
5854 case DIF_OP_CMP:
5855 cc_r = regs[r1] - regs[r2];
5856 cc_n = cc_r < 0;
5857 cc_z = cc_r == 0;
5858 cc_v = 0;
5859 cc_c = regs[r1] < regs[r2];
5860 break;
5861 case DIF_OP_TST:
5862 cc_n = cc_v = cc_c = 0;
5863 cc_z = regs[r1] == 0;
5864 break;
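 /*
  * Added note: the branch opcodes below consume these condition
  * codes in the SPARC style -- e.g. DIF_OP_BG (signed >) branches
  * when !(Z | (N ^ V)), while DIF_OP_BGU (unsigned >) branches
  * when !(C | Z).
  */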
5865 case DIF_OP_BA:
5866 pc = DIF_INSTR_LABEL(instr);
5867 break;
5868 case DIF_OP_BE:
5869 if (cc_z)
5870 pc = DIF_INSTR_LABEL(instr);
5871 break;
5872 case DIF_OP_BNE:
5873 if (cc_z == 0)
5874 pc = DIF_INSTR_LABEL(instr);
5875 break;
5876 case DIF_OP_BG:
5877 if ((cc_z | (cc_n ^ cc_v)) == 0)
5878 pc = DIF_INSTR_LABEL(instr);
5879 break;
5880 case DIF_OP_BGU:
5881 if ((cc_c | cc_z) == 0)
5882 pc = DIF_INSTR_LABEL(instr);
5883 break;
5884 case DIF_OP_BGE:
5885 if ((cc_n ^ cc_v) == 0)
5886 pc = DIF_INSTR_LABEL(instr);
5887 break;
5888 case DIF_OP_BGEU:
5889 if (cc_c == 0)
5890 pc = DIF_INSTR_LABEL(instr);
5891 break;
5892 case DIF_OP_BL:
5893 if (cc_n ^ cc_v)
5894 pc = DIF_INSTR_LABEL(instr);
5895 break;
5896 case DIF_OP_BLU:
5897 if (cc_c)
5898 pc = DIF_INSTR_LABEL(instr);
5899 break;
5900 case DIF_OP_BLE:
5901 if (cc_z | (cc_n ^ cc_v))
5902 pc = DIF_INSTR_LABEL(instr);
5903 break;
5904 case DIF_OP_BLEU:
5905 if (cc_c | cc_z)
5906 pc = DIF_INSTR_LABEL(instr);
5907 break;
5908 case DIF_OP_RLDSB:
5909 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5910 *flags |= CPU_DTRACE_KPRIV;
5911 *illval = regs[r1];
5912 break;
5913 }
5914 OS_FALLTHROUGH;
5915 case DIF_OP_LDSB:
5916 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5917 break;
5918 case DIF_OP_RLDSH:
5919 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5920 *flags |= CPU_DTRACE_KPRIV;
5921 *illval = regs[r1];
5922 break;
5923 }
5924 OS_FALLTHROUGH;
5925 case DIF_OP_LDSH:
5926 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5927 break;
5928 case DIF_OP_RLDSW:
5929 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5930 *flags |= CPU_DTRACE_KPRIV;
5931 *illval = regs[r1];
5932 break;
5933 }
5934 OS_FALLTHROUGH;
5935 case DIF_OP_LDSW:
5936 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5937 break;
5938 case DIF_OP_RLDUB:
5939 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5940 *flags |= CPU_DTRACE_KPRIV;
5941 *illval = regs[r1];
5942 break;
5943 }
5944 OS_FALLTHROUGH;
5945 case DIF_OP_LDUB:
5946 regs[rd] = dtrace_load8(regs[r1]);
5947 break;
5948 case DIF_OP_RLDUH:
5949 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5950 *flags |= CPU_DTRACE_KPRIV;
5951 *illval = regs[r1];
5952 break;
5953 }
5954 OS_FALLTHROUGH;
5955 case DIF_OP_LDUH:
5956 regs[rd] = dtrace_load16(regs[r1]);
5957 break;
5958 case DIF_OP_RLDUW:
5959 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5960 *flags |= CPU_DTRACE_KPRIV;
5961 *illval = regs[r1];
5962 break;
5963 }
5964 OS_FALLTHROUGH;
5965 case DIF_OP_LDUW:
5966 regs[rd] = dtrace_load32(regs[r1]);
5967 break;
5968 case DIF_OP_RLDX:
5969 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
5970 *flags |= CPU_DTRACE_KPRIV;
5971 *illval = regs[r1];
5972 break;
5973 }
5974 OS_FALLTHROUGH;
5975 case DIF_OP_LDX:
5976 regs[rd] = dtrace_load64(regs[r1]);
5977 break;
5978/*
5979 * Darwin 32-bit kernel may fetch from 64-bit user.
5980 * Do not cast regs to uintptr_t
5981 * DIF_OP_ULDSB,DIF_OP_ULDSH, DIF_OP_ULDSW, DIF_OP_ULDUB
5982 * DIF_OP_ULDUH, DIF_OP_ULDUW, DIF_OP_ULDX
5983 */
5984 case DIF_OP_ULDSB:
5985 regs[rd] = (int8_t)
5986 dtrace_fuword8(regs[r1]);
5987 break;
5988 case DIF_OP_ULDSH:
5989 regs[rd] = (int16_t)
5990 dtrace_fuword16(regs[r1]);
5991 break;
5992 case DIF_OP_ULDSW:
5993 regs[rd] = (int32_t)
5994 dtrace_fuword32(regs[r1]);
5995 break;
5996 case DIF_OP_ULDUB:
5997 regs[rd] =
5998 dtrace_fuword8(regs[r1]);
5999 break;
6000 case DIF_OP_ULDUH:
6001 regs[rd] =
6002 dtrace_fuword16(regs[r1]);
6003 break;
6004 case DIF_OP_ULDUW:
6005 regs[rd] =
6006 dtrace_fuword32(regs[r1]);
6007 break;
6008 case DIF_OP_ULDX:
6009 regs[rd] =
6010 dtrace_fuword64(regs[r1]);
6011 break;
6012 case DIF_OP_RET:
6013 rval = regs[rd];
6014 pc = textlen;
6015 break;
6016 case DIF_OP_NOP:
6017 break;
6018 case DIF_OP_SETX:
6019 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
6020 break;
6021 case DIF_OP_SETS:
6022 regs[rd] = (uint64_t)(uintptr_t)
6023 (strtab + DIF_INSTR_STRING(instr));
6024 break;
6025 case DIF_OP_SCMP: {
6026 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6027 uintptr_t s1 = regs[r1];
6028 uintptr_t s2 = regs[r2];
6029 size_t lim1 = sz, lim2 = sz;
6030
6031 if (s1 != 0 &&
6032 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6033 break;
6034 if (s2 != 0 &&
6035 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6036 break;
6037
6038 cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6039 MIN(lim1, lim2));
6040
6041 cc_n = cc_r < 0;
6042 cc_z = cc_r == 0;
6043 cc_v = cc_c = 0;
6044 break;
6045 }
6046 case DIF_OP_LDGA:
6047 regs[rd] = dtrace_dif_variable(mstate, state,
6048 r1, regs[r2]);
6049 break;
6050 case DIF_OP_LDGS:
6051 id = DIF_INSTR_VAR(instr);
6052
6053 if (id >= DIF_VAR_OTHER_UBASE) {
6054 uintptr_t a;
6055
6056 id -= DIF_VAR_OTHER_UBASE;
6057 svar = vstate->dtvs_globals[id];
6058 ASSERT(svar != NULL);
6059 v = &svar->dtsv_var;
6060
6061 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6062 regs[rd] = svar->dtsv_data;
6063 break;
6064 }
6065
6066 a = (uintptr_t)svar->dtsv_data;
6067
6068 if (*(uint8_t *)a == UINT8_MAX) {
6069 /*
6070 * If the 0th byte is set to UINT8_MAX
6071 * then this is to be treated as a
6072 * reference to a NULL variable.
6073 */
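 /*
  * Added note: by-ref statics carry an 8-byte header whose 0th
  * byte is this NULL sentinel; the payload itself starts at
  * a + sizeof (uint64_t), which is what gets loaded into regs[rd]
  * below.
  */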
6074 regs[rd] = 0;
6075 } else {
6076 regs[rd] = a + sizeof (uint64_t);
6077 }
6078
6079 break;
6080 }
6081
6082 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6083 break;
6084
6085 case DIF_OP_STGS:
6086 id = DIF_INSTR_VAR(instr);
6087
6088 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6089 id -= DIF_VAR_OTHER_UBASE;
6090
6091 VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6092 svar = vstate->dtvs_globals[id];
6093 ASSERT(svar != NULL);
6094 v = &svar->dtsv_var;
6095
6096 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6097 uintptr_t a = (uintptr_t)svar->dtsv_data;
6098 size_t lim;
6099
6100 ASSERT(a != 0);
6101 ASSERT(svar->dtsv_size != 0);
6102
6103 if (regs[rd] == 0) {
6104 *(uint8_t *)a = UINT8_MAX;
6105 break;
6106 } else {
6107 *(uint8_t *)a = 0;
6108 a += sizeof (uint64_t);
6109 }
6110 if (!dtrace_vcanload(
6111 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6112 &lim, mstate, vstate))
6113 break;
6114
6115 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6116 (void *)a, &v->dtdv_type, lim);
6117 break;
6118 }
6119
6120 svar->dtsv_data = regs[rd];
6121 break;
6122
6123 case DIF_OP_LDTA:
6124 /*
6125 * There are no DTrace built-in thread-local arrays at
6126 * present. This opcode is saved for future work.
6127 */
6128 *flags |= CPU_DTRACE_ILLOP;
6129 regs[rd] = 0;
6130 break;
6131
6132 case DIF_OP_LDLS:
6133 id = DIF_INSTR_VAR(instr);
6134
6135 if (id < DIF_VAR_OTHER_UBASE) {
6136 /*
6137 * For now, this has no meaning.
6138 */
6139 regs[rd] = 0;
6140 break;
6141 }
6142
6143 id -= DIF_VAR_OTHER_UBASE;
6144
6145 ASSERT(id < (uint_t)vstate->dtvs_nlocals);
6146 ASSERT(vstate->dtvs_locals != NULL);
6147 svar = vstate->dtvs_locals[id];
6148 ASSERT(svar != NULL);
6149 v = &svar->dtsv_var;
6150
6151 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6152 uintptr_t a = (uintptr_t)svar->dtsv_data;
6153 size_t sz = v->dtdv_type.dtdt_size;
6154
6155 sz += sizeof (uint64_t);
6156 ASSERT(svar->dtsv_size == (int)NCPU * sz);
6157 a += CPU->cpu_id * sz;
6158
6159 if (*(uint8_t *)a == UINT8_MAX) {
6160 /*
6161 * If the 0th byte is set to UINT8_MAX
6162 * then this is to be treated as a
6163 * reference to a NULL variable.
6164 */
6165 regs[rd] = 0;
6166 } else {
6167 regs[rd] = a + sizeof (uint64_t);
6168 }
6169
6170 break;
6171 }
6172
6173 ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
6174 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6175 regs[rd] = tmp[CPU->cpu_id];
6176 break;
6177
6178 case DIF_OP_STLS:
6179 id = DIF_INSTR_VAR(instr);
6180
6181 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6182 id -= DIF_VAR_OTHER_UBASE;
6183 VERIFY(id < (uint_t)vstate->dtvs_nlocals);
6184 ASSERT(vstate->dtvs_locals != NULL);
6185 svar = vstate->dtvs_locals[id];
6186 ASSERT(svar != NULL);
6187 v = &svar->dtsv_var;
6188
6189 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6190 uintptr_t a = (uintptr_t)svar->dtsv_data;
6191 size_t sz = v->dtdv_type.dtdt_size;
6192 size_t lim;
6193
6194 sz += sizeof (uint64_t);
6195 ASSERT(svar->dtsv_size == (int)NCPU * sz);
6196 a += CPU->cpu_id * sz;
6197
6198 if (regs[rd] == 0) {
6199 *(uint8_t *)a = UINT8_MAX;
6200 break;
6201 } else {
6202 *(uint8_t *)a = 0;
6203 a += sizeof (uint64_t);
6204 }
6205
6206 if (!dtrace_vcanload(
6207 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6208 &lim, mstate, vstate))
6209 break;
6210
6211 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6212 (void *)a, &v->dtdv_type, lim);
6213 break;
6214 }
6215
6216 ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
6217 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6218 tmp[CPU->cpu_id] = regs[rd];
6219 break;
6220
6221 case DIF_OP_LDTS: {
6222 dtrace_dynvar_t *dvar;
6223 dtrace_key_t *key;
6224
6225 id = DIF_INSTR_VAR(instr);
6226 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6227 id -= DIF_VAR_OTHER_UBASE;
6228 v = &vstate->dtvs_tlocals[id];
6229
6230 key = &tupregs[DIF_DTR_NREGS];
6231 key[0].dttk_value = (uint64_t)id;
6232 key[0].dttk_size = 0;
6233 DTRACE_TLS_THRKEY(key[1].dttk_value);
6234 key[1].dttk_size = 0;
6235
6236 dvar = dtrace_dynvar(dstate, 2, key,
6237 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6238 mstate, vstate);
6239
6240 if (dvar == NULL) {
6241 regs[rd] = 0;
6242 break;
6243 }
6244
6245 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6246 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6247 } else {
6248 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6249 }
6250
6251 break;
6252 }
6253
6254 case DIF_OP_STTS: {
6255 dtrace_dynvar_t *dvar;
6256 dtrace_key_t *key;
6257
6258 id = DIF_INSTR_VAR(instr);
6259 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6260 id -= DIF_VAR_OTHER_UBASE;
6261 VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6262
6263 key = &tupregs[DIF_DTR_NREGS];
6264 key[0].dttk_value = (uint64_t)id;
6265 key[0].dttk_size = 0;
6266 DTRACE_TLS_THRKEY(key[1].dttk_value);
6267 key[1].dttk_size = 0;
6268 v = &vstate->dtvs_tlocals[id];
6269
6270 dvar = dtrace_dynvar(dstate, 2, key,
6271 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6272 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6273 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6274 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6275
6276 /*
6277 * Given that we're storing to thread-local data,
6278 * we need to flush our predicate cache.
6279 */
6280 dtrace_set_thread_predcache(current_thread(), 0);
6281
6282 if (dvar == NULL)
6283 break;
6284
6285 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6286 size_t lim;
6287
6288 if (!dtrace_vcanload(
6289 (void *)(uintptr_t)regs[rd],
6290 &v->dtdv_type, &lim, mstate, vstate))
6291 break;
6292
6293 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6294 dvar->dtdv_data, &v->dtdv_type, lim);
6295 } else {
6296 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6297 }
6298
6299 break;
6300 }
6301
6302 case DIF_OP_SRA:
6303 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6304 break;
6305
6306 case DIF_OP_CALL:
6307 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6308 regs, tupregs, ttop, mstate, state);
6309 break;
6310
6311 case DIF_OP_PUSHTR:
6312 if (ttop == DIF_DTR_NREGS) {
6313 *flags |= CPU_DTRACE_TUPOFLOW;
6314 break;
6315 }
6316
6317 if (r1 == DIF_TYPE_STRING) {
6318 /*
6319 * If this is a string type and the size is 0,
6320 * we'll use the system-wide default string
6321 * size. Note that we are _not_ looking at
6322 * the value of the DTRACEOPT_STRSIZE option;
6323 * had this been set, we would expect to have
6324 * a non-zero size value in the "pushtr".
6325 */
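 /*
  * Added example: pushing the string "probe" with regs[r2] == 0
  * records a tuple-key size of 6 (strlen plus the terminating
  * NUL), measured against dtrace_strsize_default.
  */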
6326 tupregs[ttop].dttk_size =
6327 dtrace_strlen((char *)(uintptr_t)regs[rd],
6328 regs[r2] ? regs[r2] :
6329 dtrace_strsize_default) + 1;
6330 } else {
6331 if (regs[r2] > LONG_MAX) {
6332 *flags |= CPU_DTRACE_ILLOP;
6333 break;
6334 }
6335 tupregs[ttop].dttk_size = regs[r2];
6336 }
6337
6338 tupregs[ttop++].dttk_value = regs[rd];
6339 break;
6340
6341 case DIF_OP_PUSHTV:
6342 if (ttop == DIF_DTR_NREGS) {
6343 *flags |= CPU_DTRACE_TUPOFLOW;
6344 break;
6345 }
6346
6347 tupregs[ttop].dttk_value = regs[rd];
6348 tupregs[ttop++].dttk_size = 0;
6349 break;
6350
6351 case DIF_OP_POPTS:
6352 if (ttop != 0)
6353 ttop--;
6354 break;
6355
6356 case DIF_OP_FLUSHTS:
6357 ttop = 0;
6358 break;
6359
6360 case DIF_OP_LDGAA:
6361 case DIF_OP_LDTAA: {
6362 dtrace_dynvar_t *dvar;
6363 dtrace_key_t *key = tupregs;
6364 uint_t nkeys = ttop;
6365
6366 id = DIF_INSTR_VAR(instr);
6367 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6368 id -= DIF_VAR_OTHER_UBASE;
6369
6370 key[nkeys].dttk_value = (uint64_t)id;
6371 key[nkeys++].dttk_size = 0;
6372
6373 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6374 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6375 key[nkeys++].dttk_size = 0;
6376 VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6377 v = &vstate->dtvs_tlocals[id];
6378 } else {
6379 VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6380 v = &vstate->dtvs_globals[id]->dtsv_var;
6381 }
6382
6383 dvar = dtrace_dynvar(dstate, nkeys, key,
6384 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6385 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6386 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6387
6388 if (dvar == NULL) {
6389 regs[rd] = 0;
6390 break;
6391 }
6392
6393 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6394 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6395 } else {
6396 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6397 }
6398
6399 break;
6400 }
6401
6402 case DIF_OP_STGAA:
6403 case DIF_OP_STTAA: {
6404 dtrace_dynvar_t *dvar;
6405 dtrace_key_t *key = tupregs;
6406 uint_t nkeys = ttop;
6407
6408 id = DIF_INSTR_VAR(instr);
6409 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6410 id -= DIF_VAR_OTHER_UBASE;
6411
6412 key[nkeys].dttk_value = (uint64_t)id;
6413 key[nkeys++].dttk_size = 0;
6414
6415 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6416 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6417 key[nkeys++].dttk_size = 0;
6418 VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6419 v = &vstate->dtvs_tlocals[id];
6420 } else {
6421 VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6422 v = &vstate->dtvs_globals[id]->dtsv_var;
6423 }
6424
6425 dvar = dtrace_dynvar(dstate, nkeys, key,
6426 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6427 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6428 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6429 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6430
6431 if (dvar == NULL)
6432 break;
6433
6434 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6435 size_t lim;
6436
6437 if (!dtrace_vcanload(
6438 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6439 &lim, mstate, vstate))
6440 break;
6441
6442 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6443 dvar->dtdv_data, &v->dtdv_type, lim);
6444 } else {
6445 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6446 }
6447
6448 break;
6449 }
6450
6451 case DIF_OP_ALLOCS: {
6452 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6453 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6454
6455 /*
6456 * Rounding up the user allocation size could have
6457 * overflowed large, bogus allocations (like -1ULL) to
6458 * 0.
6459 */
6460 if (size < regs[r1] ||
6461 !DTRACE_INSCRATCH(mstate, size)) {
6462 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6463 regs[rd] = 0;
6464 break;
6465 }
6466
6467 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6468 mstate->dtms_scratch_ptr += size;
6469 regs[rd] = ptr;
6470 break;
6471 }
6472
6473 case DIF_OP_COPYS:
6474 if (!dtrace_canstore(regs[rd], regs[r2],
6475 mstate, vstate)) {
6476 *flags |= CPU_DTRACE_BADADDR;
6477 *illval = regs[rd];
6478 break;
6479 }
6480
6481 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6482 break;
6483
6484 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6485 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6486 break;
6487
6488 case DIF_OP_STB:
6489 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6490 *flags |= CPU_DTRACE_BADADDR;
6491 *illval = regs[rd];
6492 break;
6493 }
6494 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6495 break;
6496
6497 case DIF_OP_STH:
6498 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6499 *flags |= CPU_DTRACE_BADADDR;
6500 *illval = regs[rd];
6501 break;
6502 }
6503 if (regs[rd] & 1) {
6504 *flags |= CPU_DTRACE_BADALIGN;
6505 *illval = regs[rd];
6506 break;
6507 }
6508 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6509 break;
6510
6511 case DIF_OP_STW:
6512 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6513 *flags |= CPU_DTRACE_BADADDR;
6514 *illval = regs[rd];
6515 break;
6516 }
6517 if (regs[rd] & 3) {
6518 *flags |= CPU_DTRACE_BADALIGN;
6519 *illval = regs[rd];
6520 break;
6521 }
6522 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6523 break;
6524
6525 case DIF_OP_STX:
6526 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6527 *flags |= CPU_DTRACE_BADADDR;
6528 *illval = regs[rd];
6529 break;
6530 }
6531
6532 /*
6533 * Darwin kmem_zalloc() called from
6534 * dtrace_difo_init() is 4-byte aligned.
6535 */
6536 if (regs[rd] & 3) {
6537 *flags |= CPU_DTRACE_BADALIGN;
6538 *illval = regs[rd];
6539 break;
6540 }
6541 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6542 break;
6543 case DIF_OP_STRIP:
6544 regs[rd] = (uint64_t)dtrace_ptrauth_strip(
6545 (void*)regs[r1], r2);
6546 break;
6547 }
6548 }
6549
6550 if (!(*flags & CPU_DTRACE_FAULT))
6551 return (rval);
6552
6553 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6554 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6555
6556 return (0);
6557}
6558
6559 __attribute__((noinline))
6560static void
6561dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6562{
6563 dtrace_probe_t *probe = ecb->dte_probe;
6564 dtrace_provider_t *prov = probe->dtpr_provider;
6565 char c[DTRACE_FULLNAMELEN + 80], *str;
6566 const char *msg = "dtrace: breakpoint action at probe ";
6567 const char *ecbmsg = " (ecb ";
6568 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6569 uintptr_t val = (uintptr_t)ecb;
6570 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6571
6572 if (dtrace_destructive_disallow)
6573 return;
6574
6575 /*
6576 * It's impossible to be taking action on the NULL probe.
6577 */
6578 ASSERT(probe != NULL);
6579
6580 /*
6581 * This is a poor man's (destitute man's?) sprintf(): we want to
6582 * print the provider name, module name, function name and name of
6583 * the probe, along with the hex address of the ECB with the breakpoint
6584 * action -- all of which we must place in the character buffer by
6585 * hand.
6586 */
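 /*
  * Added illustration: the resulting message looks like
  * "dtrace: breakpoint action at probe fbt:mach_kernel:vm_fault:entry
  * (ecb ffffff8012345678)" (probe and address values invented here).
  */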
6587 while (*msg != '\0')
6588 c[i++] = *msg++;
6589
6590 for (str = prov->dtpv_name; *str != '\0'; str++)
6591 c[i++] = *str;
6592 c[i++] = ':';
6593
6594 for (str = probe->dtpr_mod; *str != '\0'; str++)
6595 c[i++] = *str;
6596 c[i++] = ':';
6597
6598 for (str = probe->dtpr_func; *str != '\0'; str++)
6599 c[i++] = *str;
6600 c[i++] = ':';
6601
6602 for (str = probe->dtpr_name; *str != '\0'; str++)
6603 c[i++] = *str;
6604
6605 while (*ecbmsg != '\0')
6606 c[i++] = *ecbmsg++;
6607
6608 while (shift >= 0) {
6609 mask = (uintptr_t)0xf << shift;
6610
6611 if (val >= ((uintptr_t)1 << shift))
6612 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6613 shift -= 4;
6614 }
6615
6616 c[i++] = ')';
6617 c[i] = '\0';
6618
6619 debug_enter(c);
6620}
6621
6622 __attribute__((noinline))
6623static void
6624dtrace_action_panic(dtrace_ecb_t *ecb)
6625{
6626 dtrace_probe_t *probe = ecb->dte_probe;
6627
6628 /*
6629 * It's impossible to be taking action on the NULL probe.
6630 */
6631 ASSERT(probe != NULL);
6632
6633 if (dtrace_destructive_disallow)
6634 return;
6635
6636 if (dtrace_panicked != NULL)
6637 return;
6638
6639 if (dtrace_casptr(&dtrace_panicked, NULL, current_thread()) != NULL)
6640 return;
6641
6642 /*
6643 * We won the right to panic. (We want to be sure that only one
6644 * thread calls panic() from dtrace_probe(), and that panic() is
6645 * called exactly once.)
6646 */
6647 panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6648 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6649 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6650
6651 /*
6652 * APPLE NOTE: this was for an old Mac OS X debug feature
6653 * allowing a return from panic(). Revisit someday.
6654 */
6655 dtrace_panicked = NULL;
6656}
6657
6658static void
6659dtrace_action_raise(uint64_t sig)
6660{
6661 if (dtrace_destructive_disallow)
6662 return;
6663
6664 if (sig >= NSIG) {
6665 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6666 return;
6667 }
6668
6669 /*
6670 * raise() has a queue depth of 1 -- we ignore all subsequent
6671 * invocations of the raise() action.
6672 */
6673
6674 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
6675
6676 if (uthread && uthread->t_dtrace_sig == 0) {
6677 uthread->t_dtrace_sig = sig;
6678 act_set_astbsd(current_thread());
6679 }
6680}
6681
6682static void
6683dtrace_action_stop(void)
6684{
6685 if (dtrace_destructive_disallow)
6686 return;
6687
6688 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
6689 if (uthread) {
6690 /*
6691 * The currently running process will be set to task_suspend
6692 * when it next leaves the kernel.
6693 */
6694 uthread->t_dtrace_stop = 1;
6695 act_set_astbsd(current_thread());
6696 }
6697}
6698
6699
6700/*
6701 * APPLE NOTE: pidresume works in conjunction with the dtrace stop action.
6702 * Both activate only when the currently running process next leaves the
6703 * kernel.
6704 */
6705static void
6706dtrace_action_pidresume(uint64_t pid)
6707{
6708 if (dtrace_destructive_disallow)
6709 return;
6710
6711 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
6712 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6713 return;
6714 }
6715 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
6716
6717 /*
6718 * When the currently running process leaves the kernel, it attempts to
6719 * task_resume the process (denoted by pid), if that pid appears to have
6720 * been stopped by dtrace_action_stop().
6721 * The currently running process has a pidresume() queue depth of 1 --
6722 * subsequent invocations of the pidresume() action are ignored.
6723 */
6724
6725 if (pid != 0 && uthread && uthread->t_dtrace_resumepid == 0) {
6726 uthread->t_dtrace_resumepid = pid;
6727 act_set_astbsd(current_thread());
6728 }
6729}
6730
6731 __attribute__((noinline))
6732static void
6733dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6734{
6735 hrtime_t now;
6736 volatile uint16_t *flags;
6737 dtrace_cpu_t *cpu = CPU;
6738
6739 if (dtrace_destructive_disallow)
6740 return;
6741
6742 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6743
6744 now = dtrace_gethrtime();
6745
6746 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6747 /*
6748 * We need to advance the mark to the current time.
6749 */
6750 cpu->cpu_dtrace_chillmark = now;
6751 cpu->cpu_dtrace_chilled = 0;
6752 }
6753
6754 /*
6755 * Now check to see if the requested chill time would take us over
6756 * the maximum amount of time allowed in the chill interval. (Or
6757 * worse, if the calculation itself induces overflow.)
6758 */
6759 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6760 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6761 *flags |= CPU_DTRACE_ILLOP;
6762 return;
6763 }
6764
6765 while (dtrace_gethrtime() - now < val)
6766 continue;
6767
6768 /*
6769 * Normally, we assure that the value of the variable "timestamp" does
6770 * not change within an ECB. The presence of chill() represents an
6771 * exception to this rule, however.
6772 */
6773 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6774 cpu->cpu_dtrace_chilled += val;
6775}
6776
6777 __attribute__((noinline))
6778static void
6779dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6780 uint64_t *buf, uint64_t arg)
6781{
6782 int nframes = DTRACE_USTACK_NFRAMES(arg);
6783 int strsize = DTRACE_USTACK_STRSIZE(arg);
6784 uint64_t *pcs = &buf[1], *fps;
6785 char *str = (char *)&pcs[nframes];
6786 int size, offs = 0, i, j;
6787 uintptr_t old = mstate->dtms_scratch_ptr, saved;
6788 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6789 char *sym;
6790
6791 /*
6792 * Should be taking a faster path if string space has not been
6793 * allocated.
6794 */
6795 ASSERT(strsize != 0);
6796
6797 /*
6798 * We will first allocate some temporary space for the frame pointers.
6799 */
6800 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6801 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6802 (nframes * sizeof (uint64_t));
6803
6804 if (!DTRACE_INSCRATCH(mstate, (uintptr_t)size)) {
6805 /*
6806 * Not enough room for our frame pointers -- need to indicate
6807 * that we ran out of scratch space.
6808 */
6809 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6810 return;
6811 }
6812
6813 mstate->dtms_scratch_ptr += size;
6814 saved = mstate->dtms_scratch_ptr;
6815
6816 /*
6817 * Now get a stack with both program counters and frame pointers.
6818 */
6819 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6820 dtrace_getufpstack(buf, fps, nframes + 1);
6821 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6822
6823 /*
6824 * If that faulted, we're cooked.
6825 */
6826 if (*flags & CPU_DTRACE_FAULT)
6827 goto out;
6828
6829 /*
6830 * Now we want to walk up the stack, calling the USTACK helper. For
6831 * each iteration, we restore the scratch pointer.
6832 */
6833 for (i = 0; i < nframes; i++) {
6834 mstate->dtms_scratch_ptr = saved;
6835
6836 if (offs >= strsize)
6837 break;
6838
6839 sym = (char *)(uintptr_t)dtrace_helper(
6840 DTRACE_HELPER_ACTION_USTACK,
6841 mstate, state, pcs[i], fps[i]);
6842
6843 /*
6844 * If we faulted while running the helper, we're going to
6845 * clear the fault and null out the corresponding string.
6846 */
6847 if (*flags & CPU_DTRACE_FAULT) {
6848 *flags &= ~CPU_DTRACE_FAULT;
6849 str[offs++] = '\0';
6850 continue;
6851 }
6852
6853 if (sym == NULL) {
6854 str[offs++] = '\0';
6855 continue;
6856 }
6857
6858 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6859
6860 /*
6861 * Now copy in the string that the helper returned to us.
6862 */
6863 for (j = 0; offs + j < strsize; j++) {
6864 if ((str[offs + j] = sym[j]) == '\0')
6865 break;
6866 }
6867
6868 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6869
6870 offs += j + 1;
6871 }
6872
6873 if (offs >= strsize) {
6874 /*
6875 * If we didn't have room for all of the strings, we don't
6876 * abort processing -- this needn't be a fatal error -- but we
6877 * still want to increment a counter (dts_stkstroverflows) to
6878 * allow this condition to be warned about. (If this is from
6879 * a jstack() action, it is easily tuned via jstackstrsize.)
6880 */
6881 dtrace_error(&state->dts_stkstroverflows);
6882 }
6883
6884 while (offs < strsize)
6885 str[offs++] = '\0';
6886
6887out:
6888 mstate->dtms_scratch_ptr = old;
6889}
6890
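/*
 * Added note: copies a by-reference value (kernel via dtrace_load8()
 * or user via dtrace_fuword8()) into the principal buffer one byte at
 * a time; for string records, loading stops at the first NUL and the
 * rest of the record is zero-filled unless we're mid-tuple.
 */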
6891 __attribute__((noinline))
6892static void
6893dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6894 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6895{
6896 volatile uint16_t *flags;
6897 uint64_t val = *valp;
6898 size_t valoffs = *valoffsp;
6899
6900 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6901 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6902
6903 /*
6904 * If this is a string, we're going to only load until we find the zero
6905 * byte -- after which we'll store zero bytes.
6906 */
6907 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6908 char c = '\0' + 1;
6909 size_t s;
6910
6911 for (s = 0; s < size; s++) {
6912 if (c != '\0' && dtkind == DIF_TF_BYREF) {
6913 c = dtrace_load8(val++);
6914 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6915 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6916 c = dtrace_fuword8((user_addr_t)(uintptr_t)val++);
6917 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6918 if (*flags & CPU_DTRACE_FAULT)
6919 break;
6920 }
6921
6922 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6923
6924 if (c == '\0' && intuple)
6925 break;
6926 }
6927 } else {
6928 uint8_t c;
6929 while (valoffs < end) {
6930 if (dtkind == DIF_TF_BYREF) {
6931 c = dtrace_load8(val++);
6932 } else if (dtkind == DIF_TF_BYUREF) {
6933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6934 c = dtrace_fuword8((user_addr_t)(uintptr_t)val++);
6935 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6936 if (*flags & CPU_DTRACE_FAULT)
6937 break;
6938 }
6939
6940 DTRACE_STORE(uint8_t, tomax,
6941 valoffs++, c);
6942 }
6943 }
6944
6945 *valp = val;
6946 *valoffsp = valoffs;
6947}
6948
6949/*
6950 * Disables interrupts and sets the per-thread inprobe flag. When DEBUG is
6951 * defined, we also assert that we are not recursing unless the probe ID is an
6952 * error probe.
6953 */
6954static dtrace_icookie_t
6955dtrace_probe_enter(dtrace_id_t id)
6956{
6957 thread_t thread = current_thread();
6958 uint16_t inprobe;
6959
6960 dtrace_icookie_t cookie;
6961
6962 cookie = dtrace_interrupt_disable();
6963
6964 /*
6965 * Unless this is an ERROR probe, we are not allowed to recurse in
6966 * dtrace_probe(). Recursing into a DTrace probe usually means that a
6967 * function is instrumented that should not have been instrumented or
6968 * that the ordering guarantee of the records will be violated,
6969 * resulting in unexpected output. If there is an exception to this
6970 * assertion, a new case should be added.
6971 */
6972 inprobe = dtrace_get_thread_inprobe(thread);
6973 VERIFY(inprobe == 0 ||
6974 id == dtrace_probeid_error);
6975 ASSERT(inprobe < UINT16_MAX);
6976 dtrace_set_thread_inprobe(thread, inprobe + 1);
6977
6978 return (cookie);
6979}
6980
6981/*
6982 * Clears the per-thread inprobe flag and enables interrupts.
6983 */
6984static void
6985dtrace_probe_exit(dtrace_icookie_t cookie)
6986{
6987 thread_t thread = current_thread();
6988 uint16_t inprobe = dtrace_get_thread_inprobe(thread);
6989
6990 ASSERT(inprobe > 0);
6991 dtrace_set_thread_inprobe(thread, inprobe - 1);
6992
6993#if INTERRUPT_MASKED_DEBUG
6994 ml_spin_debug_reset(thread);
6995#endif /* INTERRUPT_MASKED_DEBUG */
6996
6997 dtrace_interrupt_enable(cookie);
6998}
6999
7000/*
7001 * If you're looking for the epicenter of DTrace, you just found it. This
7002 * is the function called by the provider to fire a probe -- from which all
7003 * subsequent probe-context DTrace activity emanates.
7004 */
7005void
7006dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
7007 uint64_t arg2, uint64_t arg3, uint64_t arg4)
7008{
7009 processorid_t cpuid;
7010 dtrace_icookie_t cookie;
7011 dtrace_probe_t *probe;
7012 dtrace_mstate_t mstate;
7013 dtrace_ecb_t *ecb;
7014 dtrace_action_t *act;
7015 intptr_t offs;
7016 size_t size;
7017 int vtime, onintr;
7018 volatile uint16_t *flags;
7019 hrtime_t now;
7020
7021 cookie = dtrace_probe_enter(id);
7022
7023 /* Ensure that probe id is valid. */
7024 if (id - 1 >= (dtrace_id_t)dtrace_nprobes) {
7025 dtrace_probe_exit(cookie);
7026 return;
7027 }
7028
7029 probe = dtrace_probes[id - 1];
7030 if (probe == NULL) {
7031 dtrace_probe_exit(cookie);
7032 return;
7033 }
7034
7035 cpuid = CPU->cpu_id;
7036 onintr = CPU_ON_INTR(CPU);
7037
7038 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
7039 probe->dtpr_predcache == dtrace_get_thread_predcache(current_thread())) {
7040 /*
7041 * We have hit in the predicate cache; we know that
7042 * this predicate would evaluate to be false.
7043 */
7044 dtrace_probe_exit(cookie);
7045 return;
7046 }
7047
7048 if (panic_quiesce) {
7049 /*
7050 * We don't trace anything if we're panicking.
7051 */
7052 dtrace_probe_exit(cookie);
7053 return;
7054 }
7055
7056#if !defined(__APPLE__)
7057 now = dtrace_gethrtime();
7058 vtime = dtrace_vtime_references != 0;
7059
7060 if (vtime && curthread->t_dtrace_start)
7061 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
7062#else
7063 /*
7064 * APPLE NOTE: The time spent entering DTrace and arriving
7065 * at this point is attributed to the current thread;
7066 * instead it should accrue to DTrace. FIXME
7067 */
7068 vtime = dtrace_vtime_references != 0;
7069
7070 if (vtime)
7071 {
7072 int64_t dtrace_accum_time, recent_vtime;
7073 thread_t thread = current_thread();
7074
7075 dtrace_accum_time = dtrace_get_thread_tracing(thread); /* Time spent inside DTrace so far (nanoseconds) */
7076
7077 if (dtrace_accum_time >= 0) {
7078 recent_vtime = dtrace_abs_to_nano(dtrace_calc_thread_recent_vtime(thread)); /* up to the moment thread vtime */
7079
7080 recent_vtime = recent_vtime - dtrace_accum_time; /* Time without DTrace contribution */
7081
7082 dtrace_set_thread_vtime(thread, recent_vtime);
7083 }
7084 }
7085
7086 now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */
7087#endif /* __APPLE__ */
7088
7089 /*
7090 * APPLE NOTE: A provider may call dtrace_probe_error() in lieu of
7091 * dtrace_probe() in some circumstances. See, e.g. fasttrap_isa.c.
7092 * However the provider has no access to ECB context, so passes
7093 * 0 through "arg0" and the probe_id of the overridden probe as arg1.
7094 * Detect that here and cons up a viable state (from the probe_id).
7095 */
7096 if (dtrace_probeid_error == id && 0 == arg0) {
7097 dtrace_id_t ftp_id = (dtrace_id_t)arg1;
7098 dtrace_probe_t *ftp_probe = dtrace_probes[ftp_id - 1];
7099 dtrace_ecb_t *ftp_ecb = ftp_probe->dtpr_ecb;
7100
7101 if (NULL != ftp_ecb) {
7102 dtrace_state_t *ftp_state = ftp_ecb->dte_state;
7103
7104 arg0 = (uint64_t)(uintptr_t)ftp_state;
7105 arg1 = ftp_ecb->dte_epid;
7106 /*
7107 * args[2-4] established by caller.
7108 */
7109 ftp_state->dts_arg_error_illval = -1; /* arg5 */
7110 }
7111 }
7112
7113 mstate.dtms_difo = NULL;
7114 mstate.dtms_probe = probe;
7115 mstate.dtms_strtok = 0;
7116 mstate.dtms_arg[0] = arg0;
7117 mstate.dtms_arg[1] = arg1;
7118 mstate.dtms_arg[2] = arg2;
7119 mstate.dtms_arg[3] = arg3;
7120 mstate.dtms_arg[4] = arg4;
7121
7122 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
7123
7124 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
7125 dtrace_predicate_t *pred = ecb->dte_predicate;
7126 dtrace_state_t *state = ecb->dte_state;
7127 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
7128 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
7129 dtrace_vstate_t *vstate = &state->dts_vstate;
7130 dtrace_provider_t *prov = probe->dtpr_provider;
7131 uint64_t tracememsize = 0;
7132 int committed = 0;
7133 caddr_t tomax;
7134
7135 /*
7136 * A little subtlety with the following (seemingly innocuous)
7137 * declaration of the automatic 'val': by looking at the
7138 * code, you might think that it could be declared in the
7139 * action processing loop, below. (That is, it's only used in
7140 * the action processing loop.) However, it must be declared
7141 * out of that scope because in the case of DIF expression
7142 * arguments to aggregating actions, one iteration of the
7143 * action loop will use the last iteration's value.
7144 */
7145#ifdef lint
7146 uint64_t val = 0;
7147#else
7148 uint64_t val = 0;
7149#endif
7150
7151 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
7152 *flags &= ~CPU_DTRACE_ERROR;
7153
7154 if (prov == dtrace_provider) {
7155 /*
7156 * If dtrace itself is the provider of this probe,
7157 * we're only going to continue processing the ECB if
7158 * arg0 (the dtrace_state_t) is equal to the ECB's
7159 * creating state. (This prevents disjoint consumers
7160 * from seeing one another's metaprobes.)
7161 */
7162 if (arg0 != (uint64_t)(uintptr_t)state)
7163 continue;
7164 }
7165
7166 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
7167 /*
7168 * We're not currently active. If our provider isn't
7169 * the dtrace pseudo provider, we're not interested.
7170 */
7171 if (prov != dtrace_provider)
7172 continue;
7173
7174 /*
7175 * Now we must further check if we are in the BEGIN
7176 * probe. If we are, we will only continue processing
7177 * if we're still in WARMUP -- if one BEGIN enabling
7178 * has invoked the exit() action, we don't want to
7179 * evaluate subsequent BEGIN enablings.
7180 */
7181 if (probe->dtpr_id == dtrace_probeid_begin &&
7182 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7183 ASSERT(state->dts_activity ==
7184 DTRACE_ACTIVITY_DRAINING);
7185 continue;
7186 }
7187 }
7188
7189 if (ecb->dte_cond) {
7190 /*
7191 * If the dte_cond bits indicate that this
7192 * consumer is only allowed to see user-mode firings
7193 * of this probe, call the provider's dtps_usermode()
7194 * entry point to check that the probe was fired
7195 * while in a user context. Skip this ECB if that's
7196 * not the case.
7197 */
7198 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
7199 prov->dtpv_pops.dtps_usermode &&
7200 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
7201 probe->dtpr_id, probe->dtpr_arg) == 0)
7202 continue;
7203
7204 /*
7205 * This is more subtle than it looks. We have to be
7206 * absolutely certain that CRED() isn't going to
7207 * change out from under us so it's only legit to
7208 * examine that structure if we're in constrained
7209 * situations. Currently, the only time we'll do this
7210 * check is if a non-super-user has enabled the
7211 * profile or syscall providers -- providers that
7212 * allow visibility of all processes. For the
7213 * profile case, the check above will ensure that
7214 * we're examining a user context.
7215 */
7216 if (ecb->dte_cond & DTRACE_COND_OWNER) {
7217 cred_t *cr;
7218 cred_t *s_cr =
7219 ecb->dte_state->dts_cred.dcr_cred;
7220 proc_t *proc;
7221#pragma unused(proc) /* __APPLE__ */
7222
7223 ASSERT(s_cr != NULL);
7224
7225 /*
7226 * XXX this is hackish, but so is setting a variable
7227 * XXX in a McCarthy OR...
7228 */
7229 if ((cr = dtrace_CRED()) == NULL ||
7230 posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_uid ||
7231 posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_ruid ||
7232 posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_suid ||
7233 posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_gid ||
7234 posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_rgid ||
7235 posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_sgid ||
7236#if !defined(__APPLE__)
7237 (proc = ttoproc(curthread)) == NULL ||
7238 (proc->p_flag & SNOCD))
7239#else
7240 1) /* APPLE NOTE: Darwin omits "No Core Dump" flag */
7241#endif /* __APPLE__ */
7242 continue;
7243 }
7244
7245 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
7246 cred_t *cr;
7247 cred_t *s_cr =
7248 ecb->dte_state->dts_cred.dcr_cred;
7249#pragma unused(cr, s_cr) /* __APPLE__ */
7250
7251 ASSERT(s_cr != NULL);
7252
7253#if !defined(__APPLE__)
7254 if ((cr = CRED()) == NULL ||
7255 s_cr->cr_zone->zone_id !=
7256 cr->cr_zone->zone_id)
7257 continue;
7258#else
7259 /* APPLE NOTE: Darwin doesn't do zones. */
7260#endif /* __APPLE__ */
7261 }
7262 }
7263
7264 if (now - state->dts_alive > dtrace_deadman_timeout) {
7265 /*
7266 * We seem to be dead. Unless we (a) have kernel
7267 * destructive permissions (b) have explicitly enabled
7268 * destructive actions and (c) destructive actions have
7269 * not been disabled, we're going to transition into
7270 * the KILLED state, from which no further processing
7271 * on this state will be performed.
7272 */
7273 if (!dtrace_priv_kernel_destructive(state) ||
7274 !state->dts_cred.dcr_destructive ||
7275 dtrace_destructive_disallow) {
7276 void *activity = &state->dts_activity;
7277 dtrace_activity_t current;
7278
7279 do {
7280 current = state->dts_activity;
7281 } while (dtrace_cas32(activity, current,
7282 DTRACE_ACTIVITY_KILLED) != current);
7283
7284 continue;
7285 }
7286 }
7287
7288 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7289 ecb->dte_alignment, state, &mstate)) < 0)
7290 continue;
7291
7292 tomax = buf->dtb_tomax;
7293 ASSERT(tomax != NULL);
7294
7295 /*
7296 * Build and store the record header corresponding to the ECB.
7297 */
7298 if (ecb->dte_size != 0) {
7299 dtrace_rechdr_t dtrh;
7300
7301 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7302 mstate.dtms_timestamp = dtrace_gethrtime();
7303 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7304 }
7305
7306 ASSERT(ecb->dte_size >= sizeof(dtrace_rechdr_t));
7307
7308 dtrh.dtrh_epid = ecb->dte_epid;
7309 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, mstate.dtms_timestamp);
7310 DTRACE_STORE(dtrace_rechdr_t, tomax, offs, dtrh);
7311 }
7312
7313 mstate.dtms_epid = ecb->dte_epid;
7314 mstate.dtms_present |= DTRACE_MSTATE_EPID;
7315
7316 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7317 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
7318 else
7319 mstate.dtms_access = 0;
7320
7321 if (pred != NULL) {
7322 dtrace_difo_t *dp = pred->dtp_difo;
7323 uint64_t rval;
7324
7325 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7326
7327 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7328 dtrace_cacheid_t cid = probe->dtpr_predcache;
7329
7330 if (cid != DTRACE_CACHEIDNONE && !onintr) {
7331 /*
7332 * Update the predicate cache...
7333 */
7334 ASSERT(cid == pred->dtp_cacheid);
7335
7336 dtrace_set_thread_predcache(current_thread(), cid);
7337 }
7338
7339 continue;
7340 }
7341 }
7342
7343 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7344 act != NULL; act = act->dta_next) {
7345 size_t valoffs;
7346 dtrace_difo_t *dp;
7347 dtrace_recdesc_t *rec = &act->dta_rec;
7348
7349 size = rec->dtrd_size;
7350 valoffs = offs + rec->dtrd_offset;
7351
7352 if (DTRACEACT_ISAGG(act->dta_kind)) {
7353 uint64_t v = 0xbad;
7354 dtrace_aggregation_t *agg;
7355
7356 agg = (dtrace_aggregation_t *)act;
7357
7358 if ((dp = act->dta_difo) != NULL)
7359 v = dtrace_dif_emulate(dp,
7360 &mstate, vstate, state);
7361
7362 if (*flags & CPU_DTRACE_ERROR)
7363 continue;
7364
7365 /*
7366 * Note that we always pass the expression
7367 * value from the previous iteration of the
7368 * action loop. This value will only be used
7369 * if there is an expression argument to the
7370 * aggregating action, denoted by the
7371 * dtag_hasarg field.
7372 */
7373 dtrace_aggregate(agg, buf,
7374 offs, aggbuf, v, val);
7375 continue;
7376 }
7377
7378 switch (act->dta_kind) {
7379 case DTRACEACT_STOP:
7380 if (dtrace_priv_proc_destructive(state))
7381 dtrace_action_stop();
7382 continue;
7383
7384 case DTRACEACT_BREAKPOINT:
7385 if (dtrace_priv_kernel_destructive(state))
7386 dtrace_action_breakpoint(ecb);
7387 continue;
7388
7389 case DTRACEACT_PANIC:
7390 if (dtrace_priv_kernel_destructive(state))
7391 dtrace_action_panic(ecb);
7392 continue;
7393
7394 case DTRACEACT_STACK:
7395 if (!dtrace_priv_kernel(state))
7396 continue;
7397
7398 dtrace_getpcstack((pc_t *)(tomax + valoffs),
7399 size / sizeof (pc_t), probe->dtpr_aframes,
7400 DTRACE_ANCHORED(probe) ? NULL :
7401 (uint32_t *)(uintptr_t)arg0);
7402 continue;
7403
7404 case DTRACEACT_JSTACK:
7405 case DTRACEACT_USTACK:
7406 if (!dtrace_priv_proc(state))
7407 continue;
7408
7409 /*
7410 * See comment in DIF_VAR_PID.
7411 */
7412 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7413 CPU_ON_INTR(CPU)) {
7414 int depth = DTRACE_USTACK_NFRAMES(
7415 rec->dtrd_arg) + 1;
7416
7417 dtrace_bzero((void *)(tomax + valoffs),
7418 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7419 + depth * sizeof (uint64_t));
7420
7421 continue;
7422 }
7423
7424 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7425 curproc->p_dtrace_helpers != NULL) {
7426 /*
7427 * This is the slow path -- we have
7428 * allocated string space, and we're
7429 * getting the stack of a process that
7430 * has helpers. Call into a separate
7431 * routine to perform this processing.
7432 */
7433 dtrace_action_ustack(&mstate, state,
7434 (uint64_t *)(tomax + valoffs),
7435 rec->dtrd_arg);
7436 continue;
7437 }
7438
7439 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7440 dtrace_getupcstack((uint64_t *)
7441 (tomax + valoffs),
7442 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7443 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7444 continue;
7445
7446 default:
7447 break;
7448 }
7449
7450 dp = act->dta_difo;
7451 ASSERT(dp != NULL);
7452
7453 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7454
7455 if (*flags & CPU_DTRACE_ERROR)
7456 continue;
7457
7458 switch (act->dta_kind) {
7459 case DTRACEACT_SPECULATE: {
7460 dtrace_rechdr_t *dtrh = NULL;
7461
7462 ASSERT(buf == &state->dts_buffer[cpuid]);
7463 buf = dtrace_speculation_buffer(state,
7464 cpuid, val);
7465
7466 if (buf == NULL) {
7467 *flags |= CPU_DTRACE_DROP;
7468 continue;
7469 }
7470
7471 offs = dtrace_buffer_reserve(buf,
7472 ecb->dte_needed, ecb->dte_alignment,
7473 state, NULL);
7474
7475 if (offs < 0) {
7476 *flags |= CPU_DTRACE_DROP;
7477 continue;
7478 }
7479
7480 tomax = buf->dtb_tomax;
7481 ASSERT(tomax != NULL);
7482
7483 if (ecb->dte_size == 0)
7484 continue;
7485
7486 ASSERT(ecb->dte_size >= sizeof(dtrace_rechdr_t));
7487 dtrh = ((void *)(tomax + offs));
7488 dtrh->dtrh_epid = ecb->dte_epid;
7489
7490 /*
7491 * When the speculation is committed, all of
7492 * the records in the speculative buffer will
7493 * have their timestamps set to the commit
7494 * time. Until then, it is set to a sentinel
7495 * value, for debuggability.
7496 */
7497 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7498
7499 continue;
7500 }
2d21ac55
A
7501
7502 case DTRACEACT_CHILL:
7503 if (dtrace_priv_kernel_destructive(state))
7504 dtrace_action_chill(&mstate, val);
7505 continue;
7506
7507 case DTRACEACT_RAISE:
7508 if (dtrace_priv_proc_destructive(state))
7509 dtrace_action_raise(val);
7510 continue;
7511
fe8ab488 7512 case DTRACEACT_PIDRESUME: /* __APPLE__ */
6d2010ae
A
7513 if (dtrace_priv_proc_destructive(state))
7514 dtrace_action_pidresume(val);
7515 continue;
6d2010ae 7516
2d21ac55
A
7517 case DTRACEACT_COMMIT:
7518 ASSERT(!committed);
7519
7520 /*
7521 * We need to commit our buffer state.
7522 */
7523 if (ecb->dte_size)
7524 buf->dtb_offset = offs + ecb->dte_size;
7525 buf = &state->dts_buffer[cpuid];
7526 dtrace_speculation_commit(state, cpuid, val);
7527 committed = 1;
7528 continue;
7529
7530 case DTRACEACT_DISCARD:
7531 dtrace_speculation_discard(state, cpuid, val);
7532 continue;
7533
7534 case DTRACEACT_DIFEXPR:
7535 case DTRACEACT_LIBACT:
7536 case DTRACEACT_PRINTF:
7537 case DTRACEACT_PRINTA:
7538 case DTRACEACT_SYSTEM:
7539 case DTRACEACT_FREOPEN:
fe8ab488
A
7540 case DTRACEACT_APPLEBINARY: /* __APPLE__ */
7541 case DTRACEACT_TRACEMEM:
7542 break;
7543
7544 case DTRACEACT_TRACEMEM_DYNSIZE:
7545 tracememsize = val;
2d21ac55
A
7546 break;
7547
7548 case DTRACEACT_SYM:
7549 case DTRACEACT_MOD:
7550 if (!dtrace_priv_kernel(state))
7551 continue;
7552 break;
7553
2d21ac55
A
7554 case DTRACEACT_USYM:
7555 case DTRACEACT_UMOD:
7556 case DTRACEACT_UADDR: {
7557 if (!dtrace_priv_proc(state))
7558 continue;
7559
7560 DTRACE_STORE(uint64_t, tomax,
39236c6e 7561 valoffs, (uint64_t)dtrace_proc_selfpid());
2d21ac55
A
7562 DTRACE_STORE(uint64_t, tomax,
7563 valoffs + sizeof (uint64_t), val);
7564
7565 continue;
7566 }
2d21ac55
A
7567
7568 case DTRACEACT_EXIT: {
7569 /*
7570 * For the exit action, we are going to attempt
7571 * to atomically set our activity to be
7572 * draining. If this fails (either because
7573 * another CPU has beat us to the exit action,
7574 * or because our current activity is something
7575 * other than ACTIVE or WARMUP), we will
7576 * continue. This assures that the exit action
7577 * can be successfully recorded at most once
7578 * when we're in the ACTIVE state. If we're
7579 * encountering the exit() action while in
7580 * COOLDOWN, however, we want to honor the new
7581 * status code. (We know that we're the only
7582 * thread in COOLDOWN, so there is no race.)
7583 */
7584 void *activity = &state->dts_activity;
7585 dtrace_activity_t current = state->dts_activity;
7586
7587 if (current == DTRACE_ACTIVITY_COOLDOWN)
7588 break;
7589
7590 if (current != DTRACE_ACTIVITY_WARMUP)
7591 current = DTRACE_ACTIVITY_ACTIVE;
7592
7593 if (dtrace_cas32(activity, current,
7594 DTRACE_ACTIVITY_DRAINING) != current) {
7595 *flags |= CPU_DTRACE_DROP;
7596 continue;
7597 }
7598
7599 break;
7600 }
7601
7602 default:
7603 ASSERT(0);
7604 }
7605
3e170ce0 7606 if (dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF)) {
2d21ac55
A
7607 uintptr_t end = valoffs + size;
7608
fe8ab488
A
7609 if (tracememsize != 0 &&
7610 valoffs + tracememsize < end)
7611 {
7612 end = valoffs + tracememsize;
7613 tracememsize = 0;
7614 }
7615
3e170ce0
A
7616 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7617 !dtrace_vcanload((void *)(uintptr_t)val,
39037602 7618 &dp->dtdo_rtype, NULL, &mstate, vstate))
3e170ce0 7619 {
2d21ac55
A
7620 continue;
7621 }
7622
3e170ce0
A
7623 dtrace_store_by_ref(dp, tomax, size, &valoffs,
7624 &val, end, act->dta_intuple,
7625 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7626 DIF_TF_BYREF: DIF_TF_BYUREF);
2d21ac55
A
7627
7628 continue;
7629 }
7630
7631 switch (size) {
7632 case 0:
7633 break;
7634
7635 case sizeof (uint8_t):
7636 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7637 break;
7638 case sizeof (uint16_t):
7639 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7640 break;
7641 case sizeof (uint32_t):
7642 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7643 break;
7644 case sizeof (uint64_t):
7645 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7646 break;
7647 default:
7648 /*
7649 * Any other size should have been returned by
7650 * reference, not by value.
7651 */
7652 ASSERT(0);
7653 break;
7654 }
7655 }
7656
7657 if (*flags & CPU_DTRACE_DROP)
7658 continue;
7659
7660 if (*flags & CPU_DTRACE_FAULT) {
7661 int ndx;
7662 dtrace_action_t *err;
7663
7664 buf->dtb_errors++;
7665
7666 if (probe->dtpr_id == dtrace_probeid_error) {
7667 /*
7668 * There's nothing we can do -- we had an
7669 * error on the error probe. We bump an
7670 * error counter to at least indicate that
7671 * this condition happened.
7672 */
7673 dtrace_error(&state->dts_dblerrors);
7674 continue;
7675 }
7676
7677 if (vtime) {
7678 /*
7679 * Before recursing on dtrace_probe(), we
7680 * need to explicitly clear out our start
7681 * time to prevent it from being accumulated
7682 * into t_dtrace_vtime.
7683 */
fe8ab488
A
7684
7685 /*
7686 * Darwin sets the sign bit on t_dtrace_tracing
7687 * to suspend accumulation into it.
7688 */
2d21ac55 7689 dtrace_set_thread_tracing(current_thread(),
fe8ab488
A
7690 (1ULL<<63) | dtrace_get_thread_tracing(current_thread()));
7691
2d21ac55
A
7692 }
7693
7694 /*
7695 * Iterate over the actions to figure out which action
7696 * we were processing when we experienced the error.
7697 * Note that act points _past_ the faulting action; if
7698 * act is ecb->dte_action, the fault was in the
7699 * predicate; if it's ecb->dte_action->dta_next, it's
7700 * in action #1, and so on.
7701 */
7702 for (err = ecb->dte_action, ndx = 0;
7703 err != act; err = err->dta_next, ndx++)
7704 continue;
7705
7706 dtrace_probe_error(state, ecb->dte_epid, ndx,
7707 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7708 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7709 cpu_core[cpuid].cpuc_dtrace_illval);
7710
7711 continue;
7712 }
7713
7714 if (!committed)
7715 buf->dtb_offset = offs + ecb->dte_size;
7716 }
7717
fe8ab488 7718 /* FIXME: On Darwin the time spent leaving DTrace from this point to the rti is attributed
b0d623f7 7719 to the current thread. Instead it should accrue to DTrace. */
2d21ac55
A
7720 if (vtime) {
7721 thread_t thread = current_thread();
7722 int64_t t = dtrace_get_thread_tracing(thread);
7723
cb323159 7724 if (t >= 0) {
2d21ac55
A
7725 /* Usual case, accumulate time spent here into t_dtrace_tracing */
7726 dtrace_set_thread_tracing(thread, t + (dtrace_gethrtime() - now));
cb323159 7727 } else {
2d21ac55 7728 /* Return from error recursion. No accumulation, just clear the sign bit on t_dtrace_tracing. */
cb323159 7729 dtrace_set_thread_tracing(thread, (~(1ULL<<63)) & t);
2d21ac55
A
7730 }
7731 }
2d21ac55 7732
cb323159 7733 dtrace_probe_exit(cookie);
2d21ac55 7734}
2d21ac55
A
7735
7736/*
7737 * DTrace Probe Hashing Functions
7738 *
7739 * The functions in this section (and indeed, the functions in remaining
7740 * sections) are not _called_ from probe context. (Any exceptions to this are
7741 * marked with a "Note:".) Rather, they are called from elsewhere in the
7742 * DTrace framework to look-up probes in, add probes to and remove probes from
7743 * the DTrace probe hashes. (Each probe is hashed by each element of the
7744 * probe tuple -- allowing for fast lookups, regardless of what was
7745 * specified.)
7746 */
7747static uint_t
b0d623f7 7748dtrace_hash_str(const char *p)
2d21ac55
A
7749{
7750 unsigned int g;
7751 uint_t hval = 0;
7752
7753 while (*p) {
7754 hval = (hval << 4) + *p++;
7755 if ((g = (hval & 0xf0000000)) != 0)
7756 hval ^= g >> 24;
7757 hval &= ~g;
7758 }
7759 return (hval);
7760}
7761
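/*
 * Illustrative sketch, not part of the original source: dtrace_hash_str()
 * above is the classic ELF-style shift-and-fold string hash.  Each character
 * is shifted into the low bits; whenever bits reach the top nibble they are
 * folded back down and cleared, so long strings keep mixing across all 32
 * bits.  A minimal user-space equivalent for experimentation:
 */
#if 0 /* example only */
#include <stdio.h>

static unsigned int
example_hash_str(const char *p)
{
	unsigned int g, hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;	/* shift in the next character */
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;	/* fold the top nibble back in */
		hval &= ~g;			/* ... and clear it */
	}
	return (hval);
}

int
main(void)
{
	/* Equal strings always hash identically; the hash picks buckets below. */
	printf("%x %x\n", example_hash_str("fbt"), example_hash_str("syscall"));
	return (0);
}
#endif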
d9a64523
A
7762static const char*
7763dtrace_strkey_probe_provider(void *elm, uintptr_t offs)
7764{
7765#pragma unused(offs)
7766 dtrace_probe_t *probe = (dtrace_probe_t*)elm;
7767 return probe->dtpr_provider->dtpv_name;
7768}
7769
7770static const char*
7771dtrace_strkey_offset(void *elm, uintptr_t offs)
7772{
7773 return ((char *)((uintptr_t)(elm) + offs));
7774}
7775
7776static const char*
7777dtrace_strkey_deref_offset(void *elm, uintptr_t offs)
7778{
7779 return *((char **)((uintptr_t)(elm) + offs));
7780}
7781
2d21ac55 7782static dtrace_hash_t *
d9a64523 7783dtrace_hash_create(dtrace_strkey_f func, uintptr_t arg, uintptr_t nextoffs, uintptr_t prevoffs)
2d21ac55
A
7784{
7785 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7786
d9a64523
A
7787 hash->dth_getstr = func;
7788 hash->dth_stroffs = arg;
2d21ac55
A
7789 hash->dth_nextoffs = nextoffs;
7790 hash->dth_prevoffs = prevoffs;
7791
7792 hash->dth_size = 1;
7793 hash->dth_mask = hash->dth_size - 1;
7794
7795 hash->dth_tab = kmem_zalloc(hash->dth_size *
7796 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7797
7798 return (hash);
7799}
7800
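/*
 * Illustrative sketch, not part of the original source: the hash is generic
 * over its element type.  Callers pass a key-extraction function plus the
 * byte offsets of the element's next/prev links, so the same code can chain
 * dtrace_probe_t by provider, module, function, or name.  A hash over
 * dtrace_string_t keyed by dtst_str might be created like this at attach
 * time (the example_* names are hypothetical):
 */
#if 0 /* example only */
static dtrace_hash_t *example_strings_hash;

static void
example_init_strings_hash(void)
{
	example_strings_hash = dtrace_hash_create(dtrace_strkey_offset,
	    offsetof(dtrace_string_t, dtst_str),
	    offsetof(dtrace_string_t, dtst_next),
	    offsetof(dtrace_string_t, dtst_prev));
}
#endif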
fe8ab488
A
7801/*
7802 * APPLE NOTE: dtrace_hash_destroy is not used.
7803 * It is called by dtrace_detach which is not
7804 * currently implemented. Revisit someday.
7805 */
7806#if !defined(__APPLE__)
2d21ac55
A
7807static void
7808dtrace_hash_destroy(dtrace_hash_t *hash)
7809{
b0d623f7 7810#if DEBUG
2d21ac55
A
7811 int i;
7812
7813 for (i = 0; i < hash->dth_size; i++)
7814 ASSERT(hash->dth_tab[i] == NULL);
7815#endif
7816
7817 kmem_free(hash->dth_tab,
7818 hash->dth_size * sizeof (dtrace_hashbucket_t *));
7819 kmem_free(hash, sizeof (dtrace_hash_t));
7820}
7821#endif /* __APPLE__ */
7822
7823static void
7824dtrace_hash_resize(dtrace_hash_t *hash)
7825{
7826 int size = hash->dth_size, i, ndx;
7827 int new_size = hash->dth_size << 1;
7828 int new_mask = new_size - 1;
7829 dtrace_hashbucket_t **new_tab, *bucket, *next;
7830
7831 ASSERT((new_size & new_mask) == 0);
7832
7833 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7834
7835 for (i = 0; i < size; i++) {
7836 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
d9a64523 7837 void *elm = bucket->dthb_chain;
2d21ac55 7838
d9a64523
A
7839 ASSERT(elm != NULL);
7840 ndx = DTRACE_HASHSTR(hash, elm) & new_mask;
2d21ac55
A
7841
7842 next = bucket->dthb_next;
7843 bucket->dthb_next = new_tab[ndx];
7844 new_tab[ndx] = bucket;
7845 }
7846 }
7847
7848 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7849 hash->dth_tab = new_tab;
7850 hash->dth_size = new_size;
7851 hash->dth_mask = new_mask;
7852}
7853
7854static void
d9a64523 7855dtrace_hash_add(dtrace_hash_t *hash, void *new)
2d21ac55
A
7856{
7857 int hashval = DTRACE_HASHSTR(hash, new);
7858 int ndx = hashval & hash->dth_mask;
7859 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
d9a64523 7860 void **nextp, **prevp;
2d21ac55
A
7861
7862 for (; bucket != NULL; bucket = bucket->dthb_next) {
7863 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7864 goto add;
7865 }
7866
7867 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7868 dtrace_hash_resize(hash);
7869 dtrace_hash_add(hash, new);
7870 return;
7871 }
7872
7873 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7874 bucket->dthb_next = hash->dth_tab[ndx];
7875 hash->dth_tab[ndx] = bucket;
7876 hash->dth_nbuckets++;
7877
7878add:
7879 nextp = DTRACE_HASHNEXT(hash, new);
7880 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7881 *nextp = bucket->dthb_chain;
7882
7883 if (bucket->dthb_chain != NULL) {
7884 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7885 ASSERT(*prevp == NULL);
7886 *prevp = new;
7887 }
7888
7889 bucket->dthb_chain = new;
7890 bucket->dthb_len++;
7891}
7892
d9a64523
A
7893static void *
7894dtrace_hash_lookup_string(dtrace_hash_t *hash, const char *str)
2d21ac55 7895{
d9a64523 7896 int hashval = dtrace_hash_str(str);
2d21ac55
A
7897 int ndx = hashval & hash->dth_mask;
7898 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7899
7900 for (; bucket != NULL; bucket = bucket->dthb_next) {
d9a64523 7901 if (strcmp(str, DTRACE_GETSTR(hash, bucket->dthb_chain)) == 0)
2d21ac55
A
7902 return (bucket->dthb_chain);
7903 }
7904
7905 return (NULL);
7906}
7907
d9a64523
A
7908static dtrace_probe_t *
7909dtrace_hash_lookup(dtrace_hash_t *hash, void *template)
7910{
7911 return dtrace_hash_lookup_string(hash, DTRACE_GETSTR(hash, template));
7912}
7913
2d21ac55 7914static int
d9a64523 7915dtrace_hash_collisions(dtrace_hash_t *hash, void *template)
2d21ac55
A
7916{
7917 int hashval = DTRACE_HASHSTR(hash, template);
7918 int ndx = hashval & hash->dth_mask;
7919 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7920
7921 for (; bucket != NULL; bucket = bucket->dthb_next) {
7922 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7923 return (bucket->dthb_len);
7924 }
7925
fe8ab488 7926 return (0);
2d21ac55
A
7927}
7928
7929static void
d9a64523 7930dtrace_hash_remove(dtrace_hash_t *hash, void *elm)
2d21ac55 7931{
d9a64523 7932 int ndx = DTRACE_HASHSTR(hash, elm) & hash->dth_mask;
2d21ac55
A
7933 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7934
d9a64523
A
7935 void **prevp = DTRACE_HASHPREV(hash, elm);
7936 void **nextp = DTRACE_HASHNEXT(hash, elm);
2d21ac55
A
7937
7938 /*
d9a64523 7939 * Find the bucket that we're removing this elm from.
2d21ac55
A
7940 */
7941 for (; bucket != NULL; bucket = bucket->dthb_next) {
d9a64523 7942 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, elm))
2d21ac55
A
7943 break;
7944 }
7945
7946 ASSERT(bucket != NULL);
7947
7948 if (*prevp == NULL) {
7949 if (*nextp == NULL) {
7950 /*
d9a64523 7951 * The removed element was the only element on this
2d21ac55
A
7952 * bucket; we need to remove the bucket.
7953 */
7954 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7955
d9a64523 7956 ASSERT(bucket->dthb_chain == elm);
2d21ac55
A
7957 ASSERT(b != NULL);
7958
7959 if (b == bucket) {
7960 hash->dth_tab[ndx] = bucket->dthb_next;
7961 } else {
7962 while (b->dthb_next != bucket)
7963 b = b->dthb_next;
7964 b->dthb_next = bucket->dthb_next;
7965 }
7966
7967 ASSERT(hash->dth_nbuckets > 0);
7968 hash->dth_nbuckets--;
7969 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7970 return;
7971 }
7972
7973 bucket->dthb_chain = *nextp;
7974 } else {
7975 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7976 }
7977
7978 if (*nextp != NULL)
7979 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7980}
7981
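/*
 * Illustrative sketch, not part of the original source: a typical round trip
 * through the hash.  Lookups take a "template" element with only its key
 * field filled in; nothing but the key string is consulted, so a partial
 * stack-allocated struct suffices.  Assumes dtrace_lock is held and `probe`
 * is a fully constructed dtrace_probe_t; "open" is a hypothetical key:
 */
#if 0 /* example only */
static void
example_hash_round_trip(dtrace_probe_t *probe)
{
	dtrace_probe_t template = {
		.dtpr_func = (char *)(uintptr_t)"open",
	};
	dtrace_probe_t *found;

	dtrace_hash_add(dtrace_byfunc, probe);	/* chain by function name */
	found = dtrace_hash_lookup(dtrace_byfunc, &template);
	if (found != NULL)
		dtrace_hash_remove(dtrace_byfunc, found);	/* unlink again */
}
#endif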
7982/*
7983 * DTrace Utility Functions
7984 *
7985 * These are random utility functions that are _not_ called from probe context.
7986 */
7987static int
7988dtrace_badattr(const dtrace_attribute_t *a)
7989{
7990 return (a->dtat_name > DTRACE_STABILITY_MAX ||
7991 a->dtat_data > DTRACE_STABILITY_MAX ||
7992 a->dtat_class > DTRACE_CLASS_MAX);
7993}
7994
7995/*
d9a64523
A
7996 * Returns a dtrace-managed copy of a string, and will
7997 * deduplicate copies of the same string.
7998 * If the specified string is NULL, returns an empty string.
2d21ac55 7999 */
b0d623f7 8000static char *
d9a64523 8001dtrace_strref(const char *str)
b0d623f7 8002{
d9a64523 8003 dtrace_string_t *s = NULL;
b0d623f7 8004 size_t bufsize = (str != NULL ? strlen(str) : 0) + 1;
b0d623f7 8005
d9a64523 8006 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 8007
d9a64523
A
8008 if (str == NULL)
8009 str = "";
8010
8011 for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
8012 s = *(DTRACE_HASHNEXT(dtrace_strings, s))) {
8013 if (strncmp(str, s->dtst_str, bufsize) != 0) {
8014 continue;
8015 }
8016 ASSERT(s->dtst_refcount != UINT32_MAX);
8017 s->dtst_refcount++;
8018 return s->dtst_str;
8019 }
8020
8021 s = kmem_zalloc(sizeof(dtrace_string_t) + bufsize, KM_SLEEP);
8022 s->dtst_refcount = 1;
8023 (void) strlcpy(s->dtst_str, str, bufsize);
8024
8025 dtrace_hash_add(dtrace_strings, s);
8026
8027 return s->dtst_str;
8028}
8029
8030static void
8031dtrace_strunref(const char *str)
8032{
8033 ASSERT(str != NULL);
8034 dtrace_string_t *s = NULL;
8035 size_t bufsize = strlen(str) + 1;
8036
8037 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8038
8039 for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
8040 s = *(DTRACE_HASHNEXT(dtrace_strings, s))) {
8041 if (strncmp(str, s->dtst_str, bufsize) != 0) {
8042 continue;
8043 }
8044 ASSERT(s->dtst_refcount != 0);
8045 s->dtst_refcount--;
8046 if (s->dtst_refcount == 0) {
8047 dtrace_hash_remove(dtrace_strings, s);
8048 kmem_free(s, sizeof(dtrace_string_t) + bufsize);
8049 }
8050 return;
8051 }
8052 panic("attempt to unref non-existent string %s", str);
b0d623f7 8053}
2d21ac55
A
8054
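/*
 * Illustrative sketch, not part of the original source: dtrace_strref()
 * interns strings -- equal strings share a single refcounted copy, which is
 * what lets dtrace_match_string() below compare keys by pointer equality.
 * Every dtrace_strref() must be balanced by a dtrace_strunref() while
 * holding dtrace_lock:
 */
#if 0 /* example only */
static void
example_intern(void)
{
	char *a, *b;

	lck_mtx_lock(&dtrace_lock);
	a = dtrace_strref("mach_kernel");
	b = dtrace_strref("mach_kernel");
	ASSERT(a == b);		/* one interned copy; refcount is now 2 */
	dtrace_strunref(a);
	dtrace_strunref(b);	/* refcount drops to 0; entry is freed */
	lck_mtx_unlock(&dtrace_lock);
}
#endif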
8055#define DTRACE_ISALPHA(c) \
8056 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
8057
8058static int
8059dtrace_badname(const char *s)
8060{
8061 char c;
8062
8063 if (s == NULL || (c = *s++) == '\0')
8064 return (0);
8065
8066 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
8067 return (1);
8068
8069 while ((c = *s++) != '\0') {
8070 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
8071 c != '-' && c != '_' && c != '.' && c != '`')
8072 return (1);
8073 }
8074
8075 return (0);
8076}
8077
8078static void
8079dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
8080{
8081 uint32_t priv;
8082
8083 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
39037602 8084 if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
5ba3f43e 8085 priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER;
39037602
A
8086 }
8087 else {
8088 priv = DTRACE_PRIV_ALL;
8089 }
5ba3f43e
A
8090 *uidp = 0;
8091 *zoneidp = 0;
2d21ac55
A
8092 } else {
8093 *uidp = crgetuid(cr);
8094 *zoneidp = crgetzoneid(cr);
8095
8096 priv = 0;
8097 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
8098 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
8099 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
8100 priv |= DTRACE_PRIV_USER;
8101 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
8102 priv |= DTRACE_PRIV_PROC;
8103 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
8104 priv |= DTRACE_PRIV_OWNER;
8105 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
8106 priv |= DTRACE_PRIV_ZONEOWNER;
8107 }
8108
8109 *privp = priv;
8110}
8111
8112#ifdef DTRACE_ERRDEBUG
8113static void
8114dtrace_errdebug(const char *str)
8115{
b0d623f7 8116 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
2d21ac55
A
8117 int occupied = 0;
8118
8119 lck_mtx_lock(&dtrace_errlock);
8120 dtrace_errlast = str;
b0d623f7 8121 dtrace_errthread = (kthread_t *)current_thread();
2d21ac55
A
8122
8123 while (occupied++ < DTRACE_ERRHASHSZ) {
8124 if (dtrace_errhash[hval].dter_msg == str) {
8125 dtrace_errhash[hval].dter_count++;
8126 goto out;
8127 }
8128
8129 if (dtrace_errhash[hval].dter_msg != NULL) {
8130 hval = (hval + 1) % DTRACE_ERRHASHSZ;
8131 continue;
8132 }
8133
8134 dtrace_errhash[hval].dter_msg = str;
8135 dtrace_errhash[hval].dter_count = 1;
8136 goto out;
8137 }
8138
8139 panic("dtrace: undersized error hash");
8140out:
8141 lck_mtx_unlock(&dtrace_errlock);
8142}
8143#endif
8144
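/*
 * Illustrative sketch, not part of the original source: dtrace_errdebug()
 * uses open addressing with linear probing -- on a collision it walks
 * forward (wrapping modulo DTRACE_ERRHASHSZ) until it finds either the same
 * message pointer or an empty slot.  A toy user-space version of the same
 * probing loop, with a hypothetical table of 8 slots:
 */
#if 0 /* example only */
#define EX_HASHSZ 8
static const char *ex_msg[EX_HASHSZ];
static int ex_cnt[EX_HASHSZ];

static void
example_errdebug(const char *str, unsigned int hash)
{
	int hval = hash % EX_HASHSZ, tries = 0;

	while (tries++ < EX_HASHSZ) {
		if (ex_msg[hval] == str) {	/* same message: bump count */
			ex_cnt[hval]++;
			return;
		}
		if (ex_msg[hval] != NULL) {	/* collision: probe forward */
			hval = (hval + 1) % EX_HASHSZ;
			continue;
		}
		ex_msg[hval] = str;		/* empty slot: claim it */
		ex_cnt[hval] = 1;
		return;
	}
	/* Table full -- the kernel version panics ("undersized error hash"). */
}
#endif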
8145/*
8146 * DTrace Matching Functions
8147 *
8148 * These functions are used to match groups of probes, given some elements of
8149 * a probe tuple, or some globbed expressions for elements of a probe tuple.
8150 */
8151static int
8152dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
8153 zoneid_t zoneid)
8154{
8155 if (priv != DTRACE_PRIV_ALL) {
8156 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
8157 uint32_t match = priv & ppriv;
8158
8159 /*
8160 * No PRIV_DTRACE_* privileges...
8161 */
8162 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
8163 DTRACE_PRIV_KERNEL)) == 0)
8164 return (0);
8165
8166 /*
8167 * No matching bits, but there were bits to match...
8168 */
8169 if (match == 0 && ppriv != 0)
8170 return (0);
8171
8172 /*
8173 * Need to have permissions to the process, but don't...
8174 */
8175 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
8176 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
8177 return (0);
8178 }
8179
8180 /*
8181 * Need to be in the same zone unless we possess the
8182 * privilege to examine all zones.
8183 */
8184 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
8185 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
8186 return (0);
8187 }
8188 }
8189
8190 return (1);
8191}
8192
8193/*
8194 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
8195 * consists of input pattern strings and an ops-vector to evaluate them.
8196 * This function returns >0 for match, 0 for no match, and <0 for error.
8197 */
8198static int
8199dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
8200 uint32_t priv, uid_t uid, zoneid_t zoneid)
8201{
8202 dtrace_provider_t *pvp = prp->dtpr_provider;
8203 int rv;
8204
8205 if (pvp->dtpv_defunct)
8206 return (0);
8207
8208 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
8209 return (rv);
8210
8211 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
8212 return (rv);
8213
8214 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
8215 return (rv);
8216
8217 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
8218 return (rv);
8219
8220 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
8221 return (0);
8222
8223 return (rv);
8224}
8225
8226/*
8227 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
8228 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
8229 * libc's version, the kernel version only applies to 8-bit ASCII strings.
8230 * In addition, all of the recursion cases except for '*' matching have been
8231 * unwound. For '*', we still implement recursive evaluation, but a depth
8232 * counter is maintained and matching is aborted if we recurse too deep.
8233 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
8234 */
8235static int
8236dtrace_match_glob(const char *s, const char *p, int depth)
8237{
8238 const char *olds;
8239 char s1, c;
8240 int gs;
8241
8242 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
8243 return (-1);
8244
8245 if (s == NULL)
8246 s = ""; /* treat NULL as empty string */
8247
8248top:
8249 olds = s;
8250 s1 = *s++;
8251
8252 if (p == NULL)
8253 return (0);
8254
8255 if ((c = *p++) == '\0')
8256 return (s1 == '\0');
8257
8258 switch (c) {
8259 case '[': {
8260 int ok = 0, notflag = 0;
8261 char lc = '\0';
8262
8263 if (s1 == '\0')
8264 return (0);
8265
8266 if (*p == '!') {
8267 notflag = 1;
8268 p++;
8269 }
8270
8271 if ((c = *p++) == '\0')
8272 return (0);
8273
8274 do {
8275 if (c == '-' && lc != '\0' && *p != ']') {
8276 if ((c = *p++) == '\0')
8277 return (0);
8278 if (c == '\\' && (c = *p++) == '\0')
8279 return (0);
8280
8281 if (notflag) {
8282 if (s1 < lc || s1 > c)
8283 ok++;
8284 else
8285 return (0);
8286 } else if (lc <= s1 && s1 <= c)
8287 ok++;
8288
8289 } else if (c == '\\' && (c = *p++) == '\0')
8290 return (0);
8291
8292 lc = c; /* save left-hand 'c' for next iteration */
8293
8294 if (notflag) {
8295 if (s1 != c)
8296 ok++;
8297 else
8298 return (0);
8299 } else if (s1 == c)
8300 ok++;
8301
8302 if ((c = *p++) == '\0')
8303 return (0);
8304
8305 } while (c != ']');
8306
8307 if (ok)
8308 goto top;
8309
8310 return (0);
8311 }
8312
8313 case '\\':
8314 if ((c = *p++) == '\0')
8315 return (0);
f427ee49 8316 OS_FALLTHROUGH;
2d21ac55
A
8317
8318 default:
8319 if (c != s1)
8320 return (0);
f427ee49 8321 OS_FALLTHROUGH;
2d21ac55
A
8322
8323 case '?':
8324 if (s1 != '\0')
8325 goto top;
8326 return (0);
8327
8328 case '*':
8329 while (*p == '*')
8330 p++; /* consecutive *'s are identical to a single one */
8331
8332 if (*p == '\0')
8333 return (1);
8334
8335 for (s = olds; *s != '\0'; s++) {
8336 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
8337 return (gs);
8338 }
8339
8340 return (0);
8341 }
8342}
8343
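/*
 * Illustrative sketch, not part of the original source: expected results
 * from the matcher above (>0 match, 0 no match, <0 recursion error).  The
 * probe-name strings are hypothetical:
 */
#if 0 /* example only */
static void
example_glob(void)
{
	ASSERT(dtrace_match_glob("syscall", "sys*", 0) > 0);	/* '*' suffix */
	ASSERT(dtrace_match_glob("read", "r??d", 0) > 0);	/* '?' wildcards */
	ASSERT(dtrace_match_glob("tcp", "[st]cp", 0) > 0);	/* class */
	ASSERT(dtrace_match_glob("ucp", "[!st]cp", 0) > 0);	/* negated class */
	ASSERT(dtrace_match_glob("mmap", "m[a-l]ap", 0) == 0);	/* 'm' > 'l' */
}
#endif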
8344/*ARGSUSED*/
8345static int
8346dtrace_match_string(const char *s, const char *p, int depth)
8347{
b0d623f7 8348#pragma unused(depth) /* __APPLE__ */
d9a64523
A
8349 return (s != NULL && s == p);
8350}
fe8ab488 8351
d9a64523
A
8352/*ARGSUSED*/
8353static int
8354dtrace_match_module(const char *s, const char *p, int depth)
8355{
8356#pragma unused(depth) /* __APPLE__ */
8357 size_t len;
8358 if (s == NULL || p == NULL)
8359 return (0);
8360
8361 len = strlen(p);
8362
8363 if (strncmp(p, s, len) != 0)
8364 return (0);
8365
8366 if (s[len] == '.' || s[len] == '\0')
8367 return (1);
8368
8369 return (0);
2d21ac55
A
8370}
8371
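/*
 * Illustrative sketch, not part of the original source: dtrace_match_module()
 * treats the pattern as a module-name prefix that must end at a '.'
 * boundary, so an unversioned module name still matches its dot-qualified
 * variants.  The module names are hypothetical:
 */
#if 0 /* example only */
static void
example_module_match(void)
{
	ASSERT(dtrace_match_module("foo", "foo", 0) == 1);	/* exact */
	ASSERT(dtrace_match_module("foo.2", "foo", 0) == 1);	/* dot-qualified */
	ASSERT(dtrace_match_module("foobar", "foo", 0) == 0);	/* no boundary */
}
#endif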
8372/*ARGSUSED*/
8373static int
8374dtrace_match_nul(const char *s, const char *p, int depth)
8375{
b0d623f7 8376#pragma unused(s, p, depth) /* __APPLE__ */
2d21ac55
A
8377 return (1); /* always match the empty pattern */
8378}
8379
8380/*ARGSUSED*/
8381static int
8382dtrace_match_nonzero(const char *s, const char *p, int depth)
8383{
b0d623f7 8384#pragma unused(p, depth) /* __APPLE__ */
2d21ac55
A
8385 return (s != NULL && s[0] != '\0');
8386}
8387
8388static int
8389dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
d190cdc3 8390 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *, void *), void *arg1, void *arg2)
2d21ac55 8391{
d9a64523
A
8392 dtrace_probe_t *probe;
8393 dtrace_provider_t prov_template = {
8394 .dtpv_name = (char *)(uintptr_t)pkp->dtpk_prov
8395 };
8396
8397 dtrace_probe_t template = {
8398 .dtpr_provider = &prov_template,
8399 .dtpr_mod = (char *)(uintptr_t)pkp->dtpk_mod,
8400 .dtpr_func = (char *)(uintptr_t)pkp->dtpk_func,
8401 .dtpr_name = (char *)(uintptr_t)pkp->dtpk_name
8402 };
8403
2d21ac55 8404 dtrace_hash_t *hash = NULL;
6d2010ae 8405 int len, rc, best = INT_MAX, nmatched = 0;
2d21ac55
A
8406 dtrace_id_t i;
8407
5ba3f43e 8408 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
8409
8410 /*
8411 * If the probe ID is specified in the key, just lookup by ID and
8412 * invoke the match callback once if a matching probe is found.
8413 */
8414 if (pkp->dtpk_id != DTRACE_IDNONE) {
8415 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8416 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
d190cdc3 8417 if ((*matched)(probe, arg1, arg2) == DTRACE_MATCH_FAIL)
6d2010ae 8418 return (DTRACE_MATCH_FAIL);
2d21ac55
A
8419 nmatched++;
8420 }
8421 return (nmatched);
8422 }
8423
2d21ac55 8424 /*
d9a64523
A
8425 * We want to find the most distinct of the provider name, module name,
8426 * function name, and name. So for each one that is not a glob
8427 * pattern or empty string, we perform a lookup in the corresponding
8428 * hash and use the hash table with the fewest collisions to do our
8429 * search.
2d21ac55 8430 */
d9a64523
A
8431 if (pkp->dtpk_pmatch == &dtrace_match_string &&
8432 (len = dtrace_hash_collisions(dtrace_byprov, &template)) < best) {
8433 best = len;
8434 hash = dtrace_byprov;
8435 }
8436
2d21ac55
A
8437 if (pkp->dtpk_mmatch == &dtrace_match_string &&
8438 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8439 best = len;
8440 hash = dtrace_bymod;
8441 }
8442
8443 if (pkp->dtpk_fmatch == &dtrace_match_string &&
8444 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8445 best = len;
8446 hash = dtrace_byfunc;
8447 }
8448
8449 if (pkp->dtpk_nmatch == &dtrace_match_string &&
8450 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8451 best = len;
8452 hash = dtrace_byname;
8453 }
8454
8455 /*
8456 * If we did not select a hash table, iterate over every probe and
8457 * invoke our callback for each one that matches our input probe key.
8458 */
8459 if (hash == NULL) {
b0d623f7 8460 for (i = 0; i < (dtrace_id_t)dtrace_nprobes; i++) {
2d21ac55
A
8461 if ((probe = dtrace_probes[i]) == NULL ||
8462 dtrace_match_probe(probe, pkp, priv, uid,
8463 zoneid) <= 0)
8464 continue;
8465
8466 nmatched++;
8467
d190cdc3 8468 if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
6d2010ae
A
8469 if (rc == DTRACE_MATCH_FAIL)
8470 return (DTRACE_MATCH_FAIL);
8471 break;
8472 }
2d21ac55
A
8473 }
8474
8475 return (nmatched);
8476 }
8477
8478 /*
8479 * If we selected a hash table, iterate over each probe of the same key
8480 * name and invoke the callback for every probe that matches the other
8481 * attributes of our input probe key.
8482 */
8483 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8484 probe = *(DTRACE_HASHNEXT(hash, probe))) {
8485
8486 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8487 continue;
8488
8489 nmatched++;
8490
d190cdc3 8491 if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
6d2010ae
A
8492 if (rc == DTRACE_MATCH_FAIL)
8493 return (DTRACE_MATCH_FAIL);
8494 break;
8495 }
2d21ac55
A
8496 }
8497
8498 return (nmatched);
8499}
8500
8501/*
8502 * Return the function pointer dtrace_match_probe() should use to compare the
8503 * specified pattern with a string. For NULL or empty patterns, we select
8504 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
8505 * For non-empty non-glob strings, we use dtrace_match_string().
8506 */
8507static dtrace_probekey_f *
8508dtrace_probekey_func(const char *p)
8509{
8510 char c;
8511
8512 if (p == NULL || *p == '\0')
8513 return (&dtrace_match_nul);
8514
8515 while ((c = *p++) != '\0') {
8516 if (c == '[' || c == '?' || c == '*' || c == '\\')
8517 return (&dtrace_match_glob);
8518 }
8519
8520 return (&dtrace_match_string);
8521}
8522
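/*
 * Illustrative sketch, not part of the original source: how the fields of a
 * description such as syscall::read*:entry map onto match functions:
 */
#if 0 /* example only */
static void
example_probekey_func(void)
{
	ASSERT(dtrace_probekey_func("syscall") == &dtrace_match_string);
	ASSERT(dtrace_probekey_func("") == &dtrace_match_nul);	/* empty field */
	ASSERT(dtrace_probekey_func(NULL) == &dtrace_match_nul);
	ASSERT(dtrace_probekey_func("read*") == &dtrace_match_glob);
}
#endif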
d9a64523
A
8523static dtrace_probekey_f *
8524dtrace_probekey_module_func(const char *p)
8525{
8526 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8527
8528 dtrace_probekey_f *f = dtrace_probekey_func(p);
8529 if (f == &dtrace_match_string) {
8530 dtrace_probe_t template = {
8531 .dtpr_mod = (char *)(uintptr_t)p,
8532 };
8533 if (dtrace_hash_lookup(dtrace_bymod, &template) == NULL) {
8534 return (&dtrace_match_module);
8535 }
8536 return (&dtrace_match_string);
8537 }
8538 return f;
8539}
8540
2d21ac55
A
8541/*
8542 * Build a probe comparison key for use with dtrace_match_probe() from the
8543 * given probe description. By convention, a null key only matches anchored
8544 * probes: if each field is the empty string, reset dtpk_fmatch to
8545 * dtrace_match_nonzero().
8546 */
8547static void
8548dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8549{
d9a64523
A
8550
8551 pkp->dtpk_prov = dtrace_strref(pdp->dtpd_provider);
2d21ac55
A
8552 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8553
d9a64523
A
8554 pkp->dtpk_mod = dtrace_strref(pdp->dtpd_mod);
8555 pkp->dtpk_mmatch = dtrace_probekey_module_func(pdp->dtpd_mod);
2d21ac55 8556
d9a64523 8557 pkp->dtpk_func = dtrace_strref(pdp->dtpd_func);
2d21ac55
A
8558 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8559
d9a64523 8560 pkp->dtpk_name = dtrace_strref(pdp->dtpd_name);
2d21ac55
A
8561 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8562
8563 pkp->dtpk_id = pdp->dtpd_id;
8564
8565 if (pkp->dtpk_id == DTRACE_IDNONE &&
8566 pkp->dtpk_pmatch == &dtrace_match_nul &&
8567 pkp->dtpk_mmatch == &dtrace_match_nul &&
8568 pkp->dtpk_fmatch == &dtrace_match_nul &&
8569 pkp->dtpk_nmatch == &dtrace_match_nul)
8570 pkp->dtpk_fmatch = &dtrace_match_nonzero;
8571}
8572
d9a64523
A
8573static void
8574dtrace_probekey_release(dtrace_probekey_t *pkp)
8575{
8576 dtrace_strunref(pkp->dtpk_prov);
8577 dtrace_strunref(pkp->dtpk_mod);
8578 dtrace_strunref(pkp->dtpk_func);
8579 dtrace_strunref(pkp->dtpk_name);
8580}
8581
39037602
A
8582static int
8583dtrace_cond_provider_match(dtrace_probedesc_t *desc, void *data)
8584{
8585 if (desc == NULL)
8586 return 1;
8587
8588 dtrace_probekey_f *func = dtrace_probekey_func(desc->dtpd_provider);
8589
813fb2f6 8590 return func((char*)data, desc->dtpd_provider, 0);
39037602
A
8591}
8592
2d21ac55
A
8593/*
8594 * DTrace Provider-to-Framework API Functions
8595 *
8596 * These functions implement much of the Provider-to-Framework API, as
8597 * described in <sys/dtrace.h>. The parts of the API not in this section are
8598 * the functions in the API for probe management (found below), and
8599 * dtrace_probe() itself (found above).
8600 */
8601
8602/*
8603 * Register the calling provider with the DTrace framework. This should
8604 * generally be called by DTrace providers in their attach(9E) entry point.
8605 */
8606int
8607dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8608 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8609{
8610 dtrace_provider_t *provider;
8611
8612 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8613 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8614 "arguments", name ? name : "<NULL>");
8615 return (EINVAL);
8616 }
8617
8618 if (name[0] == '\0' || dtrace_badname(name)) {
8619 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8620 "provider name", name);
8621 return (EINVAL);
8622 }
8623
8624 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8625 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8626 pops->dtps_destroy == NULL ||
8627 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8628 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8629 "provider ops", name);
8630 return (EINVAL);
8631 }
8632
8633 if (dtrace_badattr(&pap->dtpa_provider) ||
8634 dtrace_badattr(&pap->dtpa_mod) ||
8635 dtrace_badattr(&pap->dtpa_func) ||
8636 dtrace_badattr(&pap->dtpa_name) ||
8637 dtrace_badattr(&pap->dtpa_args)) {
8638 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8639 "provider attributes", name);
8640 return (EINVAL);
8641 }
8642
8643 if (priv & ~DTRACE_PRIV_ALL) {
8644 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8645 "privilege attributes", name);
8646 return (EINVAL);
8647 }
8648
8649 if ((priv & DTRACE_PRIV_KERNEL) &&
8650 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8651 pops->dtps_usermode == NULL) {
8652 cmn_err(CE_WARN, "failed to register provider '%s': need "
8653 "dtps_usermode() op for given privilege attributes", name);
8654 return (EINVAL);
8655 }
8656
8657 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
fe8ab488 8658
2d21ac55
A
8659 provider->dtpv_attr = *pap;
8660 provider->dtpv_priv.dtpp_flags = priv;
8661 if (cr != NULL) {
8662 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8663 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8664 }
8665 provider->dtpv_pops = *pops;
8666
8667 if (pops->dtps_provide == NULL) {
8668 ASSERT(pops->dtps_provide_module != NULL);
0a7de745 8669 provider->dtpv_pops.dtps_provide = dtrace_provide_nullop;
2d21ac55
A
8670 }
8671
8672 if (pops->dtps_provide_module == NULL) {
8673 ASSERT(pops->dtps_provide != NULL);
8674 provider->dtpv_pops.dtps_provide_module =
0a7de745 8675 dtrace_provide_module_nullop;
2d21ac55
A
8676 }
8677
8678 if (pops->dtps_suspend == NULL) {
8679 ASSERT(pops->dtps_resume == NULL);
0a7de745
A
8680 provider->dtpv_pops.dtps_suspend = dtrace_suspend_nullop;
8681 provider->dtpv_pops.dtps_resume = dtrace_resume_nullop;
2d21ac55
A
8682 }
8683
8684 provider->dtpv_arg = arg;
8685 *idp = (dtrace_provider_id_t)provider;
8686
8687 if (pops == &dtrace_provider_ops) {
5ba3f43e
A
8688 LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
8689 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
d9a64523
A
8690
8691 provider->dtpv_name = dtrace_strref(name);
8692
2d21ac55
A
8693 ASSERT(dtrace_anon.dta_enabling == NULL);
8694
8695 /*
8696 * We make sure that the DTrace provider is at the head of
8697 * the provider chain.
8698 */
8699 provider->dtpv_next = dtrace_provider;
8700 dtrace_provider = provider;
8701 return (0);
8702 }
8703
8704 lck_mtx_lock(&dtrace_provider_lock);
8705 lck_mtx_lock(&dtrace_lock);
8706
d9a64523
A
8707 provider->dtpv_name = dtrace_strref(name);
8708
2d21ac55
A
8709 /*
8710 * If there is at least one provider registered, we'll add this
8711 * provider after the first provider.
8712 */
8713 if (dtrace_provider != NULL) {
8714 provider->dtpv_next = dtrace_provider->dtpv_next;
8715 dtrace_provider->dtpv_next = provider;
8716 } else {
8717 dtrace_provider = provider;
8718 }
8719
8720 if (dtrace_retained != NULL) {
8721 dtrace_enabling_provide(provider);
8722
8723 /*
39037602
A
8724 * Now we need to call dtrace_enabling_matchall_with_cond() --
8725 * with a condition matching the provider name we just added,
8726 * which will acquire cpu_lock and dtrace_lock. We therefore need
2d21ac55
A
8727 * to drop all of our locks before calling into it...
8728 */
8729 lck_mtx_unlock(&dtrace_lock);
8730 lck_mtx_unlock(&dtrace_provider_lock);
39037602
A
8731
8732 dtrace_match_cond_t cond = {dtrace_cond_provider_match, provider->dtpv_name};
8733 dtrace_enabling_matchall_with_cond(&cond);
2d21ac55
A
8734
8735 return (0);
8736 }
8737
8738 lck_mtx_unlock(&dtrace_lock);
8739 lck_mtx_unlock(&dtrace_provider_lock);
8740
8741 return (0);
8742}
8743
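/*
 * Illustrative sketch, not part of the original source: a minimal
 * registration as it might appear in a provider's attach path.  All
 * example_* names are hypothetical; the dtrace_pattr_t and dtrace_pops_t
 * layouts are per <sys/dtrace.h>.  Ops left NULL are replaced with the
 * nullops above:
 */
#if 0 /* example only */
static void example_provide(void *, const dtrace_probedesc_t *);
static int example_enable(void *, dtrace_id_t, void *);
static void example_disable(void *, dtrace_id_t, void *);
static void example_destroy(void *, dtrace_id_t, void *);

static dtrace_pattr_t example_attr = {
/* provider */	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
/* module */	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
/* function */	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
/* name */	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
/* args */	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};

static dtrace_pops_t example_pops = {
	.dtps_provide = example_provide,	/* create probes on demand */
	.dtps_enable = example_enable,
	.dtps_disable = example_disable,
	.dtps_destroy = example_destroy,
};

static dtrace_provider_id_t example_id;

static void
example_attach(void)
{
	if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
	    NULL, &example_pops, NULL, &example_id) != 0)
		printf("example: provider registration failed\n");
}
#endif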
8744/*
8745 * Unregister the specified provider from the DTrace framework. This should
8746 * generally be called by DTrace providers in their detach(9E) entry point.
8747 */
8748int
8749dtrace_unregister(dtrace_provider_id_t id)
8750{
8751 dtrace_provider_t *old = (dtrace_provider_t *)id;
8752 dtrace_provider_t *prev = NULL;
d9a64523
A
8753 int self = 0;
8754 dtrace_probe_t *probe, *first = NULL, *next = NULL;
8755 dtrace_probe_t template = {
8756 .dtpr_provider = old
8757 };
2d21ac55
A
8758
8759 if (old->dtpv_pops.dtps_enable ==
6d2010ae 8760 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
2d21ac55
A
8761 /*
8762 * If DTrace itself is the provider, we're called with locks
8763 * already held.
8764 */
8765 ASSERT(old == dtrace_provider);
8766 ASSERT(dtrace_devi != NULL);
5ba3f43e
A
8767 LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
8768 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
8769 self = 1;
8770
8771 if (dtrace_provider->dtpv_next != NULL) {
8772 /*
8773 * There's another provider here; return failure.
8774 */
8775 return (EBUSY);
8776 }
8777 } else {
8778 lck_mtx_lock(&dtrace_provider_lock);
8779 lck_mtx_lock(&mod_lock);
8780 lck_mtx_lock(&dtrace_lock);
8781 }
8782
8783 /*
8784 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8785 * probes, we refuse to let providers slither away, unless this
8786 * provider has already been explicitly invalidated.
8787 */
8788 if (!old->dtpv_defunct &&
8789 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8790 dtrace_anon.dta_state->dts_necbs > 0))) {
8791 if (!self) {
8792 lck_mtx_unlock(&dtrace_lock);
8793 lck_mtx_unlock(&mod_lock);
8794 lck_mtx_unlock(&dtrace_provider_lock);
8795 }
8796 return (EBUSY);
8797 }
8798
8799 /*
8800 * Attempt to destroy the probes associated with this provider.
8801 */
fe8ab488 8802 if (old->dtpv_ecb_count != 0) {
2d21ac55
A
8803 /*
8804 * We have at least one ECB; we can't remove this provider.
8805 */
8806 if (!self) {
8807 lck_mtx_unlock(&dtrace_lock);
8808 lck_mtx_unlock(&mod_lock);
8809 lck_mtx_unlock(&dtrace_provider_lock);
8810 }
8811 return (EBUSY);
8812 }
8813
8814 /*
8815 * All of the probes for this provider are disabled; we can safely
8816 * remove all of them from their hash chains and from the probe array.
8817 */
d9a64523
A
8818 for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
8819 probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
2d21ac55
A
8820 if (probe->dtpr_provider != old)
8821 continue;
8822
d9a64523 8823 dtrace_probes[probe->dtpr_id - 1] = NULL;
fe8ab488 8824 old->dtpv_probe_count--;
2d21ac55
A
8825
8826 dtrace_hash_remove(dtrace_bymod, probe);
8827 dtrace_hash_remove(dtrace_byfunc, probe);
8828 dtrace_hash_remove(dtrace_byname, probe);
8829
8830 if (first == NULL) {
8831 first = probe;
8832 probe->dtpr_nextmod = NULL;
8833 } else {
d9a64523
A
8834 /*
8835 * Use nextmod as the chain of probes to remove
8836 */
2d21ac55
A
8837 probe->dtpr_nextmod = first;
8838 first = probe;
8839 }
8840 }
8841
d9a64523
A
8842 for (probe = first; probe != NULL; probe = next) {
8843 next = probe->dtpr_nextmod;
8844 dtrace_hash_remove(dtrace_byprov, probe);
8845 }
8846
2d21ac55
A
8847 /*
8848 * The provider's probes have been removed from the hash chains and
8849 * from the probe array. Now issue a dtrace_sync() to be sure that
8850 * everyone has cleared out from any probe array processing.
8851 */
8852 dtrace_sync();
8853
d9a64523
A
8854 for (probe = first; probe != NULL; probe = next) {
8855 next = probe->dtpr_nextmod;
2d21ac55
A
8856
8857 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8858 probe->dtpr_arg);
d9a64523
A
8859 dtrace_strunref(probe->dtpr_mod);
8860 dtrace_strunref(probe->dtpr_func);
8861 dtrace_strunref(probe->dtpr_name);
2d21ac55 8862 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
2d21ac55 8863 zfree(dtrace_probe_t_zone, probe);
2d21ac55
A
8864 }
8865
8866 if ((prev = dtrace_provider) == old) {
8867 ASSERT(self || dtrace_devi == NULL);
8868 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8869 dtrace_provider = old->dtpv_next;
8870 } else {
8871 while (prev != NULL && prev->dtpv_next != old)
8872 prev = prev->dtpv_next;
8873
8874 if (prev == NULL) {
8875 panic("attempt to unregister non-existent "
8876 "dtrace provider %p\n", (void *)id);
8877 }
8878
8879 prev->dtpv_next = old->dtpv_next;
8880 }
8881
d9a64523
A
8882 dtrace_strunref(old->dtpv_name);
8883
2d21ac55
A
8884 if (!self) {
8885 lck_mtx_unlock(&dtrace_lock);
8886 lck_mtx_unlock(&mod_lock);
8887 lck_mtx_unlock(&dtrace_provider_lock);
8888 }
8889
2d21ac55
A
8890 kmem_free(old, sizeof (dtrace_provider_t));
8891
8892 return (0);
8893}
8894
8895/*
8896 * Invalidate the specified provider. All subsequent probe lookups for the
8897 * specified provider will fail, but its probes will not be removed.
8898 */
8899void
8900dtrace_invalidate(dtrace_provider_id_t id)
8901{
8902 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8903
8904 ASSERT(pvp->dtpv_pops.dtps_enable !=
6d2010ae 8905 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
2d21ac55
A
8906
8907 lck_mtx_lock(&dtrace_provider_lock);
8908 lck_mtx_lock(&dtrace_lock);
8909
8910 pvp->dtpv_defunct = 1;
8911
8912 lck_mtx_unlock(&dtrace_lock);
8913 lck_mtx_unlock(&dtrace_provider_lock);
8914}
8915
8916/*
8917 * Indicate whether or not DTrace has attached.
8918 */
8919int
8920dtrace_attached(void)
8921{
8922 /*
8923 * dtrace_provider will be non-NULL iff the DTrace driver has
8924 * attached. (It's non-NULL because DTrace is always itself a
8925 * provider.)
8926 */
8927 return (dtrace_provider != NULL);
8928}
8929
8930/*
8931 * Remove all the unenabled probes for the given provider. This function is
8932 * not unlike dtrace_unregister(), except that it doesn't remove the provider
8933 * -- just as many of its associated probes as it can.
8934 */
8935int
8936dtrace_condense(dtrace_provider_id_t id)
8937{
8938 dtrace_provider_t *prov = (dtrace_provider_t *)id;
d9a64523
A
8939 dtrace_probe_t *probe, *first = NULL;
8940 dtrace_probe_t template = {
8941 .dtpr_provider = prov
8942 };
2d21ac55
A
8943
8944 /*
8945 * Make sure this isn't the dtrace provider itself.
8946 */
8947 ASSERT(prov->dtpv_pops.dtps_enable !=
6d2010ae 8948 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
2d21ac55
A
8949
8950 lck_mtx_lock(&dtrace_provider_lock);
8951 lck_mtx_lock(&dtrace_lock);
8952
8953 /*
8954 * Attempt to destroy the probes associated with this provider.
8955 */
d9a64523
A
8956 for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
8957 probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
2d21ac55
A
8958
8959 if (probe->dtpr_provider != prov)
8960 continue;
8961
8962 if (probe->dtpr_ecb != NULL)
8963 continue;
8964
d9a64523 8965 dtrace_probes[probe->dtpr_id - 1] = NULL;
fe8ab488 8966 prov->dtpv_probe_count--;
2d21ac55
A
8967
8968 dtrace_hash_remove(dtrace_bymod, probe);
8969 dtrace_hash_remove(dtrace_byfunc, probe);
8970 dtrace_hash_remove(dtrace_byname, probe);
8971
d9a64523 8972 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
2d21ac55 8973 probe->dtpr_arg);
d9a64523
A
8974 dtrace_strunref(probe->dtpr_mod);
8975 dtrace_strunref(probe->dtpr_func);
8976 dtrace_strunref(probe->dtpr_name);
8977 if (first == NULL) {
8978 first = probe;
8979 probe->dtpr_nextmod = NULL;
8980 } else {
8981 /*
8982 * Use nextmod as the chain of probes to remove
8983 */
8984 probe->dtpr_nextmod = first;
8985 first = probe;
8986 }
8987 }
8988
8989 for (probe = first; probe != NULL; probe = first) {
8990 first = probe->dtpr_nextmod;
8991 dtrace_hash_remove(dtrace_byprov, probe);
8992 vmem_free(dtrace_arena, (void *)((uintptr_t)probe->dtpr_id), 1);
2d21ac55 8993 zfree(dtrace_probe_t_zone, probe);
2d21ac55
A
8994 }
8995
8996 lck_mtx_unlock(&dtrace_lock);
8997 lck_mtx_unlock(&dtrace_provider_lock);
8998
8999 return (0);
9000}
9001
9002/*
9003 * DTrace Probe Management Functions
9004 *
9005 * The functions in this section perform the DTrace probe management,
9006 * including functions to create probes, look up probes, and call into the
9007 * providers to request that probes be provided. Some of these functions are
9008 * in the Provider-to-Framework API; these functions can be identified by the
9009 * fact that they are not declared "static".
9010 */
9011
9012/*
9013 * Create a probe with the specified module name, function name, and name.
9014 */
9015dtrace_id_t
9016dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
9017 const char *func, const char *name, int aframes, void *arg)
9018{
9019 dtrace_probe_t *probe, **probes;
9020 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
9021 dtrace_id_t id;
9022
9023 if (provider == dtrace_provider) {
5ba3f43e 9024 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
9025 } else {
9026 lck_mtx_lock(&dtrace_lock);
9027 }
9028
9029 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
9030 VM_BESTFIT | VM_SLEEP);
fe8ab488 9031
2d21ac55
A
9032 probe = zalloc(dtrace_probe_t_zone);
9033 bzero(probe, sizeof (dtrace_probe_t));
2d21ac55
A
9034
9035 probe->dtpr_id = id;
9036 probe->dtpr_gen = dtrace_probegen++;
d9a64523
A
9037 probe->dtpr_mod = dtrace_strref(mod);
9038 probe->dtpr_func = dtrace_strref(func);
9039 probe->dtpr_name = dtrace_strref(name);
2d21ac55
A
9040 probe->dtpr_arg = arg;
9041 probe->dtpr_aframes = aframes;
9042 probe->dtpr_provider = provider;
9043
d9a64523 9044 dtrace_hash_add(dtrace_byprov, probe);
2d21ac55
A
9045 dtrace_hash_add(dtrace_bymod, probe);
9046 dtrace_hash_add(dtrace_byfunc, probe);
9047 dtrace_hash_add(dtrace_byname, probe);
9048
b0d623f7 9049 if (id - 1 >= (dtrace_id_t)dtrace_nprobes) {
2d21ac55 9050 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
cb323159 9051 size_t nsize = osize * 2;
2d21ac55
A
9052
9053 probes = kmem_zalloc(nsize, KM_SLEEP);
9054
cb323159 9055 dtrace_probe_t **oprobes = dtrace_probes;
2d21ac55 9056
cb323159
A
9057 bcopy(oprobes, probes, osize);
9058 dtrace_membar_producer();
9059 dtrace_probes = probes;
2d21ac55 9060
cb323159 9061 dtrace_sync();
2d21ac55 9062
cb323159
A
9063 /*
9064 * All CPUs are now seeing the new probes array; we can
9065 * safely free the old array.
9066 */
9067 kmem_free(oprobes, osize);
9068 dtrace_nprobes *= 2;
2d21ac55 9069
b0d623f7 9070 ASSERT(id - 1 < (dtrace_id_t)dtrace_nprobes);
2d21ac55
A
9071 }
9072
9073 ASSERT(dtrace_probes[id - 1] == NULL);
9074 dtrace_probes[id - 1] = probe;
fe8ab488 9075 provider->dtpv_probe_count++;
2d21ac55
A
9076
9077 if (provider != dtrace_provider)
9078 lck_mtx_unlock(&dtrace_lock);
9079
9080 return (id);
9081}
9082
9083static dtrace_probe_t *
9084dtrace_probe_lookup_id(dtrace_id_t id)
9085{
5ba3f43e 9086 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55 9087
b0d623f7
A
9088 if (id == 0 || id > (dtrace_id_t)dtrace_nprobes)
9089 return (NULL);
2d21ac55
A
9090
9091 return (dtrace_probes[id - 1]);
9092}
9093
9094static int
d190cdc3 9095dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg1, void *arg2)
2d21ac55 9096{
d190cdc3
A
9097#pragma unused(arg2)
9098 *((dtrace_id_t *)arg1) = probe->dtpr_id;
2d21ac55
A
9099
9100 return (DTRACE_MATCH_DONE);
9101}
9102
9103/*
9104 * Look up a probe based on provider and one or more of module name, function
9105 * name and probe name.
9106 */
9107dtrace_id_t
9108dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
9109 const char *func, const char *name)
9110{
9111 dtrace_probekey_t pkey;
9112 dtrace_id_t id;
9113 int match;
9114
d9a64523
A
9115 lck_mtx_lock(&dtrace_lock);
9116
9117 pkey.dtpk_prov = dtrace_strref(((dtrace_provider_t *)prid)->dtpv_name);
2d21ac55 9118 pkey.dtpk_pmatch = &dtrace_match_string;
d9a64523 9119 pkey.dtpk_mod = dtrace_strref(mod);
2d21ac55 9120 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
d9a64523 9121 pkey.dtpk_func = dtrace_strref(func);
2d21ac55 9122 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
d9a64523 9123 pkey.dtpk_name = dtrace_strref(name);
2d21ac55
A
9124 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
9125 pkey.dtpk_id = DTRACE_IDNONE;
9126
2d21ac55 9127 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
d190cdc3 9128 dtrace_probe_lookup_match, &id, NULL);
d9a64523
A
9129
9130 dtrace_probekey_release(&pkey);
9131
2d21ac55
A
9132 lck_mtx_unlock(&dtrace_lock);
9133
9134 ASSERT(match == 1 || match == 0);
9135 return (match ? id : 0);
9136}
9137
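/*
 * Illustrative sketch, not part of the original source: providers typically
 * pair dtrace_probe_lookup() with dtrace_probe_create() in their
 * dtps_provide callback so that re-provisioning never duplicates a probe.
 * The example_* names and the vn_open probe are hypothetical:
 */
#if 0 /* example only */
static void
example_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(desc)
	if (dtrace_probe_lookup(example_id, "mach_kernel", "vn_open",
	    "entry") != DTRACE_IDNONE)
		return;		/* already created on an earlier pass */

	(void) dtrace_probe_create(example_id, "mach_kernel", "vn_open",
	    "entry", 0 /* aframes */, arg);
}
#endif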
9138/*
9139 * Returns the probe argument associated with the specified probe.
9140 */
9141void *
9142dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
9143{
9144 dtrace_probe_t *probe;
9145 void *rval = NULL;
9146
9147 lck_mtx_lock(&dtrace_lock);
9148
9149 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
9150 probe->dtpr_provider == (dtrace_provider_t *)id)
9151 rval = probe->dtpr_arg;
9152
9153 lck_mtx_unlock(&dtrace_lock);
9154
9155 return (rval);
9156}
9157
9158/*
9159 * Copy a probe into a probe description.
9160 */
9161static void
9162dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
9163{
9164 bzero(pdp, sizeof (dtrace_probedesc_t));
9165 pdp->dtpd_id = prp->dtpr_id;
9166
fe8ab488 9167 /* APPLE NOTE: Darwin employs size bounded string operation. */
2d21ac55
A
9168 (void) strlcpy(pdp->dtpd_provider,
9169 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN);
9170
9171 (void) strlcpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN);
9172 (void) strlcpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN);
9173 (void) strlcpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN);
9174}
9175
9176/*
9177 * Called to indicate that a probe -- or probes -- should be provided by a
9178 * specified provider. If the specified description is NULL, the provider will
9179 * be told to provide all of its probes. (This is done whenever a new
9180 * consumer comes along, or whenever a retained enabling is to be matched.) If
9181 * the specified description is non-NULL, the provider is given the
9182 * opportunity to dynamically provide the specified probe, allowing providers
9183 * to support the creation of probes on-the-fly. (So-called _autocreated_
9184 * probes.) If the provider is NULL, the operations will be applied to all
9185 * providers; if the provider is non-NULL, the operations will only be applied
9186 * to the specified provider. The dtrace_provider_lock must be held, and the
9187 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
9188 * will need to grab the dtrace_lock when it reenters the framework through
9189 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
9190 */
9191static void
9192dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
9193{
9194 struct modctl *ctl;
9195 int all = 0;
9196
5ba3f43e 9197 LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
9198
9199 if (prv == NULL) {
9200 all = 1;
9201 prv = dtrace_provider;
9202 }
6d2010ae 9203
2d21ac55 9204 do {
2d21ac55
A
9205 /*
9206 * First, call the blanket provide operation.
9207 */
9208 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
6d2010ae 9209
2d21ac55
A
9210 /*
9211 * Now call the per-module provide operation. We will grab
9212 * mod_lock to prevent the list from being modified. Note
9213 * that this also prevents the mod_busy bits from changing.
9214 * (mod_busy can only be changed with mod_lock held.)
9215 */
6d2010ae
A
9216 lck_mtx_lock(&mod_lock);
9217
6d2010ae
A
9218 ctl = dtrace_modctl_list;
9219 while (ctl) {
9220 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
9221 ctl = ctl->mod_next;
2d21ac55 9222 }
6d2010ae
A
9223
9224 lck_mtx_unlock(&mod_lock);
2d21ac55
A
9225 } while (all && (prv = prv->dtpv_next) != NULL);
9226}
9227
9228/*
9229 * Iterate over each probe, and call the Framework-to-Provider API function
9230 * denoted by offs.
9231 */
9232static void
9233dtrace_probe_foreach(uintptr_t offs)
9234{
9235 dtrace_provider_t *prov;
9236 void (*func)(void *, dtrace_id_t, void *);
9237 dtrace_probe_t *probe;
9238 dtrace_icookie_t cookie;
9239 int i;
9240
9241 /*
9242 * We disable interrupts to walk through the probe array. This is
9243 * safe -- the dtrace_sync() in dtrace_unregister() ensures that we
9244 * won't see stale data.
9245 */
9246 cookie = dtrace_interrupt_disable();
9247
9248 for (i = 0; i < dtrace_nprobes; i++) {
9249 if ((probe = dtrace_probes[i]) == NULL)
9250 continue;
9251
9252 if (probe->dtpr_ecb == NULL) {
9253 /*
9254 * This probe isn't enabled -- don't call the function.
9255 */
9256 continue;
9257 }
9258
9259 prov = probe->dtpr_provider;
9260 func = *((void(**)(void *, dtrace_id_t, void *))
9261 ((uintptr_t)&prov->dtpv_pops + offs));
9262
9263 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
9264 }
9265
9266 dtrace_interrupt_enable(cookie);
9267}
9268
9269static int
d190cdc3 9270dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab, dtrace_ecbdesc_t *ep)
2d21ac55
A
9271{
9272 dtrace_probekey_t pkey;
9273 uint32_t priv;
9274 uid_t uid;
9275 zoneid_t zoneid;
d9a64523 9276 int err;
2d21ac55 9277
5ba3f43e 9278 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
9279
9280 dtrace_ecb_create_cache = NULL;
9281
9282 if (desc == NULL) {
9283 /*
9284 * If we're passed a NULL description, we're being asked to
9285 * create an ECB with a NULL probe.
9286 */
d190cdc3 9287 (void) dtrace_ecb_create_enable(NULL, enab, ep);
2d21ac55
A
9288 return (0);
9289 }
9290
9291 dtrace_probekey(desc, &pkey);
9292 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
9293 &priv, &uid, &zoneid);
9294
d9a64523
A
9295 err = dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, enab, ep);
9296
9297 dtrace_probekey_release(&pkey);
9298
9299 return err;
2d21ac55
A
9300}
9301
9302/*
9303 * DTrace Helper Provider Functions
9304 */
9305static void
9306dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
9307{
9308 attr->dtat_name = DOF_ATTR_NAME(dofattr);
9309 attr->dtat_data = DOF_ATTR_DATA(dofattr);
9310 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
9311}
9312
9313static void
9314dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
9315 const dof_provider_t *dofprov, char *strtab)
9316{
9317 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
9318 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
9319 dofprov->dofpv_provattr);
9320 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
9321 dofprov->dofpv_modattr);
9322 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
9323 dofprov->dofpv_funcattr);
9324 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
9325 dofprov->dofpv_nameattr);
9326 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
9327 dofprov->dofpv_argsattr);
9328}
9329
9330static void
d190cdc3 9331dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
2d21ac55
A
9332{
9333 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9334 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9335 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
9336 dof_provider_t *provider;
9337 dof_probe_t *probe;
9338 uint32_t *off, *enoff;
9339 uint8_t *arg;
9340 char *strtab;
9341 uint_t i, nprobes;
9342 dtrace_helper_provdesc_t dhpv;
9343 dtrace_helper_probedesc_t dhpb;
9344 dtrace_meta_t *meta = dtrace_meta_pid;
9345 dtrace_mops_t *mops = &meta->dtm_mops;
9346 void *parg;
9347
9348 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9349 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9350 provider->dofpv_strtab * dof->dofh_secsize);
9351 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9352 provider->dofpv_probes * dof->dofh_secsize);
9353 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9354 provider->dofpv_prargs * dof->dofh_secsize);
9355 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9356 provider->dofpv_proffs * dof->dofh_secsize);
9357
9358 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9359 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
9360 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
9361 enoff = NULL;
9362
9363 /*
9364 * See dtrace_helper_provider_validate().
9365 */
9366 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
9367 provider->dofpv_prenoffs != DOF_SECT_NONE) {
9368 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9369 provider->dofpv_prenoffs * dof->dofh_secsize);
9370 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
9371 }
9372
9373 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
9374
9375 /*
9376 * Create the provider.
9377 */
9378 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9379
d190cdc3 9380 if ((parg = mops->dtms_provide_proc(meta->dtm_arg, &dhpv, p)) == NULL)
2d21ac55
A
9381 return;
9382
9383 meta->dtm_count++;
9384
9385 /*
9386 * Create the probes.
9387 */
9388 for (i = 0; i < nprobes; i++) {
9389 probe = (dof_probe_t *)(uintptr_t)(daddr +
9390 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
9391
9392 dhpb.dthpb_mod = dhp->dofhp_mod;
9393 dhpb.dthpb_func = strtab + probe->dofpr_func;
9394 dhpb.dthpb_name = strtab + probe->dofpr_name;
b0d623f7 9395#if !defined(__APPLE__)
2d21ac55 9396 dhpb.dthpb_base = probe->dofpr_addr;
b0d623f7
A
9397#else
9398 dhpb.dthpb_base = dhp->dofhp_addr; /* FIXME: James, why? */
2d21ac55 9399#endif
b0d623f7 9400 dhpb.dthpb_offs = (int32_t *)(off + probe->dofpr_offidx);
2d21ac55
A
9401 dhpb.dthpb_noffs = probe->dofpr_noffs;
9402 if (enoff != NULL) {
b0d623f7 9403 dhpb.dthpb_enoffs = (int32_t *)(enoff + probe->dofpr_enoffidx);
2d21ac55
A
9404 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
9405 } else {
9406 dhpb.dthpb_enoffs = NULL;
9407 dhpb.dthpb_nenoffs = 0;
9408 }
9409 dhpb.dthpb_args = arg + probe->dofpr_argidx;
9410 dhpb.dthpb_nargc = probe->dofpr_nargc;
9411 dhpb.dthpb_xargc = probe->dofpr_xargc;
9412 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
9413 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
9414
9415 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
9416 }
39037602
A
9417
9418 /*
9419 * Since we just created probes, we need to match our enablings
9420 * against them, knowing that we have only added probes from
9421 * this provider.
9422 */
9423 char *prov_name = mops->dtms_provider_name(parg);
9424 ASSERT(prov_name != NULL);
9425 dtrace_match_cond_t cond = {dtrace_cond_provider_match, (void*)prov_name};
9426
9427 dtrace_enabling_matchall_with_cond(&cond);
2d21ac55
A
9428}
9429
9430static void
d190cdc3 9431dtrace_helper_provide(dof_helper_t *dhp, proc_t *p)
2d21ac55
A
9432{
9433 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9434 dof_hdr_t *dof = (dof_hdr_t *)daddr;
b0d623f7 9435 uint32_t i;
2d21ac55 9436
5ba3f43e 9437 LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
9438
9439 for (i = 0; i < dof->dofh_secnum; i++) {
9440 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9441 dof->dofh_secoff + i * dof->dofh_secsize);
9442
9443 if (sec->dofs_type != DOF_SECT_PROVIDER)
9444 continue;
9445
d190cdc3 9446 dtrace_helper_provide_one(dhp, sec, p);
2d21ac55 9447 }
2d21ac55
A
9448}
9449
9450static void
d190cdc3 9451dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
2d21ac55
A
9452{
9453 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9454 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9455 dof_sec_t *str_sec;
9456 dof_provider_t *provider;
9457 char *strtab;
9458 dtrace_helper_provdesc_t dhpv;
9459 dtrace_meta_t *meta = dtrace_meta_pid;
9460 dtrace_mops_t *mops = &meta->dtm_mops;
9461
9462 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9463 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9464 provider->dofpv_strtab * dof->dofh_secsize);
9465
9466 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9467
9468 /*
9469 * Create the provider.
9470 */
9471 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9472
d190cdc3 9473 mops->dtms_remove_proc(meta->dtm_arg, &dhpv, p);
2d21ac55
A
9474
9475 meta->dtm_count--;
9476}
9477
9478static void
d190cdc3 9479dtrace_helper_provider_remove(dof_helper_t *dhp, proc_t *p)
2d21ac55
A
9480{
9481 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9482 dof_hdr_t *dof = (dof_hdr_t *)daddr;
b0d623f7 9483 uint32_t i;
2d21ac55 9484
5ba3f43e 9485 LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
9486
9487 for (i = 0; i < dof->dofh_secnum; i++) {
9488 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9489 dof->dofh_secoff + i * dof->dofh_secsize);
9490
9491 if (sec->dofs_type != DOF_SECT_PROVIDER)
9492 continue;
9493
d190cdc3 9494 dtrace_helper_provider_remove_one(dhp, sec, p);
2d21ac55
A
9495 }
9496}
9497
9498/*
9499 * DTrace Meta Provider-to-Framework API Functions
9500 *
9501 * These functions implement the Meta Provider-to-Framework API, as described
9502 * in <sys/dtrace.h>.
9503 */
9504int
9505dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9506 dtrace_meta_provider_id_t *idp)
9507{
9508 dtrace_meta_t *meta;
9509 dtrace_helpers_t *help, *next;
b0d623f7 9510 uint_t i;
2d21ac55
A
9511
9512 *idp = DTRACE_METAPROVNONE;
9513
9514 /*
9515 * We strictly don't need the name, but we hold onto it for
9516 * debuggability. All hail error queues!
9517 */
9518 if (name == NULL) {
9519 cmn_err(CE_WARN, "failed to register meta-provider: "
9520 "invalid name");
9521 return (EINVAL);
9522 }
9523
9524 if (mops == NULL ||
9525 mops->dtms_create_probe == NULL ||
d190cdc3
A
9526 mops->dtms_provide_proc == NULL ||
9527 mops->dtms_remove_proc == NULL) {
2d21ac55
A
9528 cmn_err(CE_WARN, "failed to register meta-provider %s: "
9529 "invalid ops", name);
9530 return (EINVAL);
9531 }
9532
9533 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9534 meta->dtm_mops = *mops;
2d21ac55
A
9535 meta->dtm_arg = arg;
9536
9537 lck_mtx_lock(&dtrace_meta_lock);
9538 lck_mtx_lock(&dtrace_lock);
9539
9540 if (dtrace_meta_pid != NULL) {
9541 lck_mtx_unlock(&dtrace_lock);
9542 lck_mtx_unlock(&dtrace_meta_lock);
9543 cmn_err(CE_WARN, "failed to register meta-provider %s: "
9544 "user-land meta-provider exists", name);
2d21ac55
A
9545 kmem_free(meta, sizeof (dtrace_meta_t));
9546 return (EINVAL);
9547 }
9548
d9a64523
A
9549 meta->dtm_name = dtrace_strref(name);
9550
2d21ac55
A
9551 dtrace_meta_pid = meta;
9552 *idp = (dtrace_meta_provider_id_t)meta;
9553
9554 /*
9555 * If there are providers and probes ready to go, pass them
9556 * off to the new meta provider now.
9557 */
9558
9559 help = dtrace_deferred_pid;
9560 dtrace_deferred_pid = NULL;
9561
9562 lck_mtx_unlock(&dtrace_lock);
9563
9564 while (help != NULL) {
9565 for (i = 0; i < help->dthps_nprovs; i++) {
d190cdc3
A
9566 proc_t *p = proc_find(help->dthps_pid);
9567 if (p == PROC_NULL)
9568 continue;
2d21ac55 9569 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
d190cdc3
A
9570 p);
9571 proc_rele(p);
2d21ac55
A
9572 }
9573
9574 next = help->dthps_next;
9575 help->dthps_next = NULL;
9576 help->dthps_prev = NULL;
9577 help->dthps_deferred = 0;
9578 help = next;
9579 }
9580
9581 lck_mtx_unlock(&dtrace_meta_lock);
9582
9583 return (0);
9584}
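/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * the minimal shape of a meta-provider registration.  The example_*
 * names are invented; the signatures are inferred from the call sites
 * in dtrace_helper_provide_one() and dtrace_helper_provider_remove_one()
 * above.
 */
#if 0
static void *
example_provide_proc(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg, p)
	/* Instantiate a provider for dhpv->dthpv_provname; return a cookie. */
	return (dhpv);
}

static void
example_create_probe(void *arg, void *parg, dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg, parg, dhpb)
	/* Create the probe described by dhpb under the provider cookie. */
}

static void
example_remove_proc(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg, dhpv, p)
	/* Tear down the provider created by example_provide_proc(). */
}

static dtrace_mops_t example_mops = {
	.dtms_create_probe = example_create_probe,
	.dtms_provide_proc = example_provide_proc,
	.dtms_remove_proc = example_remove_proc,
	/* dtms_provider_name is also consulted once probes are created. */
};

static int
example_meta_attach(void)
{
	dtrace_meta_provider_id_t id;

	/* Returns EINVAL if a user-land meta-provider already exists. */
	return (dtrace_meta_register("example", &example_mops, NULL, &id));
}
#endif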
9585
9586int
9587dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9588{
9589 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9590
9591 lck_mtx_lock(&dtrace_meta_lock);
9592 lck_mtx_lock(&dtrace_lock);
9593
9594 if (old == dtrace_meta_pid) {
9595 pp = &dtrace_meta_pid;
9596 } else {
9597 panic("attempt to unregister non-existent "
9598 "dtrace meta-provider %p\n", (void *)old);
9599 }
9600
9601 if (old->dtm_count != 0) {
9602 lck_mtx_unlock(&dtrace_lock);
9603 lck_mtx_unlock(&dtrace_meta_lock);
9604 return (EBUSY);
9605 }
9606
9607 *pp = NULL;
9608
d9a64523
A
9609 dtrace_strunref(old->dtm_name);
9610
2d21ac55
A
9611 lck_mtx_unlock(&dtrace_lock);
9612 lck_mtx_unlock(&dtrace_meta_lock);
9613
2d21ac55
A
9614 kmem_free(old, sizeof (dtrace_meta_t));
9615
9616 return (0);
9617}
9618
9619
9620/*
9621 * DTrace DIF Object Functions
9622 */
9623static int
9624dtrace_difo_err(uint_t pc, const char *format, ...)
9625{
9626 if (dtrace_err_verbose) {
9627 va_list alist;
9628
9629 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
9630 va_start(alist, format);
9631 (void) vuprintf(format, alist);
9632 va_end(alist);
9633 }
9634
9635#ifdef DTRACE_ERRDEBUG
9636 dtrace_errdebug(format);
9637#endif
9638 return (1);
9639}
9640
9641/*
9642 * Validate a DTrace DIF object by checking the IR instructions. The following
9643 * rules are currently enforced by dtrace_difo_validate():
9644 *
9645 * 1. Each instruction must have a valid opcode
9646 * 2. Each register, string, variable, or subroutine reference must be valid
9647 * 3. No instruction can modify register %r0 (must be zero)
9648 * 4. All instruction reserved bits must be set to zero
9649 * 5. The last instruction must be a "ret" instruction
9650 * 6. All branch targets must reference a valid instruction _after_ the branch
9651 */
9652static int
9653dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9654 cred_t *cr)
9655{
b0d623f7
A
9656 int err = 0;
9657 uint_t i;
fe8ab488 9658
b0d623f7
A
9659 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9660 int kcheckload;
9661 uint_t pc;
39037602 9662 int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
b0d623f7
A
9663
9664 kcheckload = cr == NULL ||
9665 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
2d21ac55
A
9666
9667 dp->dtdo_destructive = 0;
9668
9669 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9670 dif_instr_t instr = dp->dtdo_buf[pc];
9671
9672 uint_t r1 = DIF_INSTR_R1(instr);
9673 uint_t r2 = DIF_INSTR_R2(instr);
9674 uint_t rd = DIF_INSTR_RD(instr);
9675 uint_t rs = DIF_INSTR_RS(instr);
9676 uint_t label = DIF_INSTR_LABEL(instr);
9677 uint_t v = DIF_INSTR_VAR(instr);
9678 uint_t subr = DIF_INSTR_SUBR(instr);
9679 uint_t type = DIF_INSTR_TYPE(instr);
9680 uint_t op = DIF_INSTR_OP(instr);
9681
9682 switch (op) {
9683 case DIF_OP_OR:
9684 case DIF_OP_XOR:
9685 case DIF_OP_AND:
9686 case DIF_OP_SLL:
9687 case DIF_OP_SRL:
9688 case DIF_OP_SRA:
9689 case DIF_OP_SUB:
9690 case DIF_OP_ADD:
9691 case DIF_OP_MUL:
9692 case DIF_OP_SDIV:
9693 case DIF_OP_UDIV:
9694 case DIF_OP_SREM:
9695 case DIF_OP_UREM:
9696 case DIF_OP_COPYS:
9697 if (r1 >= nregs)
9698 err += efunc(pc, "invalid register %u\n", r1);
9699 if (r2 >= nregs)
9700 err += efunc(pc, "invalid register %u\n", r2);
9701 if (rd >= nregs)
9702 err += efunc(pc, "invalid register %u\n", rd);
9703 if (rd == 0)
cb323159 9704 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9705 break;
9706 case DIF_OP_NOT:
9707 case DIF_OP_MOV:
9708 case DIF_OP_ALLOCS:
9709 if (r1 >= nregs)
9710 err += efunc(pc, "invalid register %u\n", r1);
9711 if (r2 != 0)
9712 err += efunc(pc, "non-zero reserved bits\n");
9713 if (rd >= nregs)
9714 err += efunc(pc, "invalid register %u\n", rd);
9715 if (rd == 0)
cb323159 9716 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9717 break;
9718 case DIF_OP_LDSB:
9719 case DIF_OP_LDSH:
9720 case DIF_OP_LDSW:
9721 case DIF_OP_LDUB:
9722 case DIF_OP_LDUH:
9723 case DIF_OP_LDUW:
9724 case DIF_OP_LDX:
9725 if (r1 >= nregs)
9726 err += efunc(pc, "invalid register %u\n", r1);
9727 if (r2 != 0)
9728 err += efunc(pc, "non-zero reserved bits\n");
9729 if (rd >= nregs)
9730 err += efunc(pc, "invalid register %u\n", rd);
9731 if (rd == 0)
cb323159 9732 err += efunc(pc, "cannot write to %%r0\n");
b0d623f7 9733 if (kcheckload)
2d21ac55
A
9734 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9735 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9736 break;
9737 case DIF_OP_RLDSB:
9738 case DIF_OP_RLDSH:
9739 case DIF_OP_RLDSW:
9740 case DIF_OP_RLDUB:
9741 case DIF_OP_RLDUH:
9742 case DIF_OP_RLDUW:
9743 case DIF_OP_RLDX:
9744 if (r1 >= nregs)
9745 err += efunc(pc, "invalid register %u\n", r1);
9746 if (r2 != 0)
9747 err += efunc(pc, "non-zero reserved bits\n");
9748 if (rd >= nregs)
9749 err += efunc(pc, "invalid register %u\n", rd);
9750 if (rd == 0)
cb323159 9751 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9752 break;
9753 case DIF_OP_ULDSB:
9754 case DIF_OP_ULDSH:
9755 case DIF_OP_ULDSW:
9756 case DIF_OP_ULDUB:
9757 case DIF_OP_ULDUH:
9758 case DIF_OP_ULDUW:
9759 case DIF_OP_ULDX:
9760 if (r1 >= nregs)
9761 err += efunc(pc, "invalid register %u\n", r1);
9762 if (r2 != 0)
9763 err += efunc(pc, "non-zero reserved bits\n");
9764 if (rd >= nregs)
9765 err += efunc(pc, "invalid register %u\n", rd);
9766 if (rd == 0)
cb323159 9767 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9768 break;
9769 case DIF_OP_STB:
9770 case DIF_OP_STH:
9771 case DIF_OP_STW:
9772 case DIF_OP_STX:
9773 if (r1 >= nregs)
9774 err += efunc(pc, "invalid register %u\n", r1);
9775 if (r2 != 0)
9776 err += efunc(pc, "non-zero reserved bits\n");
9777 if (rd >= nregs)
9778 err += efunc(pc, "invalid register %u\n", rd);
9779 if (rd == 0)
9780 err += efunc(pc, "cannot write to 0 address\n");
9781 break;
9782 case DIF_OP_CMP:
9783 case DIF_OP_SCMP:
9784 if (r1 >= nregs)
9785 err += efunc(pc, "invalid register %u\n", r1);
9786 if (r2 >= nregs)
9787 err += efunc(pc, "invalid register %u\n", r2);
9788 if (rd != 0)
9789 err += efunc(pc, "non-zero reserved bits\n");
9790 break;
9791 case DIF_OP_TST:
9792 if (r1 >= nregs)
9793 err += efunc(pc, "invalid register %u\n", r1);
9794 if (r2 != 0 || rd != 0)
9795 err += efunc(pc, "non-zero reserved bits\n");
9796 break;
9797 case DIF_OP_BA:
9798 case DIF_OP_BE:
9799 case DIF_OP_BNE:
9800 case DIF_OP_BG:
9801 case DIF_OP_BGU:
9802 case DIF_OP_BGE:
9803 case DIF_OP_BGEU:
9804 case DIF_OP_BL:
9805 case DIF_OP_BLU:
9806 case DIF_OP_BLE:
9807 case DIF_OP_BLEU:
9808 if (label >= dp->dtdo_len) {
9809 err += efunc(pc, "invalid branch target %u\n",
9810 label);
9811 }
9812 if (label <= pc) {
9813 err += efunc(pc, "backward branch to %u\n",
9814 label);
9815 }
9816 break;
9817 case DIF_OP_RET:
9818 if (r1 != 0 || r2 != 0)
9819 err += efunc(pc, "non-zero reserved bits\n");
9820 if (rd >= nregs)
9821 err += efunc(pc, "invalid register %u\n", rd);
9822 break;
9823 case DIF_OP_NOP:
9824 case DIF_OP_POPTS:
9825 case DIF_OP_FLUSHTS:
9826 if (r1 != 0 || r2 != 0 || rd != 0)
9827 err += efunc(pc, "non-zero reserved bits\n");
9828 break;
9829 case DIF_OP_SETX:
9830 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9831 err += efunc(pc, "invalid integer ref %u\n",
9832 DIF_INSTR_INTEGER(instr));
9833 }
9834 if (rd >= nregs)
9835 err += efunc(pc, "invalid register %u\n", rd);
9836 if (rd == 0)
cb323159 9837 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9838 break;
9839 case DIF_OP_SETS:
9840 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9841 err += efunc(pc, "invalid string ref %u\n",
9842 DIF_INSTR_STRING(instr));
9843 }
9844 if (rd >= nregs)
9845 err += efunc(pc, "invalid register %u\n", rd);
9846 if (rd == 0)
cb323159 9847 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9848 break;
9849 case DIF_OP_LDGA:
9850 case DIF_OP_LDTA:
9851 if (r1 > DIF_VAR_ARRAY_MAX)
9852 err += efunc(pc, "invalid array %u\n", r1);
9853 if (r2 >= nregs)
9854 err += efunc(pc, "invalid register %u\n", r2);
9855 if (rd >= nregs)
9856 err += efunc(pc, "invalid register %u\n", rd);
9857 if (rd == 0)
cb323159 9858 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9859 break;
9860 case DIF_OP_LDGS:
9861 case DIF_OP_LDTS:
9862 case DIF_OP_LDLS:
9863 case DIF_OP_LDGAA:
9864 case DIF_OP_LDTAA:
9865 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9866 err += efunc(pc, "invalid variable %u\n", v);
9867 if (rd >= nregs)
9868 err += efunc(pc, "invalid register %u\n", rd);
9869 if (rd == 0)
cb323159 9870 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55
A
9871 break;
9872 case DIF_OP_STGS:
9873 case DIF_OP_STTS:
9874 case DIF_OP_STLS:
9875 case DIF_OP_STGAA:
9876 case DIF_OP_STTAA:
9877 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9878 err += efunc(pc, "invalid variable %u\n", v);
9879 if (rs >= nregs)
9880 err += efunc(pc, "invalid register %u\n", rs);
9881 break;
9882 case DIF_OP_CALL:
39037602
A
9883 if (subr > DIF_SUBR_MAX &&
9884 !(subr >= DIF_SUBR_APPLE_MIN && subr <= DIF_SUBR_APPLE_MAX))
2d21ac55
A
9885 err += efunc(pc, "invalid subr %u\n", subr);
9886 if (rd >= nregs)
9887 err += efunc(pc, "invalid register %u\n", rd);
9888 if (rd == 0)
cb323159 9889 err += efunc(pc, "cannot write to %%r0\n");
2d21ac55 9890
f427ee49
A
9891 switch (subr) {
9892 case DIF_SUBR_COPYOUT:
9893 case DIF_SUBR_COPYOUTSTR:
9894 case DIF_SUBR_KDEBUG_TRACE:
9895 case DIF_SUBR_KDEBUG_TRACE_STRING:
9896 case DIF_SUBR_PHYSMEM_READ:
9897 case DIF_SUBR_PHYSMEM_WRITE:
2d21ac55 9898 dp->dtdo_destructive = 1;
f427ee49
A
9899 break;
9900 default:
9901 break;
2d21ac55
A
9902 }
9903 break;
9904 case DIF_OP_PUSHTR:
9905 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9906 err += efunc(pc, "invalid ref type %u\n", type);
9907 if (r2 >= nregs)
9908 err += efunc(pc, "invalid register %u\n", r2);
9909 if (rs >= nregs)
9910 err += efunc(pc, "invalid register %u\n", rs);
9911 break;
9912 case DIF_OP_PUSHTV:
9913 if (type != DIF_TYPE_CTF)
9914 err += efunc(pc, "invalid val type %u\n", type);
9915 if (r2 >= nregs)
9916 err += efunc(pc, "invalid register %u\n", r2);
9917 if (rs >= nregs)
9918 err += efunc(pc, "invalid register %u\n", rs);
9919 break;
cb323159
A
9920 case DIF_OP_STRIP:
9921 if (r1 >= nregs)
9922 err += efunc(pc, "invalid register %u\n", r1);
9923 if (!dtrace_is_valid_ptrauth_key(r2))
9924 err += efunc(pc, "invalid key\n");
9925 if (rd >= nregs)
9926 err += efunc(pc, "invalid register %u\n", rd);
9927 if (rd == 0)
9928 err += efunc(pc, "cannot write to %%r0\n");
9929 break;
2d21ac55
A
9930 default:
9931 err += efunc(pc, "invalid opcode %u\n",
9932 DIF_INSTR_OP(instr));
9933 }
9934 }
9935
9936 if (dp->dtdo_len != 0 &&
9937 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9938 err += efunc(dp->dtdo_len - 1,
9939 "expected 'ret' as last DIF instruction\n");
9940 }
9941
3e170ce0 9942 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
2d21ac55
A
9943 /*
9944 * If we're not returning by reference, the size must be either
9945 * 0 or the size of one of the base types.
9946 */
9947 switch (dp->dtdo_rtype.dtdt_size) {
9948 case 0:
9949 case sizeof (uint8_t):
9950 case sizeof (uint16_t):
9951 case sizeof (uint32_t):
9952 case sizeof (uint64_t):
9953 break;
9954
9955 default:
6d2010ae 9956 err += efunc(dp->dtdo_len - 1, "bad return size\n");
2d21ac55
A
9957 }
9958 }
9959
9960 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9961 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9962 dtrace_diftype_t *vt, *et;
b0d623f7
A
9963 uint_t id;
9964 int ndx;
2d21ac55
A
9965
9966 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9967 v->dtdv_scope != DIFV_SCOPE_THREAD &&
9968 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9969 err += efunc(i, "unrecognized variable scope %d\n",
9970 v->dtdv_scope);
9971 break;
9972 }
9973
9974 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9975 v->dtdv_kind != DIFV_KIND_SCALAR) {
9976 err += efunc(i, "unrecognized variable type %d\n",
9977 v->dtdv_kind);
9978 break;
9979 }
9980
9981 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9982 err += efunc(i, "%d exceeds variable id limit\n", id);
9983 break;
9984 }
9985
9986 if (id < DIF_VAR_OTHER_UBASE)
9987 continue;
9988
9989 /*
9990 * For user-defined variables, we need to check that this
9991 * definition is identical to any previous definition that we
9992 * encountered.
9993 */
9994 ndx = id - DIF_VAR_OTHER_UBASE;
9995
9996 switch (v->dtdv_scope) {
9997 case DIFV_SCOPE_GLOBAL:
39037602
A
9998 if (maxglobal == -1 || ndx > maxglobal)
9999 maxglobal = ndx;
10000
2d21ac55
A
10001 if (ndx < vstate->dtvs_nglobals) {
10002 dtrace_statvar_t *svar;
10003
10004 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
10005 existing = &svar->dtsv_var;
10006 }
10007
10008 break;
10009
10010 case DIFV_SCOPE_THREAD:
39037602
A
10011 if (maxtlocal == -1 || ndx > maxtlocal)
10012 maxtlocal = ndx;
10013
2d21ac55
A
10014 if (ndx < vstate->dtvs_ntlocals)
10015 existing = &vstate->dtvs_tlocals[ndx];
10016 break;
10017
10018 case DIFV_SCOPE_LOCAL:
39037602
A
10019 if (maxlocal == -1 || ndx > maxlocal)
10020 maxlocal = ndx;
2d21ac55
A
10021 if (ndx < vstate->dtvs_nlocals) {
10022 dtrace_statvar_t *svar;
10023
10024 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
10025 existing = &svar->dtsv_var;
10026 }
10027
10028 break;
10029 }
10030
10031 vt = &v->dtdv_type;
10032
10033 if (vt->dtdt_flags & DIF_TF_BYREF) {
10034 if (vt->dtdt_size == 0) {
10035 err += efunc(i, "zero-sized variable\n");
10036 break;
10037 }
10038
ecc0ceb4
A
10039 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
10040 v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
10041 vt->dtdt_size > dtrace_statvar_maxsize) {
10042 err += efunc(i, "oversized by-ref static\n");
2d21ac55
A
10043 break;
10044 }
10045 }
10046
10047 if (existing == NULL || existing->dtdv_id == 0)
10048 continue;
10049
10050 ASSERT(existing->dtdv_id == v->dtdv_id);
10051 ASSERT(existing->dtdv_scope == v->dtdv_scope);
10052
10053 if (existing->dtdv_kind != v->dtdv_kind)
10054 err += efunc(i, "%d changed variable kind\n", id);
10055
10056 et = &existing->dtdv_type;
10057
10058 if (vt->dtdt_flags != et->dtdt_flags) {
10059 err += efunc(i, "%d changed variable type flags\n", id);
10060 break;
10061 }
10062
10063 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
10064 err += efunc(i, "%d changed variable type size\n", id);
10065 break;
10066 }
10067 }
10068
39037602
A
10069 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
10070 dif_instr_t instr = dp->dtdo_buf[pc];
10071
10072 uint_t v = DIF_INSTR_VAR(instr);
10073 uint_t op = DIF_INSTR_OP(instr);
10074
10075 switch (op) {
10076 case DIF_OP_LDGS:
10077 case DIF_OP_LDGAA:
10078 case DIF_OP_STGS:
10079 case DIF_OP_STGAA:
10080 if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxglobal))
10081 err += efunc(pc, "invalid variable %u\n", v);
10082 break;
10083 case DIF_OP_LDTS:
10084 case DIF_OP_LDTAA:
10085 case DIF_OP_STTS:
10086 case DIF_OP_STTAA:
10087 if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxtlocal))
10088 err += efunc(pc, "invalid variable %u\n", v);
10089 break;
10090 case DIF_OP_LDLS:
10091 case DIF_OP_STLS:
10092 if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxlocal))
10093 err += efunc(pc, "invalid variable %u\n", v);
10094 break;
10095 default:
10096 break;
10097 }
10098 }
10099
2d21ac55
A
10100 return (err);
10101}
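/*
 * Illustrative sketch (not part of the original source): the smallest
 * DIF object that satisfies the rules above is a single "ret %r0".
 * The DIF_INSTR_RET() constructor comes from <sys/dtrace.h>, and nregs
 * would be DIF_DIR_NREGS at the call site.
 */
#if 0
static dif_instr_t example_buf[] = {
	DIF_INSTR_RET(0),	/* rule 5: the last instruction is a ret */
};

static dtrace_difo_t example_dp = {
	.dtdo_buf = example_buf,
	.dtdo_len = 1,		/* rules 1-4 and 6 are trivially satisfied */
	/* dtdo_rtype left zeroed: a return size of 0 is acceptable */
};

/* dtrace_difo_validate(&example_dp, vstate, DIF_DIR_NREGS, cr) == 0 */
#endif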
10102
10103/*
10104 * Validate a DTrace DIF object that is to be used as a helper. Helpers
10105 * are much more constrained than normal DIFOs. Specifically, they may
10106 * not:
10107 *
10108 * 1. Make calls to subroutines other than copyin(), copyinstr() or
10109 * miscellaneous string routines
10110 * 2. Access DTrace variables other than the args[] array, and the
10111 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
10112 * 3. Have thread-local variables.
10113 * 4. Have dynamic variables.
10114 */
10115static int
10116dtrace_difo_validate_helper(dtrace_difo_t *dp)
10117{
10118 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
10119 int err = 0;
10120 uint_t pc;
10121
10122 for (pc = 0; pc < dp->dtdo_len; pc++) {
10123 dif_instr_t instr = dp->dtdo_buf[pc];
10124
10125 uint_t v = DIF_INSTR_VAR(instr);
10126 uint_t subr = DIF_INSTR_SUBR(instr);
10127 uint_t op = DIF_INSTR_OP(instr);
10128
10129 switch (op) {
10130 case DIF_OP_OR:
10131 case DIF_OP_XOR:
10132 case DIF_OP_AND:
10133 case DIF_OP_SLL:
10134 case DIF_OP_SRL:
10135 case DIF_OP_SRA:
10136 case DIF_OP_SUB:
10137 case DIF_OP_ADD:
10138 case DIF_OP_MUL:
10139 case DIF_OP_SDIV:
10140 case DIF_OP_UDIV:
10141 case DIF_OP_SREM:
10142 case DIF_OP_UREM:
10143 case DIF_OP_COPYS:
10144 case DIF_OP_NOT:
10145 case DIF_OP_MOV:
10146 case DIF_OP_RLDSB:
10147 case DIF_OP_RLDSH:
10148 case DIF_OP_RLDSW:
10149 case DIF_OP_RLDUB:
10150 case DIF_OP_RLDUH:
10151 case DIF_OP_RLDUW:
10152 case DIF_OP_RLDX:
10153 case DIF_OP_ULDSB:
10154 case DIF_OP_ULDSH:
10155 case DIF_OP_ULDSW:
10156 case DIF_OP_ULDUB:
10157 case DIF_OP_ULDUH:
10158 case DIF_OP_ULDUW:
10159 case DIF_OP_ULDX:
10160 case DIF_OP_STB:
10161 case DIF_OP_STH:
10162 case DIF_OP_STW:
10163 case DIF_OP_STX:
10164 case DIF_OP_ALLOCS:
10165 case DIF_OP_CMP:
10166 case DIF_OP_SCMP:
10167 case DIF_OP_TST:
10168 case DIF_OP_BA:
10169 case DIF_OP_BE:
10170 case DIF_OP_BNE:
10171 case DIF_OP_BG:
10172 case DIF_OP_BGU:
10173 case DIF_OP_BGE:
10174 case DIF_OP_BGEU:
10175 case DIF_OP_BL:
10176 case DIF_OP_BLU:
10177 case DIF_OP_BLE:
10178 case DIF_OP_BLEU:
10179 case DIF_OP_RET:
10180 case DIF_OP_NOP:
10181 case DIF_OP_POPTS:
10182 case DIF_OP_FLUSHTS:
10183 case DIF_OP_SETX:
10184 case DIF_OP_SETS:
10185 case DIF_OP_LDGA:
10186 case DIF_OP_LDLS:
10187 case DIF_OP_STGS:
10188 case DIF_OP_STLS:
10189 case DIF_OP_PUSHTR:
10190 case DIF_OP_PUSHTV:
10191 break;
10192
10193 case DIF_OP_LDGS:
10194 if (v >= DIF_VAR_OTHER_UBASE)
10195 break;
10196
10197 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
10198 break;
10199
10200 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
10201 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
10202 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
10203 v == DIF_VAR_UID || v == DIF_VAR_GID)
10204 break;
10205
10206 err += efunc(pc, "illegal variable %u\n", v);
10207 break;
10208
10209 case DIF_OP_LDTA:
10210 case DIF_OP_LDTS:
10211 case DIF_OP_LDGAA:
10212 case DIF_OP_LDTAA:
10213 err += efunc(pc, "illegal dynamic variable load\n");
10214 break;
10215
10216 case DIF_OP_STTS:
10217 case DIF_OP_STGAA:
10218 case DIF_OP_STTAA:
10219 err += efunc(pc, "illegal dynamic variable store\n");
10220 break;
10221
10222 case DIF_OP_CALL:
f427ee49
A
10223 switch (subr) {
10224 case DIF_SUBR_ALLOCA:
10225 case DIF_SUBR_BCOPY:
10226 case DIF_SUBR_COPYIN:
10227 case DIF_SUBR_COPYINTO:
10228 case DIF_SUBR_COPYINSTR:
10229 case DIF_SUBR_HTONS:
10230 case DIF_SUBR_HTONL:
10231 case DIF_SUBR_HTONLL:
10232 case DIF_SUBR_INDEX:
10233 case DIF_SUBR_INET_NTOA:
10234 case DIF_SUBR_INET_NTOA6:
10235 case DIF_SUBR_INET_NTOP:
10236 case DIF_SUBR_JSON:
10237 case DIF_SUBR_LLTOSTR:
10238 case DIF_SUBR_NTOHS:
10239 case DIF_SUBR_NTOHL:
10240 case DIF_SUBR_NTOHLL:
10241 case DIF_SUBR_RINDEX:
10242 case DIF_SUBR_STRCHR:
10243 case DIF_SUBR_STRTOLL:
10244 case DIF_SUBR_STRJOIN:
10245 case DIF_SUBR_STRRCHR:
10246 case DIF_SUBR_STRSTR:
2d21ac55 10247 break;
f427ee49
A
10248 default:
10249 err += efunc(pc, "invalid subr %u\n", subr);
10250 }
2d21ac55
A
10251 break;
10252
10253 default:
10254 err += efunc(pc, "invalid opcode %u\n",
10255 DIF_INSTR_OP(instr));
10256 }
10257 }
10258
10259 return (err);
10260}
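/*
 * Illustrative note: under these rules a helper expression such as
 * copyinstr(arg0) validates cleanly, while a call to copyout() fails
 * with "invalid subr" and a reference to a thread-local variable fails
 * with "illegal dynamic variable load".
 */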
10261
10262/*
10263 * Returns 1 if the expression in the DIF object can be cached on a per-thread
10264 * basis; 0 if not.
10265 */
10266static int
10267dtrace_difo_cacheable(dtrace_difo_t *dp)
10268{
b0d623f7 10269 uint_t i;
2d21ac55
A
10270
10271 if (dp == NULL)
10272 return (0);
10273
10274 for (i = 0; i < dp->dtdo_varlen; i++) {
10275 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10276
10277 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
10278 continue;
10279
10280 switch (v->dtdv_id) {
10281 case DIF_VAR_CURTHREAD:
10282 case DIF_VAR_PID:
10283 case DIF_VAR_TID:
10284 case DIF_VAR_EXECNAME:
10285 case DIF_VAR_ZONENAME:
10286 break;
10287
10288 default:
10289 return (0);
10290 }
10291 }
10292
10293 /*
10294 * This DIF object may be cacheable. Now we need to look for any
10295 * array loading instructions, any memory loading instructions, or
10296 * any stores to thread-local variables.
10297 */
10298 for (i = 0; i < dp->dtdo_len; i++) {
10299 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
10300
10301 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
10302 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
10303 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
10304 op == DIF_OP_LDGA || op == DIF_OP_STTS)
10305 return (0);
10306 }
10307
10308 return (1);
10309}
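/*
 * Illustrative note: a predicate such as /pid == 1234/ compiles to a
 * load of DIF_VAR_PID, a setx and a compare -- no memory loads, no
 * DIF_OP_LDGA, no thread-local stores -- so it is cacheable.  A
 * predicate that dereferences memory or references an unlisted
 * variable returns 0 here and must be evaluated on every firing.
 */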
10310
10311static void
10312dtrace_difo_hold(dtrace_difo_t *dp)
10313{
b0d623f7 10314 uint_t i;
2d21ac55 10315
5ba3f43e 10316 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10317
10318 dp->dtdo_refcnt++;
10319 ASSERT(dp->dtdo_refcnt != 0);
10320
10321 /*
10322 * We need to check this DIF object for references to the variable
10323 * DIF_VAR_VTIMESTAMP.
10324 */
10325 for (i = 0; i < dp->dtdo_varlen; i++) {
10326 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10327
10328 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10329 continue;
10330
10331 if (dtrace_vtime_references++ == 0)
10332 dtrace_vtime_enable();
10333 }
10334}
10335
10336/*
10337 * This routine calculates the dynamic variable chunksize for a given DIF
10338 * object. The calculation is not fool-proof, and can probably be tricked by
10339 * malicious DIF -- but it works for all compiler-generated DIF. Because this
10340 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
10341 * if a dynamic variable size exceeds the chunksize.
10342 */
10343static void
10344dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10345{
b0d623f7 10346 uint64_t sval = 0;
2d21ac55
A
10347 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
10348 const dif_instr_t *text = dp->dtdo_buf;
10349 uint_t pc, srd = 0;
10350 uint_t ttop = 0;
10351 size_t size, ksize;
10352 uint_t id, i;
10353
10354 for (pc = 0; pc < dp->dtdo_len; pc++) {
10355 dif_instr_t instr = text[pc];
10356 uint_t op = DIF_INSTR_OP(instr);
10357 uint_t rd = DIF_INSTR_RD(instr);
10358 uint_t r1 = DIF_INSTR_R1(instr);
10359 uint_t nkeys = 0;
10360 uchar_t scope;
10361
10362 dtrace_key_t *key = tupregs;
10363
10364 switch (op) {
10365 case DIF_OP_SETX:
10366 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
10367 srd = rd;
10368 continue;
10369
10370 case DIF_OP_STTS:
10371 key = &tupregs[DIF_DTR_NREGS];
10372 key[0].dttk_size = 0;
10373 key[1].dttk_size = 0;
10374 nkeys = 2;
10375 scope = DIFV_SCOPE_THREAD;
10376 break;
10377
10378 case DIF_OP_STGAA:
10379 case DIF_OP_STTAA:
10380 nkeys = ttop;
10381
10382 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
10383 key[nkeys++].dttk_size = 0;
10384
10385 key[nkeys++].dttk_size = 0;
10386
10387 if (op == DIF_OP_STTAA) {
10388 scope = DIFV_SCOPE_THREAD;
10389 } else {
10390 scope = DIFV_SCOPE_GLOBAL;
10391 }
10392
10393 break;
10394
10395 case DIF_OP_PUSHTR:
10396 if (ttop == DIF_DTR_NREGS)
10397 return;
10398
10399 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
10400 /*
10401 * If the register for the size of the "pushtr"
10402 * is %r0 (or the value is 0) and the type is
10403 * a string, we'll use the system-wide default
10404 * string size.
10405 */
10406 tupregs[ttop++].dttk_size =
10407 dtrace_strsize_default;
10408 } else {
10409 if (srd == 0)
10410 return;
10411
ecc0ceb4
A
10412 if (sval > LONG_MAX)
10413 return;
10414
2d21ac55
A
10415 tupregs[ttop++].dttk_size = sval;
10416 }
10417
10418 break;
10419
10420 case DIF_OP_PUSHTV:
10421 if (ttop == DIF_DTR_NREGS)
10422 return;
10423
10424 tupregs[ttop++].dttk_size = 0;
10425 break;
10426
10427 case DIF_OP_FLUSHTS:
10428 ttop = 0;
10429 break;
10430
10431 case DIF_OP_POPTS:
10432 if (ttop != 0)
10433 ttop--;
10434 break;
10435 }
10436
10437 sval = 0;
10438 srd = 0;
10439
10440 if (nkeys == 0)
10441 continue;
10442
10443 /*
10444 * We have a dynamic variable allocation; calculate its size.
10445 */
10446 for (ksize = 0, i = 0; i < nkeys; i++)
10447 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10448
10449 size = sizeof (dtrace_dynvar_t);
10450 size += sizeof (dtrace_key_t) * (nkeys - 1);
10451 size += ksize;
10452
10453 /*
10454 * Now we need to determine the size of the stored data.
10455 */
10456 id = DIF_INSTR_VAR(instr);
10457
10458 for (i = 0; i < dp->dtdo_varlen; i++) {
10459 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10460
10461 if (v->dtdv_id == id && v->dtdv_scope == scope) {
10462 size += v->dtdv_type.dtdt_size;
10463 break;
10464 }
10465 }
10466
10467 if (i == dp->dtdo_varlen)
10468 return;
10469
10470 /*
10471 * We have the size. If this is larger than the chunk size
10472 * for our dynamic variable state, reset the chunk size.
10473 */
10474 size = P2ROUNDUP(size, sizeof (uint64_t));
10475
ecc0ceb4
A
10476 /*
10477 * Before setting the chunk size, check that we're not going
10478 * to set it to a negative value...
10479 */
10480 if (size > LONG_MAX)
10481 return;
10482
10483 /*
10484 * ...and make certain that we didn't badly overflow.
10485 */
10486 if (size < ksize || size < sizeof (dtrace_dynvar_t))
10487 return;
10488
2d21ac55
A
10489 if (size > vstate->dtvs_dynvars.dtds_chunksize)
10490 vstate->dtvs_dynvars.dtds_chunksize = size;
10491 }
10492}
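/*
 * Worked example: a store to a thread-local scalar (DIF_OP_STTS)
 * carries the two implicit keys -- thread and id -- each with
 * dttk_size 0, so nkeys == 2, ksize == 0 and the allocation is
 *
 *	size = sizeof (dtrace_dynvar_t)
 *	     + sizeof (dtrace_key_t)		(nkeys - 1 == 1)
 *	     + the variable's dtdt_size from the vartab,
 *
 * rounded up to a uint64_t boundary.  Note that the chunksize only
 * ever grows; a smaller variable never shrinks it.
 */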
10493
10494static void
10495dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10496{
b0d623f7
A
10497 int oldsvars, osz, nsz, otlocals, ntlocals;
10498 uint_t i, id;
2d21ac55 10499
5ba3f43e 10500 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10501 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10502
10503 for (i = 0; i < dp->dtdo_varlen; i++) {
10504 dtrace_difv_t *v = &dp->dtdo_vartab[i];
b0d623f7
A
10505 dtrace_statvar_t *svar;
10506 dtrace_statvar_t ***svarp = NULL;
2d21ac55
A
10507 size_t dsize = 0;
10508 uint8_t scope = v->dtdv_scope;
b0d623f7 10509 int *np = (int *)NULL;
2d21ac55
A
10510
10511 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10512 continue;
10513
10514 id -= DIF_VAR_OTHER_UBASE;
10515
10516 switch (scope) {
10517 case DIFV_SCOPE_THREAD:
b0d623f7 10518 while (id >= (uint_t)(otlocals = vstate->dtvs_ntlocals)) {
2d21ac55
A
10519 dtrace_difv_t *tlocals;
10520
10521 if ((ntlocals = (otlocals << 1)) == 0)
10522 ntlocals = 1;
10523
10524 osz = otlocals * sizeof (dtrace_difv_t);
10525 nsz = ntlocals * sizeof (dtrace_difv_t);
10526
10527 tlocals = kmem_zalloc(nsz, KM_SLEEP);
10528
10529 if (osz != 0) {
10530 bcopy(vstate->dtvs_tlocals,
10531 tlocals, osz);
10532 kmem_free(vstate->dtvs_tlocals, osz);
10533 }
10534
10535 vstate->dtvs_tlocals = tlocals;
10536 vstate->dtvs_ntlocals = ntlocals;
10537 }
10538
10539 vstate->dtvs_tlocals[id] = *v;
10540 continue;
10541
10542 case DIFV_SCOPE_LOCAL:
10543 np = &vstate->dtvs_nlocals;
10544 svarp = &vstate->dtvs_locals;
10545
10546 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
c910b4d9 10547 dsize = (int)NCPU * (v->dtdv_type.dtdt_size +
2d21ac55
A
10548 sizeof (uint64_t));
10549 else
c910b4d9 10550 dsize = (int)NCPU * sizeof (uint64_t);
2d21ac55
A
10551
10552 break;
10553
10554 case DIFV_SCOPE_GLOBAL:
10555 np = &vstate->dtvs_nglobals;
10556 svarp = &vstate->dtvs_globals;
10557
10558 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10559 dsize = v->dtdv_type.dtdt_size +
10560 sizeof (uint64_t);
10561
10562 break;
10563
10564 default:
10565 ASSERT(0);
10566 }
10567
b0d623f7 10568 while (id >= (uint_t)(oldsvars = *np)) {
2d21ac55
A
10569 dtrace_statvar_t **statics;
10570 int newsvars, oldsize, newsize;
10571
10572 if ((newsvars = (oldsvars << 1)) == 0)
10573 newsvars = 1;
10574
10575 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10576 newsize = newsvars * sizeof (dtrace_statvar_t *);
10577
10578 statics = kmem_zalloc(newsize, KM_SLEEP);
10579
10580 if (oldsize != 0) {
10581 bcopy(*svarp, statics, oldsize);
10582 kmem_free(*svarp, oldsize);
10583 }
10584
10585 *svarp = statics;
10586 *np = newsvars;
10587 }
10588
10589 if ((svar = (*svarp)[id]) == NULL) {
10590 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10591 svar->dtsv_var = *v;
10592
10593 if ((svar->dtsv_size = dsize) != 0) {
10594 svar->dtsv_data = (uint64_t)(uintptr_t)
10595 kmem_zalloc(dsize, KM_SLEEP);
10596 }
10597
10598 (*svarp)[id] = svar;
10599 }
10600
10601 svar->dtsv_refcnt++;
10602 }
10603
10604 dtrace_difo_chunksize(dp, vstate);
10605 dtrace_difo_hold(dp);
10606}
10607
10608static dtrace_difo_t *
10609dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10610{
10611 dtrace_difo_t *new;
10612 size_t sz;
10613
10614 ASSERT(dp->dtdo_buf != NULL);
10615 ASSERT(dp->dtdo_refcnt != 0);
10616
10617 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10618
10619 ASSERT(dp->dtdo_buf != NULL);
10620 sz = dp->dtdo_len * sizeof (dif_instr_t);
10621 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10622 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10623 new->dtdo_len = dp->dtdo_len;
10624
10625 if (dp->dtdo_strtab != NULL) {
10626 ASSERT(dp->dtdo_strlen != 0);
10627 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10628 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10629 new->dtdo_strlen = dp->dtdo_strlen;
10630 }
10631
10632 if (dp->dtdo_inttab != NULL) {
10633 ASSERT(dp->dtdo_intlen != 0);
10634 sz = dp->dtdo_intlen * sizeof (uint64_t);
10635 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10636 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10637 new->dtdo_intlen = dp->dtdo_intlen;
10638 }
10639
10640 if (dp->dtdo_vartab != NULL) {
10641 ASSERT(dp->dtdo_varlen != 0);
10642 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10643 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10644 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10645 new->dtdo_varlen = dp->dtdo_varlen;
10646 }
10647
10648 dtrace_difo_init(new, vstate);
10649 return (new);
10650}
10651
10652static void
10653dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10654{
b0d623f7 10655 uint_t i;
2d21ac55
A
10656
10657 ASSERT(dp->dtdo_refcnt == 0);
10658
10659 for (i = 0; i < dp->dtdo_varlen; i++) {
10660 dtrace_difv_t *v = &dp->dtdo_vartab[i];
b0d623f7
A
10661 dtrace_statvar_t *svar;
10662 dtrace_statvar_t **svarp = NULL;
10663 uint_t id;
10664 uint8_t scope = v->dtdv_scope;
10665 int *np = NULL;
2d21ac55
A
10666
10667 switch (scope) {
10668 case DIFV_SCOPE_THREAD:
10669 continue;
10670
10671 case DIFV_SCOPE_LOCAL:
10672 np = &vstate->dtvs_nlocals;
10673 svarp = vstate->dtvs_locals;
10674 break;
10675
10676 case DIFV_SCOPE_GLOBAL:
10677 np = &vstate->dtvs_nglobals;
10678 svarp = vstate->dtvs_globals;
10679 break;
10680
10681 default:
10682 ASSERT(0);
10683 }
10684
10685 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10686 continue;
10687
10688 id -= DIF_VAR_OTHER_UBASE;
b0d623f7 10689
b0d623f7 10690 ASSERT(id < (uint_t)*np);
2d21ac55
A
10691
10692 svar = svarp[id];
10693 ASSERT(svar != NULL);
10694 ASSERT(svar->dtsv_refcnt > 0);
10695
10696 if (--svar->dtsv_refcnt > 0)
10697 continue;
10698
10699 if (svar->dtsv_size != 0) {
fe8ab488 10700 ASSERT(svar->dtsv_data != 0);
2d21ac55
A
10701 kmem_free((void *)(uintptr_t)svar->dtsv_data,
10702 svar->dtsv_size);
10703 }
10704
10705 kmem_free(svar, sizeof (dtrace_statvar_t));
10706 svarp[id] = NULL;
10707 }
10708
10709 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10710 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10711 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10712 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10713
10714 kmem_free(dp, sizeof (dtrace_difo_t));
10715}
10716
10717static void
10718dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10719{
b0d623f7 10720 uint_t i;
2d21ac55 10721
5ba3f43e 10722 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10723 ASSERT(dp->dtdo_refcnt != 0);
10724
10725 for (i = 0; i < dp->dtdo_varlen; i++) {
10726 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10727
10728 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10729 continue;
10730
10731 ASSERT(dtrace_vtime_references > 0);
10732 if (--dtrace_vtime_references == 0)
10733 dtrace_vtime_disable();
10734 }
10735
10736 if (--dp->dtdo_refcnt == 0)
10737 dtrace_difo_destroy(dp, vstate);
10738}
10739
10740/*
10741 * DTrace Format Functions
10742 */
4ba76501
A
10743
10744static dtrace_format_t*
10745dtrace_format_new(char *str)
10746{
10747 dtrace_format_t *fmt = NULL;
10748 size_t bufsize = strlen(str) + 1;
10749
10750 fmt = kmem_zalloc(sizeof(*fmt) + bufsize, KM_SLEEP);
10751
10752 fmt->dtf_refcount = 1;
10753 (void) strlcpy(fmt->dtf_str, str, bufsize);
10754
10755 return fmt;
10756}
10757
2d21ac55
A
10758static uint16_t
10759dtrace_format_add(dtrace_state_t *state, char *str)
10760{
4ba76501
A
10761 dtrace_format_t **new;
10762 uint16_t ndx;
2d21ac55
A
10763
10764 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10765 if (state->dts_formats[ndx] == NULL) {
4ba76501
A
10766 state->dts_formats[ndx] = dtrace_format_new(str);
10767 return (ndx + 1);
10768 }
10769 else if (strcmp(state->dts_formats[ndx]->dtf_str, str) == 0) {
10770 VERIFY(state->dts_formats[ndx]->dtf_refcount < UINT64_MAX);
10771 state->dts_formats[ndx]->dtf_refcount++;
2d21ac55
A
10772 return (ndx + 1);
10773 }
10774 }
10775
10776 if (state->dts_nformats == USHRT_MAX) {
10777 /*
10778 * This is only likely if a denial-of-service attack is being
10779 * attempted. As such, it's okay to fail silently here.
10780 */
2d21ac55
A
10781 return (0);
10782 }
10783
10784 /*
10785 * For simplicity, we always resize the formats array to be exactly the
10786 * number of formats.
10787 */
10788 ndx = state->dts_nformats++;
4ba76501 10789 new = kmem_alloc((ndx + 1) * sizeof (*state->dts_formats), KM_SLEEP);
2d21ac55
A
10790
10791 if (state->dts_formats != NULL) {
10792 ASSERT(ndx != 0);
4ba76501
A
10793 bcopy(state->dts_formats, new, ndx * sizeof (*state->dts_formats));
10794 kmem_free(state->dts_formats, ndx * sizeof (*state->dts_formats));
2d21ac55
A
10795 }
10796
10797 state->dts_formats = new;
4ba76501 10798 state->dts_formats[ndx] = dtrace_format_new(str);
2d21ac55
A
10799
10800 return (ndx + 1);
10801}
10802
10803static void
10804dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10805{
4ba76501 10806 dtrace_format_t *fmt;
2d21ac55
A
10807
10808 ASSERT(state->dts_formats != NULL);
10809 ASSERT(format <= state->dts_nformats);
2d21ac55
A
10810
10811 fmt = state->dts_formats[format - 1];
4ba76501
A
10812
10813 ASSERT(fmt != NULL);
10814 VERIFY(fmt->dtf_refcount > 0);
10815
10816 fmt->dtf_refcount--;
10817
10818 if (fmt->dtf_refcount == 0) {
10819 kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
10820 state->dts_formats[format - 1] = NULL;
10821 }
2d21ac55
A
10822}
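/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): identical format strings share a slot by reference count,
 * so adding the same string twice yields the same 1-based index.
 */
#if 0
static void
example_format_usage(dtrace_state_t *state)
{
	char fmt[] = "%d bytes\n";
	uint16_t a = dtrace_format_add(state, fmt);
	uint16_t b = dtrace_format_add(state, fmt);

	ASSERT(a == b);			/* same slot, dtf_refcount == 2 */

	dtrace_format_remove(state, b);	/* dtf_refcount back to 1 */
	dtrace_format_remove(state, a);	/* slot freed and set to NULL */
}
#endif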
10823
10824static void
10825dtrace_format_destroy(dtrace_state_t *state)
10826{
10827 int i;
10828
10829 if (state->dts_nformats == 0) {
10830 ASSERT(state->dts_formats == NULL);
10831 return;
10832 }
10833
10834 ASSERT(state->dts_formats != NULL);
10835
10836 for (i = 0; i < state->dts_nformats; i++) {
4ba76501 10837 dtrace_format_t *fmt = state->dts_formats[i];
2d21ac55
A
10838
10839 if (fmt == NULL)
10840 continue;
10841
4ba76501 10842 kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
2d21ac55
A
10843 }
10844
4ba76501 10845 kmem_free(state->dts_formats, state->dts_nformats * sizeof (*state->dts_formats));
2d21ac55
A
10846 state->dts_nformats = 0;
10847 state->dts_formats = NULL;
10848}
10849
10850/*
10851 * DTrace Predicate Functions
10852 */
10853static dtrace_predicate_t *
10854dtrace_predicate_create(dtrace_difo_t *dp)
10855{
10856 dtrace_predicate_t *pred;
10857
5ba3f43e 10858 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10859 ASSERT(dp->dtdo_refcnt != 0);
10860
10861 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10862 pred->dtp_difo = dp;
10863 pred->dtp_refcnt = 1;
10864
10865 if (!dtrace_difo_cacheable(dp))
10866 return (pred);
10867
10868 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10869 /*
10870 * This is only theoretically possible -- we have had 2^32
10871 * cacheable predicates on this machine. We cannot allow any
10872 * more predicates to become cacheable: as unlikely as it is,
10873 * there may be a thread caching a (now stale) predicate cache
10874 * ID. (N.B.: the temptation is being successfully resisted to
10875 * have this cmn_err() "Holy shit -- we executed this code!")
10876 */
10877 return (pred);
10878 }
10879
10880 pred->dtp_cacheid = dtrace_predcache_id++;
10881
10882 return (pred);
10883}
10884
10885static void
10886dtrace_predicate_hold(dtrace_predicate_t *pred)
10887{
5ba3f43e 10888 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10889 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10890 ASSERT(pred->dtp_refcnt > 0);
10891
10892 pred->dtp_refcnt++;
10893}
10894
10895static void
10896dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10897{
10898 dtrace_difo_t *dp = pred->dtp_difo;
b0d623f7 10899#pragma unused(dp) /* __APPLE__ */
2d21ac55 10900
5ba3f43e 10901 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10902 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10903 ASSERT(pred->dtp_refcnt > 0);
10904
10905 if (--pred->dtp_refcnt == 0) {
10906 dtrace_difo_release(pred->dtp_difo, vstate);
10907 kmem_free(pred, sizeof (dtrace_predicate_t));
10908 }
10909}
10910
10911/*
10912 * DTrace Action Description Functions
10913 */
10914static dtrace_actdesc_t *
10915dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10916 uint64_t uarg, uint64_t arg)
10917{
10918 dtrace_actdesc_t *act;
10919
fe8ab488
A
10920 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != 0 &&
10921 arg >= KERNELBASE) || (arg == 0 && kind == DTRACEACT_PRINTA));
2d21ac55
A
10922
10923 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10924 act->dtad_kind = kind;
10925 act->dtad_ntuple = ntuple;
10926 act->dtad_uarg = uarg;
10927 act->dtad_arg = arg;
10928 act->dtad_refcnt = 1;
10929
10930 return (act);
10931}
10932
10933static void
10934dtrace_actdesc_hold(dtrace_actdesc_t *act)
10935{
10936 ASSERT(act->dtad_refcnt >= 1);
10937 act->dtad_refcnt++;
10938}
10939
10940static void
10941dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10942{
10943 dtrace_actkind_t kind = act->dtad_kind;
10944 dtrace_difo_t *dp;
10945
10946 ASSERT(act->dtad_refcnt >= 1);
10947
10948 if (--act->dtad_refcnt != 0)
10949 return;
10950
10951 if ((dp = act->dtad_difo) != NULL)
10952 dtrace_difo_release(dp, vstate);
10953
10954 if (DTRACEACT_ISPRINTFLIKE(kind)) {
10955 char *str = (char *)(uintptr_t)act->dtad_arg;
10956
b0d623f7
A
10957 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10958 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
2d21ac55
A
10959
10960 if (str != NULL)
10961 kmem_free(str, strlen(str) + 1);
10962 }
10963
10964 kmem_free(act, sizeof (dtrace_actdesc_t));
10965}
10966
10967/*
10968 * DTrace ECB Functions
10969 */
10970static dtrace_ecb_t *
10971dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10972{
10973 dtrace_ecb_t *ecb;
10974 dtrace_epid_t epid;
10975
5ba3f43e 10976 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
10977
10978 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10979 ecb->dte_predicate = NULL;
10980 ecb->dte_probe = probe;
10981
10982 /*
10983 * The default size is the size of the default action: recording
04b8595b 10984 * the header.
2d21ac55 10985 */
04b8595b 10986 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
2d21ac55
A
10987 ecb->dte_alignment = sizeof (dtrace_epid_t);
10988
10989 epid = state->dts_epid++;
10990
b0d623f7 10991 if (epid - 1 >= (dtrace_epid_t)state->dts_necbs) {
2d21ac55
A
10992 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10993 int necbs = state->dts_necbs << 1;
10994
b0d623f7 10995 ASSERT(epid == (dtrace_epid_t)state->dts_necbs + 1);
2d21ac55
A
10996
10997 if (necbs == 0) {
10998 ASSERT(oecbs == NULL);
10999 necbs = 1;
11000 }
11001
11002 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
11003
11004 if (oecbs != NULL)
11005 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
11006
11007 dtrace_membar_producer();
11008 state->dts_ecbs = ecbs;
11009
11010 if (oecbs != NULL) {
11011 /*
11012 * If this state is active, we must dtrace_sync()
11013 * before we can free the old dts_ecbs array: we're
11014 * coming in hot, and there may be active ring
11015 * buffer processing (which indexes into the dts_ecbs
11016 * array) on another CPU.
11017 */
11018 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
11019 dtrace_sync();
11020
11021 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
11022 }
11023
11024 dtrace_membar_producer();
11025 state->dts_necbs = necbs;
11026 }
11027
11028 ecb->dte_state = state;
11029
11030 ASSERT(state->dts_ecbs[epid - 1] == NULL);
11031 dtrace_membar_producer();
11032 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
11033
11034 return (ecb);
11035}
11036
6d2010ae 11037static int
2d21ac55
A
11038dtrace_ecb_enable(dtrace_ecb_t *ecb)
11039{
11040 dtrace_probe_t *probe = ecb->dte_probe;
11041
5ba3f43e
A
11042 LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
11043 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
11044 ASSERT(ecb->dte_next == NULL);
11045
11046 if (probe == NULL) {
11047 /*
11048 * This is the NULL probe -- there's nothing to do.
11049 */
6d2010ae 11050 return(0);
2d21ac55
A
11051 }
11052
fe8ab488 11053 probe->dtpr_provider->dtpv_ecb_count++;
2d21ac55
A
11054 if (probe->dtpr_ecb == NULL) {
11055 dtrace_provider_t *prov = probe->dtpr_provider;
11056
11057 /*
11058 * We're the first ECB on this probe.
11059 */
11060 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
11061
11062 if (ecb->dte_predicate != NULL)
11063 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
11064
6d2010ae
A
11065 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
11066 probe->dtpr_id, probe->dtpr_arg));
2d21ac55
A
11067 } else {
11068 /*
11069 * This probe is already active. Swing the last pointer to
11070 * point to the new ECB, and issue a dtrace_sync() to assure
11071 * that all CPUs have seen the change.
11072 */
11073 ASSERT(probe->dtpr_ecb_last != NULL);
11074 probe->dtpr_ecb_last->dte_next = ecb;
11075 probe->dtpr_ecb_last = ecb;
11076 probe->dtpr_predcache = 0;
11077
11078 dtrace_sync();
6d2010ae 11079 return(0);
2d21ac55
A
11080 }
11081}
11082
39037602 11083static int
2d21ac55
A
11084dtrace_ecb_resize(dtrace_ecb_t *ecb)
11085{
2d21ac55 11086 dtrace_action_t *act;
04b8595b 11087 uint32_t curneeded = UINT32_MAX;
2d21ac55 11088 uint32_t aggbase = UINT32_MAX;
2d21ac55
A
11089
11090 /*
04b8595b
A
11091 * If we record anything, we always record the dtrace_rechdr_t. (And
11092 * we always record it first.)
2d21ac55 11093 */
04b8595b
A
11094 ecb->dte_size = sizeof (dtrace_rechdr_t);
11095 ecb->dte_alignment = sizeof (dtrace_epid_t);
2d21ac55
A
11096
11097 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11098 dtrace_recdesc_t *rec = &act->dta_rec;
04b8595b 11099 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
2d21ac55 11100
04b8595b 11101 ecb->dte_alignment = MAX(ecb->dte_alignment, rec->dtrd_alignment);
2d21ac55
A
11102
11103 if (DTRACEACT_ISAGG(act->dta_kind)) {
11104 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
2d21ac55 11105
04b8595b
A
11106 ASSERT(rec->dtrd_size != 0);
11107 ASSERT(agg->dtag_first != NULL);
11108 ASSERT(act->dta_prev->dta_intuple);
2d21ac55 11109 ASSERT(aggbase != UINT32_MAX);
04b8595b 11110 ASSERT(curneeded != UINT32_MAX);
2d21ac55
A
11111
11112 agg->dtag_base = aggbase;
04b8595b
A
11113 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11114 rec->dtrd_offset = curneeded;
39037602
A
11115 if (curneeded + rec->dtrd_size < curneeded)
11116 return (EINVAL);
04b8595b
A
11117 curneeded += rec->dtrd_size;
11118 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
2d21ac55 11119
04b8595b
A
11120 aggbase = UINT32_MAX;
11121 curneeded = UINT32_MAX;
11122 } else if (act->dta_intuple) {
11123 if (curneeded == UINT32_MAX) {
11124 /*
11125 * This is the first record in a tuple. Align
11126 * curneeded to be at offset 4 in an 8-byte
11127 * aligned block.
11128 */
11129 ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple);
11130 ASSERT(aggbase == UINT32_MAX);
11131
11132 curneeded = P2PHASEUP(ecb->dte_size,
11133 sizeof (uint64_t), sizeof (dtrace_aggid_t));
11134
11135 aggbase = curneeded - sizeof (dtrace_aggid_t);
11136 ASSERT(IS_P2ALIGNED(aggbase,
11137 sizeof (uint64_t)));
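				/*
				 * Worked example: if dte_size is 13,
				 * P2PHASEUP(13, 8, 4) yields 20, so
				 * aggbase lands at 16 (8-byte aligned)
				 * and the first tuple record begins at
				 * offset 20.
				 */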
2d21ac55 11138 }
2d21ac55 11139
04b8595b
A
11140 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11141 rec->dtrd_offset = curneeded;
39037602
A
11142 if (curneeded + rec->dtrd_size < curneeded)
11143 return (EINVAL);
11144 curneeded += rec->dtrd_size;
04b8595b
A
11145 } else {
11146 /* tuples must be followed by an aggregation */
11147 ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple);
11148 ecb->dte_size = P2ROUNDUP(ecb->dte_size, rec->dtrd_alignment);
11149 rec->dtrd_offset = ecb->dte_size;
39037602
A
11150 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
11151 return (EINVAL);
04b8595b
A
11152 ecb->dte_size += rec->dtrd_size;
11153 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
2d21ac55 11154 }
2d21ac55
A
11155 }
11156
11157 if ((act = ecb->dte_action) != NULL &&
11158 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
04b8595b 11159 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
2d21ac55 11160 /*
04b8595b 11161 * If the size is still sizeof (dtrace_rechdr_t), then all
2d21ac55
A
11162 * actions store no data; set the size to 0.
11163 */
2d21ac55 11164 ecb->dte_size = 0;
2d21ac55
A
11165 }
11166
04b8595b
A
11167 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
11168 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
11169 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, ecb->dte_needed);
39037602 11170 return (0);
2d21ac55
A
11171}
11172
11173static dtrace_action_t *
11174dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11175{
11176 dtrace_aggregation_t *agg;
11177 size_t size = sizeof (uint64_t);
11178 int ntuple = desc->dtad_ntuple;
11179 dtrace_action_t *act;
11180 dtrace_recdesc_t *frec;
11181 dtrace_aggid_t aggid;
11182 dtrace_state_t *state = ecb->dte_state;
11183
11184 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
11185 agg->dtag_ecb = ecb;
11186
11187 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
11188
11189 switch (desc->dtad_kind) {
11190 case DTRACEAGG_MIN:
b0d623f7 11191 agg->dtag_initial = INT64_MAX;
2d21ac55
A
11192 agg->dtag_aggregate = dtrace_aggregate_min;
11193 break;
11194
11195 case DTRACEAGG_MAX:
b0d623f7 11196 agg->dtag_initial = INT64_MIN;
2d21ac55
A
11197 agg->dtag_aggregate = dtrace_aggregate_max;
11198 break;
11199
11200 case DTRACEAGG_COUNT:
11201 agg->dtag_aggregate = dtrace_aggregate_count;
11202 break;
11203
11204 case DTRACEAGG_QUANTIZE:
11205 agg->dtag_aggregate = dtrace_aggregate_quantize;
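		/*
		 * 64-bit values land in ((64 - 1) * 2 + 1) == 127
		 * power-of-two buckets -- 63 negative, 63 positive and
		 * one for zero -- each a uint64_t counter.
		 */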
11206 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
11207 sizeof (uint64_t);
11208 break;
11209
11210 case DTRACEAGG_LQUANTIZE: {
11211 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
11212 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
11213
11214 agg->dtag_initial = desc->dtad_arg;
11215 agg->dtag_aggregate = dtrace_aggregate_lquantize;
11216
11217 if (step == 0 || levels == 0)
11218 goto err;
11219
11220 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
11221 break;
11222 }
11223
39236c6e
A
11224 case DTRACEAGG_LLQUANTIZE: {
11225 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
11226 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
11227 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
15129b1c 11228 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
39236c6e
A
11229 int64_t v;
11230
11231 agg->dtag_initial = desc->dtad_arg;
11232 agg->dtag_aggregate = dtrace_aggregate_llquantize;
11233
11234 if (factor < 2 || low >= high || nsteps < factor)
11235 goto err;
11236
11237 /*
11238 * Now check that the number of steps evenly divides a power
11239 * of the factor. (This assures both integer bucket size and
11240 * linearity within each magnitude.)
11241 */
11242 for (v = factor; v < nsteps; v *= factor)
11243 continue;
11244
11245 if ((v % nsteps) || (nsteps % factor))
11246 goto err;
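		/*
		 * Worked example: factor 10, nsteps 20 is accepted --
		 * v reaches 100, 100 % 20 == 0 and 20 % 10 == 0 --
		 * while factor 10, nsteps 30 is rejected because
		 * 100 % 30 != 0.
		 */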
11247
11248 size = (dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
11249 break;
11250 }
11251
2d21ac55
A
11252 case DTRACEAGG_AVG:
11253 agg->dtag_aggregate = dtrace_aggregate_avg;
11254 size = sizeof (uint64_t) * 2;
11255 break;
11256
b0d623f7
A
11257 case DTRACEAGG_STDDEV:
11258 agg->dtag_aggregate = dtrace_aggregate_stddev;
11259 size = sizeof (uint64_t) * 4;
11260 break;
11261
2d21ac55
A
11262 case DTRACEAGG_SUM:
11263 agg->dtag_aggregate = dtrace_aggregate_sum;
11264 break;
11265
11266 default:
11267 goto err;
11268 }
11269
11270 agg->dtag_action.dta_rec.dtrd_size = size;
11271
11272 if (ntuple == 0)
11273 goto err;
11274
11275 /*
11276 * We must make sure that we have enough actions for the n-tuple.
11277 */
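	/*
	 * Illustrative example: for @a[pid, execname] = count(),
	 * dtad_ntuple is 2; walking back from dte_action_last skips the
	 * execname record, and the pid record -- reached when ntuple
	 * drops to 0 -- becomes dtag_first.
	 */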
11278 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
11279 if (DTRACEACT_ISAGG(act->dta_kind))
11280 break;
11281
11282 if (--ntuple == 0) {
11283 /*
11284 * This is the action with which our n-tuple begins.
11285 */
11286 agg->dtag_first = act;
11287 goto success;
11288 }
11289 }
11290
11291 /*
11292 * This n-tuple is short by ntuple elements. Return failure.
11293 */
11294 ASSERT(ntuple != 0);
11295err:
11296 kmem_free(agg, sizeof (dtrace_aggregation_t));
11297 return (NULL);
11298
11299success:
11300 /*
11301 * If the last action in the tuple has a size of zero, it's actually
11302 * an expression argument for the aggregating action.
11303 */
11304 ASSERT(ecb->dte_action_last != NULL);
11305 act = ecb->dte_action_last;
11306
11307 if (act->dta_kind == DTRACEACT_DIFEXPR) {
11308 ASSERT(act->dta_difo != NULL);
11309
11310 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
11311 agg->dtag_hasarg = 1;
11312 }
11313
11314 /*
11315 * We need to allocate an id for this aggregation.
11316 */
11317 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
11318 VM_BESTFIT | VM_SLEEP);
11319
b0d623f7 11320 if (aggid - 1 >= (dtrace_aggid_t)state->dts_naggregations) {
2d21ac55
A
11321 dtrace_aggregation_t **oaggs = state->dts_aggregations;
11322 dtrace_aggregation_t **aggs;
11323 int naggs = state->dts_naggregations << 1;
11324 int onaggs = state->dts_naggregations;
11325
11326 ASSERT(aggid == (dtrace_aggid_t)state->dts_naggregations + 1);
11327
11328 if (naggs == 0) {
11329 ASSERT(oaggs == NULL);
11330 naggs = 1;
11331 }
11332
11333 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
11334
11335 if (oaggs != NULL) {
11336 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
11337 kmem_free(oaggs, onaggs * sizeof (*aggs));
11338 }
11339
11340 state->dts_aggregations = aggs;
11341 state->dts_naggregations = naggs;
11342 }
11343
11344 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
11345 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
11346
11347 frec = &agg->dtag_first->dta_rec;
11348 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
11349 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
11350
11351 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
11352 ASSERT(!act->dta_intuple);
11353 act->dta_intuple = 1;
11354 }
11355
11356 return (&agg->dtag_action);
11357}
11358
11359static void
11360dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
11361{
11362 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11363 dtrace_state_t *state = ecb->dte_state;
11364 dtrace_aggid_t aggid = agg->dtag_id;
11365
11366 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
11367 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
11368
11369 ASSERT(state->dts_aggregations[aggid - 1] == agg);
11370 state->dts_aggregations[aggid - 1] = NULL;
11371
11372 kmem_free(agg, sizeof (dtrace_aggregation_t));
11373}
11374
11375static int
11376dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11377{
11378 dtrace_action_t *action, *last;
11379 dtrace_difo_t *dp = desc->dtad_difo;
11380 uint32_t size = 0, align = sizeof (uint8_t), mask;
11381 uint16_t format = 0;
11382 dtrace_recdesc_t *rec;
11383 dtrace_state_t *state = ecb->dte_state;
11384 dtrace_optval_t *opt = state->dts_options;
11385 dtrace_optval_t nframes=0, strsize;
11386 uint64_t arg = desc->dtad_arg;
11387
11388 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11389 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
11390
11391 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
11392 /*
11393 * If this is an aggregating action, there must be neither
11394 * a speculate nor a commit on the action chain.
11395 */
11396 dtrace_action_t *act;
11397
11398 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11399 if (act->dta_kind == DTRACEACT_COMMIT)
11400 return (EINVAL);
11401
11402 if (act->dta_kind == DTRACEACT_SPECULATE)
11403 return (EINVAL);
11404 }
11405
11406 action = dtrace_ecb_aggregation_create(ecb, desc);
11407
11408 if (action == NULL)
11409 return (EINVAL);
11410 } else {
11411 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
11412 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
11413 dp != NULL && dp->dtdo_destructive)) {
11414 state->dts_destructive = 1;
11415 }
11416
11417 switch (desc->dtad_kind) {
11418 case DTRACEACT_PRINTF:
11419 case DTRACEACT_PRINTA:
11420 case DTRACEACT_SYSTEM:
11421 case DTRACEACT_FREOPEN:
11422 case DTRACEACT_DIFEXPR:
11423 /*
11424 * We know that our arg is a string -- turn it into a
11425 * format.
11426 */
11427 if (arg == 0) {
11428 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
11429 desc->dtad_kind == DTRACEACT_DIFEXPR);
11430 format = 0;
11431 } else {
11432 ASSERT(arg != 0);
11433 ASSERT(arg > KERNELBASE);
11434 format = dtrace_format_add(state,
11435 (char *)(uintptr_t)arg);
11436 }
11437
11438 OS_FALLTHROUGH;
11439 case DTRACEACT_LIBACT:
11440 case DTRACEACT_TRACEMEM:
11441 case DTRACEACT_TRACEMEM_DYNSIZE:
11442 case DTRACEACT_APPLEBINARY: /* __APPLE__ */
11443 if (dp == NULL)
11444 return (EINVAL);
11445
11446 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11447 break;
11448
11449 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11450 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11451 return (EINVAL);
11452
11453 size = opt[DTRACEOPT_STRSIZE];
11454 }
11455
11456 break;
11457
11458 case DTRACEACT_STACK:
11459 if ((nframes = arg) == 0) {
11460 nframes = opt[DTRACEOPT_STACKFRAMES];
11461 ASSERT(nframes > 0);
11462 arg = nframes;
11463 }
11464
11465 size = nframes * sizeof (pc_t);
11466 break;
11467
11468 case DTRACEACT_JSTACK:
11469 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11470 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11471
11472 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11473 nframes = opt[DTRACEOPT_JSTACKFRAMES];
11474
11475 arg = DTRACE_USTACK_ARG(nframes, strsize);
11476
11477 OS_FALLTHROUGH;
11478 case DTRACEACT_USTACK:
11479 if (desc->dtad_kind != DTRACEACT_JSTACK &&
11480 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11481 strsize = DTRACE_USTACK_STRSIZE(arg);
11482 nframes = opt[DTRACEOPT_USTACKFRAMES];
11483 ASSERT(nframes > 0);
11484 arg = DTRACE_USTACK_ARG(nframes, strsize);
11485 }
11486
11487 /*
11488 * Save a slot for the pid.
11489 */
11490 size = (nframes + 1) * sizeof (uint64_t);
11491 size += DTRACE_USTACK_STRSIZE(arg);
11492 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
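 /*
 * For example (LP64 assumed): with nframes = 100 and strsize = 0 this
 * is (100 + 1) * 8 == 808 bytes; a strsize of 5 makes it 813, and
 * P2ROUNDUP(813, 8) pads the record to 816.
 */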
11493
11494 break;
11495
11496 case DTRACEACT_SYM:
11497 case DTRACEACT_MOD:
11498 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11499 sizeof (uint64_t)) ||
11500 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11501 return (EINVAL);
11502 break;
11503
11504 case DTRACEACT_USYM:
11505 case DTRACEACT_UMOD:
11506 case DTRACEACT_UADDR:
11507 if (dp == NULL ||
11508 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11509 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11510 return (EINVAL);
11511
11512 /*
11513 * We have a slot for the pid, plus a slot for the
11514 * argument. To keep things simple (aligned with
11515 * bitness-neutral sizing), we store each as a 64-bit
11516 * quantity.
11517 */
11518 size = 2 * sizeof (uint64_t);
11519 break;
11520
11521 case DTRACEACT_STOP:
11522 case DTRACEACT_BREAKPOINT:
11523 case DTRACEACT_PANIC:
11524 break;
11525
11526 case DTRACEACT_CHILL:
11527 case DTRACEACT_DISCARD:
11528 case DTRACEACT_RAISE:
11529 case DTRACEACT_PIDRESUME: /* __APPLE__ */
11530 if (dp == NULL)
11531 return (EINVAL);
11532 break;
11533
11534 case DTRACEACT_EXIT:
11535 if (dp == NULL ||
11536 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11537 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11538 return (EINVAL);
11539 break;
11540
11541 case DTRACEACT_SPECULATE:
11542 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11543 return (EINVAL);
11544
11545 if (dp == NULL)
11546 return (EINVAL);
11547
11548 state->dts_speculates = 1;
11549 break;
11550
11551 case DTRACEACT_COMMIT: {
11552 dtrace_action_t *act = ecb->dte_action;
11553
11554 for (; act != NULL; act = act->dta_next) {
11555 if (act->dta_kind == DTRACEACT_COMMIT)
11556 return (EINVAL);
11557 }
11558
11559 if (dp == NULL)
11560 return (EINVAL);
11561 break;
11562 }
11563
11564 default:
11565 return (EINVAL);
11566 }
11567
11568 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11569 /*
11570 * If this is a data-storing action or a speculate,
11571 * we must be sure that there isn't a commit on the
11572 * action chain.
11573 */
11574 dtrace_action_t *act = ecb->dte_action;
11575
11576 for (; act != NULL; act = act->dta_next) {
11577 if (act->dta_kind == DTRACEACT_COMMIT)
11578 return (EINVAL);
11579 }
11580 }
11581
11582 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11583 action->dta_rec.dtrd_size = size;
11584 }
11585
11586 action->dta_refcnt = 1;
11587 rec = &action->dta_rec;
11588 size = rec->dtrd_size;
11589
11590 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11591 if (!(size & mask)) {
11592 align = mask + 1;
11593 break;
11594 }
11595 }
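 /*
 * For example: a 24-byte record has (24 & 7) == 0 and is stored 8-byte
 * aligned; a 12-byte record falls through to (12 & 3) == 0 and gets
 * 4-byte alignment; an odd-sized record (say 5 bytes) matches no mask
 * and keeps the default 1-byte alignment.
 */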
11596
11597 action->dta_kind = desc->dtad_kind;
11598
11599 if ((action->dta_difo = dp) != NULL)
11600 dtrace_difo_hold(dp);
11601
11602 rec->dtrd_action = action->dta_kind;
11603 rec->dtrd_arg = arg;
11604 rec->dtrd_uarg = desc->dtad_uarg;
11605 rec->dtrd_alignment = (uint16_t)align;
11606 rec->dtrd_format = format;
11607
11608 if ((last = ecb->dte_action_last) != NULL) {
11609 ASSERT(ecb->dte_action != NULL);
11610 action->dta_prev = last;
11611 last->dta_next = action;
11612 } else {
11613 ASSERT(ecb->dte_action == NULL);
11614 ecb->dte_action = action;
11615 }
11616
11617 ecb->dte_action_last = action;
11618
11619 return (0);
11620}
11621
11622static void
11623dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11624{
11625 dtrace_action_t *act = ecb->dte_action, *next;
11626 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11627 dtrace_difo_t *dp;
11628 uint16_t format;
11629
11630 if (act != NULL && act->dta_refcnt > 1) {
11631 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11632 act->dta_refcnt--;
11633 } else {
11634 for (; act != NULL; act = next) {
11635 next = act->dta_next;
11636 ASSERT(next != NULL || act == ecb->dte_action_last);
11637 ASSERT(act->dta_refcnt == 1);
11638
11639 if ((format = act->dta_rec.dtrd_format) != 0)
11640 dtrace_format_remove(ecb->dte_state, format);
11641
11642 if ((dp = act->dta_difo) != NULL)
11643 dtrace_difo_release(dp, vstate);
11644
11645 if (DTRACEACT_ISAGG(act->dta_kind)) {
11646 dtrace_ecb_aggregation_destroy(ecb, act);
11647 } else {
11648 kmem_free(act, sizeof (dtrace_action_t));
11649 }
11650 }
11651 }
11652
11653 ecb->dte_action = NULL;
11654 ecb->dte_action_last = NULL;
11655 ecb->dte_size = 0;
11656}
11657
11658static void
11659dtrace_ecb_disable(dtrace_ecb_t *ecb)
11660{
11661 /*
11662 * We disable the ECB by removing it from its probe.
11663 */
11664 dtrace_ecb_t *pecb, *prev = NULL;
11665 dtrace_probe_t *probe = ecb->dte_probe;
11666
11667 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11668
11669 if (probe == NULL) {
11670 /*
11671 * This is the NULL probe; there is nothing to disable.
11672 */
11673 return;
11674 }
11675
11676 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11677 if (pecb == ecb)
11678 break;
11679 prev = pecb;
11680 }
11681
11682 ASSERT(pecb != NULL);
11683
11684 if (prev == NULL) {
11685 probe->dtpr_ecb = ecb->dte_next;
11686 } else {
11687 prev->dte_next = ecb->dte_next;
11688 }
11689
11690 if (ecb == probe->dtpr_ecb_last) {
11691 ASSERT(ecb->dte_next == NULL);
11692 probe->dtpr_ecb_last = prev;
11693 }
11694
11695 probe->dtpr_provider->dtpv_ecb_count--;
11696 /*
11697 * The ECB has been disconnected from the probe; now sync to assure
11698 * that all CPUs have seen the change before returning.
11699 */
11700 dtrace_sync();
11701
11702 if (probe->dtpr_ecb == NULL) {
11703 /*
11704 * That was the last ECB on the probe; clear the predicate
11705 * cache ID for the probe, disable it and sync one more time
11706 * to assure that we'll never hit it again.
11707 */
11708 dtrace_provider_t *prov = probe->dtpr_provider;
11709
11710 ASSERT(ecb->dte_next == NULL);
11711 ASSERT(probe->dtpr_ecb_last == NULL);
11712 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11713 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11714 probe->dtpr_id, probe->dtpr_arg);
11715 dtrace_sync();
11716 } else {
11717 /*
11718 * There is at least one ECB remaining on the probe. If there
11719 * is _exactly_ one, set the probe's predicate cache ID to be
11720 * the predicate cache ID of the remaining ECB.
11721 */
11722 ASSERT(probe->dtpr_ecb_last != NULL);
11723 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11724
11725 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11726 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11727
11728 ASSERT(probe->dtpr_ecb->dte_next == NULL);
11729
11730 if (p != NULL)
11731 probe->dtpr_predcache = p->dtp_cacheid;
11732 }
11733
11734 ecb->dte_next = NULL;
11735 }
11736}
11737
11738static void
11739dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11740{
11741 dtrace_state_t *state = ecb->dte_state;
11742 dtrace_vstate_t *vstate = &state->dts_vstate;
11743 dtrace_predicate_t *pred;
11744 dtrace_epid_t epid = ecb->dte_epid;
11745
11746 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11747 ASSERT(ecb->dte_next == NULL);
11748 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11749
11750 if ((pred = ecb->dte_predicate) != NULL)
11751 dtrace_predicate_release(pred, vstate);
11752
11753 dtrace_ecb_action_remove(ecb);
11754
11755 ASSERT(state->dts_ecbs[epid - 1] == ecb);
11756 state->dts_ecbs[epid - 1] = NULL;
11757
11758 kmem_free(ecb, sizeof (dtrace_ecb_t));
11759}
11760
11761static dtrace_ecb_t *
11762dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11763 dtrace_enabling_t *enab)
11764{
11765 dtrace_ecb_t *ecb;
11766 dtrace_predicate_t *pred;
11767 dtrace_actdesc_t *act;
11768 dtrace_provider_t *prov;
11769 dtrace_ecbdesc_t *desc = enab->dten_current;
11770
11771 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11772 ASSERT(state != NULL);
11773
11774 ecb = dtrace_ecb_add(state, probe);
11775 ecb->dte_uarg = desc->dted_uarg;
11776
11777 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11778 dtrace_predicate_hold(pred);
11779 ecb->dte_predicate = pred;
11780 }
11781
11782 if (probe != NULL) {
11783 /*
11784 * If the provider shows more leg than the consumer is old
11785 * enough to see, we need to enable the appropriate implicit
11786 * predicate bits to prevent the ecb from activating at
11787 * revealing times.
11788 *
11789 * Providers specifying DTRACE_PRIV_USER at register time
11790 * are stating that they need the /proc-style privilege
11791 * model to be enforced, and this is what DTRACE_COND_OWNER
11792 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11793 */
11794 prov = probe->dtpr_provider;
11795 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11796 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11797 ecb->dte_cond |= DTRACE_COND_OWNER;
11798
11799 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11800 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11801 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11802
11803 /*
11804 * If the provider shows us kernel innards and the user
11805 * is lacking sufficient privilege, enable the
11806 * DTRACE_COND_USERMODE implicit predicate.
11807 */
11808 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11809 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11810 ecb->dte_cond |= DTRACE_COND_USERMODE;
11811 }
11812
11813 if (dtrace_ecb_create_cache != NULL) {
11814 /*
11815 * If we have a cached ecb, we'll use its action list instead
11816 * of creating our own (saving both time and space).
11817 */
11818 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11819 dtrace_action_t *act_if = cached->dte_action;
11820
11821 if (act_if != NULL) {
11822 ASSERT(act_if->dta_refcnt > 0);
11823 act_if->dta_refcnt++;
11824 ecb->dte_action = act_if;
11825 ecb->dte_action_last = cached->dte_action_last;
11826 ecb->dte_needed = cached->dte_needed;
11827 ecb->dte_size = cached->dte_size;
11828 ecb->dte_alignment = cached->dte_alignment;
11829 }
11830
11831 return (ecb);
11832 }
11833
11834 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11835 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11836 dtrace_ecb_destroy(ecb);
11837 return (NULL);
11838 }
11839 }
11840
11841 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11842 dtrace_ecb_destroy(ecb);
11843 return (NULL);
11844 }
11845
11846 return (dtrace_ecb_create_cache = ecb);
11847}
11848
11849static int
11850 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg1, void *arg2)
11851{
11852 dtrace_ecb_t *ecb;
11853 dtrace_enabling_t *enab = arg1;
11854 dtrace_ecbdesc_t *ep = arg2;
11855 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11856
11857 ASSERT(state != NULL);
11858
11859 if (probe != NULL && ep != NULL && probe->dtpr_gen < ep->dted_probegen) {
11860 /*
11861 * This probe was created in a generation for which this
11862 * enabling has previously created ECBs; we don't want to
11863 * enable it again, so just kick out.
11864 */
11865 return (DTRACE_MATCH_NEXT);
11866 }
11867
11868 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11869 return (DTRACE_MATCH_DONE);
11870
11871 if (dtrace_ecb_enable(ecb) < 0)
11872 return (DTRACE_MATCH_FAIL);
11873
11874 return (DTRACE_MATCH_NEXT);
11875}
11876
11877static dtrace_ecb_t *
11878dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11879{
11880 dtrace_ecb_t *ecb;
11881#pragma unused(ecb) /* __APPLE__ */
11882
11883 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11884
11885 if (id == 0 || id > (dtrace_epid_t)state->dts_necbs)
11886 return (NULL);
11887
11888 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11889 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11890
11891 return (state->dts_ecbs[id - 1]);
11892}
11893
11894static dtrace_aggregation_t *
11895dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11896{
11897 dtrace_aggregation_t *agg;
11898#pragma unused(agg) /* __APPLE__ */
11899
11900 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11901
11902 if (id == 0 || id > (dtrace_aggid_t)state->dts_naggregations)
11903 return (NULL);
11904
11905 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11906 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11907 agg->dtag_id == id);
11908
11909 return (state->dts_aggregations[id - 1]);
11910}
11911
11912/*
11913 * DTrace Buffer Functions
11914 *
11915 * The following functions manipulate DTrace buffers. Most of these functions
11916 * are called in the context of establishing or processing consumer state;
11917 * exceptions are explicitly noted.
11918 */
11919
11920/*
11921 * Note: called from cross call context. This function switches the two
11922 * buffers on a given CPU. The atomicity of this operation is assured by
11923 * disabling interrupts while the actual switch takes place; the disabling of
11924 * interrupts serializes the execution with any execution of dtrace_probe() on
11925 * the same CPU.
11926 */
11927static void
11928dtrace_buffer_switch(dtrace_buffer_t *buf)
11929{
11930 caddr_t tomax = buf->dtb_tomax;
11931 caddr_t xamot = buf->dtb_xamot;
11932 dtrace_icookie_t cookie;
11933 hrtime_t now;
11934
11935 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11936 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11937
11938 cookie = dtrace_interrupt_disable();
11939 now = dtrace_gethrtime();
11940 buf->dtb_tomax = xamot;
11941 buf->dtb_xamot = tomax;
11942 buf->dtb_xamot_drops = buf->dtb_drops;
11943 buf->dtb_xamot_offset = buf->dtb_offset;
11944 buf->dtb_xamot_errors = buf->dtb_errors;
11945 buf->dtb_xamot_flags = buf->dtb_flags;
11946 buf->dtb_offset = 0;
11947 buf->dtb_drops = 0;
11948 buf->dtb_errors = 0;
11949 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11950 buf->dtb_interval = now - buf->dtb_switched;
11951 buf->dtb_switched = now;
11952 buf->dtb_cur_limit = buf->dtb_limit;
11953
11954 dtrace_interrupt_enable(cookie);
11955}
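/*
 * (Naming aside, inferred rather than documented here: "xamot" is "tomax"
 * spelled backwards -- the two pointers are the active and inactive halves
 * of the pair, and a consumer reads the xamot snapshot while probe context
 * keeps writing to tomax.)
 */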
11956
11957/*
11958 * Note: called from cross call context. This function activates a buffer
11959 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
11960 * is guaranteed by the disabling of interrupts.
11961 */
11962static void
11963dtrace_buffer_activate(dtrace_state_t *state)
11964{
11965 dtrace_buffer_t *buf;
11966 dtrace_icookie_t cookie = dtrace_interrupt_disable();
11967
11968 buf = &state->dts_buffer[CPU->cpu_id];
11969
11970 if (buf->dtb_tomax != NULL) {
11971 /*
11972 * We might like to assert that the buffer is marked inactive,
11973 * but this isn't necessarily true: the buffer for the CPU
11974 * that processes the BEGIN probe has its buffer activated
11975 * manually. In this case, we take the (harmless) action of
11976 * re-clearing the INACTIVE bit.
11977 */
11978 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11979 }
11980
11981 dtrace_interrupt_enable(cookie);
11982}
11983
11984static int
11985dtrace_buffer_canalloc(size_t size)
11986{
11987 if (size > (UINT64_MAX - dtrace_buffer_memory_inuse))
11988 return (B_FALSE);
11989 if ((size + dtrace_buffer_memory_inuse) > dtrace_buffer_memory_maxsize)
11990 return (B_FALSE);
11991
11992 return (B_TRUE);
11993}
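/*
 * For example: the first test guards the addition itself -- if size were
 * within UINT64_MAX - dtrace_buffer_memory_inuse of wrapping, the sum in
 * the second test could overflow and falsely pass; only once the sum is
 * known not to wrap is it compared against dtrace_buffer_memory_maxsize.
 */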
11994
11995static int
11996dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t limit, size_t size, int flags,
11997 processorid_t cpu)
11998{
11999 dtrace_cpu_t *cp;
12000 dtrace_buffer_t *buf;
12001 size_t size_before_alloc = dtrace_buffer_memory_inuse;
12002
12003 LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12004 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12005
12006 if (size > (size_t)dtrace_nonroot_maxsize &&
12007 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
12008 return (EFBIG);
12009
12010 cp = cpu_list;
12011
12012 do {
12013 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12014 continue;
12015
12016 buf = &bufs[cp->cpu_id];
12017
12018 /*
12019 * If there is already a buffer allocated for this CPU, it
12020 * is only possible that this is a DR event. In this case,
12021 * the buffer size must match our specified size.
12022 */
12023 if (buf->dtb_tomax != NULL) {
12024 ASSERT(buf->dtb_size == size);
12025 continue;
12026 }
12027
12028 ASSERT(buf->dtb_xamot == NULL);
12029
12030 /* DTrace, please do not eat all the memory. */
12031 if (dtrace_buffer_canalloc(size) == B_FALSE)
12032 goto err;
12033 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12034 goto err;
12035 dtrace_buffer_memory_inuse += size;
12036
12037 /* Ensure that the limit is always lower than the size */
12038 limit = limit == size ? limit - 1 : limit;
12039 buf->dtb_cur_limit = limit;
12040 buf->dtb_limit = limit;
12041 buf->dtb_size = size;
12042 buf->dtb_flags = flags;
12043 buf->dtb_offset = 0;
12044 buf->dtb_drops = 0;
12045
12046 if (flags & DTRACEBUF_NOSWITCH)
12047 continue;
12048
12049 /* DTrace, please do not eat all the memory. */
12050 if (dtrace_buffer_canalloc(size) == B_FALSE)
12051 goto err;
12052 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12053 goto err;
12054 dtrace_buffer_memory_inuse += size;
12055 } while ((cp = cp->cpu_next) != cpu_list);
12056
12057 ASSERT(dtrace_buffer_memory_inuse <= dtrace_buffer_memory_maxsize);
12058
12059 return (0);
12060
12061err:
12062 cp = cpu_list;
12063
12064 do {
12065 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12066 continue;
12067
12068 buf = &bufs[cp->cpu_id];
12069
12070 if (buf->dtb_xamot != NULL) {
12071 ASSERT(buf->dtb_tomax != NULL);
12072 ASSERT(buf->dtb_size == size);
12073 kmem_free(buf->dtb_xamot, size);
12074 }
12075
12076 if (buf->dtb_tomax != NULL) {
12077 ASSERT(buf->dtb_size == size);
12078 kmem_free(buf->dtb_tomax, size);
12079 }
12080
12081 buf->dtb_tomax = NULL;
12082 buf->dtb_xamot = NULL;
12083 buf->dtb_size = 0;
12084 } while ((cp = cp->cpu_next) != cpu_list);
12085
12086 /* Restore the size saved before allocating memory */
12087 dtrace_buffer_memory_inuse = size_before_alloc;
12088
12089 return (ENOMEM);
12090}
12091
12092/*
12093 * Note: called from probe context. This function just increments the drop
12094 * count on a buffer. It has been made a function to allow for the
12095 * possibility of understanding the source of mysterious drop counts. (A
12096 * problem for which one may be particularly disappointed that DTrace cannot
12097 * be used to understand DTrace.)
12098 */
12099static void
12100dtrace_buffer_drop(dtrace_buffer_t *buf)
12101{
12102 buf->dtb_drops++;
12103}
12104
12105/*
12106 * Note: called from probe context. This function is called to reserve space
12107 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
12108 * mstate. Returns the new offset in the buffer, or a negative value if an
12109 * error has occurred.
12110 */
12111static intptr_t
12112dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
12113 dtrace_state_t *state, dtrace_mstate_t *mstate)
12114{
12115 intptr_t offs = buf->dtb_offset, soffs;
12116 intptr_t woffs;
12117 caddr_t tomax;
12118 size_t total_off;
12119
12120 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
12121 return (-1);
12122
12123 if ((tomax = buf->dtb_tomax) == NULL) {
12124 dtrace_buffer_drop(buf);
12125 return (-1);
12126 }
12127
12128 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
12129 while (offs & (align - 1)) {
12130 /*
12131 * Assert that our alignment is off by a number which
12132 * is itself sizeof (uint32_t) aligned.
12133 */
12134 ASSERT(!((align - (offs & (align - 1))) &
12135 (sizeof (uint32_t) - 1)));
12136 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12137 offs += sizeof (uint32_t);
12138 }
12139
12140 if ((uint64_t)(soffs = offs + needed) > buf->dtb_cur_limit) {
12141 if (buf->dtb_cur_limit == buf->dtb_limit) {
12142 buf->dtb_cur_limit = buf->dtb_size;
12143
12144 os_atomic_inc(&state->dts_buf_over_limit, relaxed);
12145 /**
12146 * Set an AST on the current processor
12147 * so that we can wake up the process
12148 * outside of probe context, when we know
12149 * it is safe to do so
12150 */
12151 minor_t minor = getminor(state->dts_dev);
12152 ASSERT(minor < 32);
12153
12154 os_atomic_or(&dtrace_wake_clients, 1 << minor, relaxed);
12155 ast_dtrace_on();
12156 }
12157 if ((uint64_t)soffs > buf->dtb_size) {
12158 dtrace_buffer_drop(buf);
12159 return (-1);
12160 }
12161 }
12162
12163 if (mstate == NULL)
12164 return (offs);
12165
12166 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
12167 mstate->dtms_scratch_size = buf->dtb_size - soffs;
12168 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12169
12170 return (offs);
12171 }
12172
12173 if (buf->dtb_flags & DTRACEBUF_FILL) {
12174 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
12175 (buf->dtb_flags & DTRACEBUF_FULL))
12176 return (-1);
12177 goto out;
12178 }
12179
12180 total_off = needed + (offs & (align - 1));
12181
12182 /*
12183 * For a ring buffer, life is quite a bit more complicated. Before
12184 * we can store any padding, we need to adjust our wrapping offset.
12185 * (If we've never before wrapped or we're not about to, no adjustment
12186 * is required.)
12187 */
12188 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
12189 offs + total_off > buf->dtb_size) {
12190 woffs = buf->dtb_xamot_offset;
12191
12192 if (offs + total_off > buf->dtb_size) {
12193 /*
12194 * We can't fit in the end of the buffer. First, a
12195 * sanity check that we can fit in the buffer at all.
12196 */
12197 if (total_off > buf->dtb_size) {
12198 dtrace_buffer_drop(buf);
12199 return (-1);
12200 }
12201
12202 /*
12203 * We're going to be storing at the top of the buffer,
12204 * so now we need to deal with the wrapped offset. We
12205 * only reset our wrapped offset to 0 if it is
12206 * currently greater than the current offset. If it
12207 * is less than the current offset, it is because a
12208 * previous allocation induced a wrap -- but the
12209 * allocation didn't subsequently take the space due
12210 * to an error or false predicate evaluation. In this
12211 * case, we'll just leave the wrapped offset alone: if
12212 * the wrapped offset hasn't been advanced far enough
12213 * for this allocation, it will be adjusted in the
12214 * lower loop.
12215 */
12216 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
12217 if (woffs >= offs)
12218 woffs = 0;
12219 } else {
12220 woffs = 0;
12221 }
12222
12223 /*
12224 * Now we know that we're going to be storing to the
12225 * top of the buffer and that there is room for us
12226 * there. We need to clear the buffer from the current
12227 * offset to the end (there may be old gunk there).
12228 */
12229 while ((uint64_t)offs < buf->dtb_size)
12230 tomax[offs++] = 0;
12231
12232 /*
12233 * We need to set our offset to zero. And because we
12234 * are wrapping, we need to set the bit indicating as
12235 * much. We can also adjust our needed space back
12236 * down to the space required by the ECB -- we know
12237 * that the top of the buffer is aligned.
12238 */
12239 offs = 0;
12240 total_off = needed;
12241 buf->dtb_flags |= DTRACEBUF_WRAPPED;
12242 } else {
12243 /*
12244 * There is room for us in the buffer, so we simply
12245 * need to check the wrapped offset.
12246 */
12247 if (woffs < offs) {
12248 /*
12249 * The wrapped offset is less than the offset.
12250 * This can happen if we allocated buffer space
12251 * that induced a wrap, but then we didn't
12252 * subsequently take the space due to an error
12253 * or false predicate evaluation. This is
12254 * okay; we know that _this_ allocation isn't
12255 * going to induce a wrap. We still can't
12256 * reset the wrapped offset to be zero,
12257 * however: the space may have been trashed in
12258 * the previous failed probe attempt. But at
12259 * least the wrapped offset doesn't need to
12260 * be adjusted at all...
12261 */
12262 goto out;
12263 }
12264 }
12265
12266 while (offs + total_off > (size_t)woffs) {
12267 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
12268 size_t size;
12269
12270 if (epid == DTRACE_EPIDNONE) {
12271 size = sizeof (uint32_t);
12272 } else {
12273 ASSERT(epid <= (dtrace_epid_t)state->dts_necbs);
12274 ASSERT(state->dts_ecbs[epid - 1] != NULL);
12275
12276 size = state->dts_ecbs[epid - 1]->dte_size;
12277 }
12278
12279 ASSERT(woffs + size <= buf->dtb_size);
12280 ASSERT(size != 0);
12281
12282 if (woffs + size == buf->dtb_size) {
12283 /*
12284 * We've reached the end of the buffer; we want
12285 * to set the wrapped offset to 0 and break
12286 * out. However, if the offs is 0, then we're
12287 * in a strange edge-condition: the amount of
12288 * space that we want to reserve plus the size
12289 * of the record that we're overwriting is
12290 * greater than the size of the buffer. This
12291 * is problematic because if we reserve the
12292 * space but subsequently don't consume it (due
12293 * to a failed predicate or error) the wrapped
12294 * offset will be 0 -- yet the EPID at offset 0
12295 * will not be committed. This situation is
12296 * relatively easy to deal with: if we're in
12297 * this case, the buffer is indistinguishable
12298 * from one that hasn't wrapped; we need only
12299 * finish the job by clearing the wrapped bit,
12300 * explicitly setting the offset to be 0, and
12301 * zero'ing out the old data in the buffer.
12302 */
12303 if (offs == 0) {
12304 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
12305 buf->dtb_offset = 0;
12306 woffs = total_off;
12307
12308 while ((uint64_t)woffs < buf->dtb_size)
12309 tomax[woffs++] = 0;
12310 }
12311
12312 woffs = 0;
12313 break;
12314 }
12315
12316 woffs += size;
12317 }
12318
12319 /*
12320 * We have a wrapped offset. It may be that the wrapped offset
12321 * has become zero -- that's okay.
12322 */
12323 buf->dtb_xamot_offset = woffs;
12324 }
12325
12326out:
12327 /*
12328 * Now we can plow the buffer with any necessary padding.
12329 */
12330 while (offs & (align - 1)) {
12331 /*
12332 * Assert that our alignment is off by a number which
12333 * is itself sizeof (uint32_t) aligned.
12334 */
12335 ASSERT(!((align - (offs & (align - 1))) &
12336 (sizeof (uint32_t) - 1)));
12337 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12338 offs += sizeof (uint32_t);
12339 }
12340
12341 if (buf->dtb_flags & DTRACEBUF_FILL) {
12342 if (offs + needed > buf->dtb_size - state->dts_reserve) {
12343 buf->dtb_flags |= DTRACEBUF_FULL;
12344 return (-1);
12345 }
12346 }
12347
12348 if (mstate == NULL)
12349 return (offs);
12350
12351 /*
12352 * For ring buffers and fill buffers, the scratch space is always
12353 * the inactive buffer.
12354 */
12355 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
12356 mstate->dtms_scratch_size = buf->dtb_size;
12357 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12358
12359 return (offs);
12360}
12361
12362static void
12363dtrace_buffer_polish(dtrace_buffer_t *buf)
12364{
12365 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
12366 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12367
12368 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
12369 return;
12370
12371 /*
12372 * We need to polish the ring buffer. There are three cases:
12373 *
12374 * - The first (and presumably most common) is that there is no gap
12375 * between the buffer offset and the wrapped offset. In this case,
12376 * there is nothing in the buffer that isn't valid data; we can
12377 * mark the buffer as polished and return.
12378 *
12379 * - The second (less common than the first but still more common
12380 * than the third) is that there is a gap between the buffer offset
12381 * and the wrapped offset, and the wrapped offset is larger than the
12382 * buffer offset. This can happen because of an alignment issue, or
12383 * can happen because of a call to dtrace_buffer_reserve() that
12384 * didn't subsequently consume the buffer space. In this case,
12385 * we need to zero the data from the buffer offset to the wrapped
12386 * offset.
12387 *
12388 * - The third (and least common) is that there is a gap between the
12389 * buffer offset and the wrapped offset, but the wrapped offset is
12390 * _less_ than the buffer offset. This can only happen because a
12391 * call to dtrace_buffer_reserve() induced a wrap, but the space
12392 * was not subsequently consumed. In this case, we need to zero the
12393 * space from the offset to the end of the buffer _and_ from the
12394 * top of the buffer to the wrapped offset.
12395 */
12396 if (buf->dtb_offset < buf->dtb_xamot_offset) {
12397 bzero(buf->dtb_tomax + buf->dtb_offset,
12398 buf->dtb_xamot_offset - buf->dtb_offset);
12399 }
12400
12401 if (buf->dtb_offset > buf->dtb_xamot_offset) {
12402 bzero(buf->dtb_tomax + buf->dtb_offset,
12403 buf->dtb_size - buf->dtb_offset);
12404 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
12405 }
12406}
12407
12408static void
12409dtrace_buffer_free(dtrace_buffer_t *bufs)
12410{
12411 int i;
12412
12413 for (i = 0; i < (int)NCPU; i++) {
12414 dtrace_buffer_t *buf = &bufs[i];
12415
12416 if (buf->dtb_tomax == NULL) {
12417 ASSERT(buf->dtb_xamot == NULL);
12418 ASSERT(buf->dtb_size == 0);
12419 continue;
12420 }
12421
12422 if (buf->dtb_xamot != NULL) {
12423 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12424 kmem_free(buf->dtb_xamot, buf->dtb_size);
12425
12426 ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size);
12427 dtrace_buffer_memory_inuse -= buf->dtb_size;
12428 }
12429
12430 kmem_free(buf->dtb_tomax, buf->dtb_size);
12431 ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size);
12432 dtrace_buffer_memory_inuse -= buf->dtb_size;
12433
12434 buf->dtb_size = 0;
12435 buf->dtb_tomax = NULL;
12436 buf->dtb_xamot = NULL;
12437 }
12438}
12439
12440/*
12441 * DTrace Enabling Functions
12442 */
12443static dtrace_enabling_t *
12444dtrace_enabling_create(dtrace_vstate_t *vstate)
12445{
12446 dtrace_enabling_t *enab;
12447
12448 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
12449 enab->dten_vstate = vstate;
12450
12451 return (enab);
12452}
12453
12454static void
12455dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
12456{
12457 dtrace_ecbdesc_t **ndesc;
12458 size_t osize, nsize;
12459
12460 /*
12461 * We can't add to enablings after we've enabled them, or after we've
12462 * retained them.
12463 */
12464 ASSERT(enab->dten_probegen == 0);
12465 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12466
12467 /* APPLE NOTE: this protects against gcc 4.0 botch on x86 */
12468 if (ecb == NULL) return;
12469
12470 if (enab->dten_ndesc < enab->dten_maxdesc) {
12471 enab->dten_desc[enab->dten_ndesc++] = ecb;
12472 return;
12473 }
12474
12475 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12476
12477 if (enab->dten_maxdesc == 0) {
12478 enab->dten_maxdesc = 1;
12479 } else {
12480 enab->dten_maxdesc <<= 1;
12481 }
12482
12483 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12484
12485 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12486 ndesc = kmem_zalloc(nsize, KM_SLEEP);
12487 bcopy(enab->dten_desc, ndesc, osize);
12488 kmem_free(enab->dten_desc, osize);
12489
12490 enab->dten_desc = ndesc;
12491 enab->dten_desc[enab->dten_ndesc++] = ecb;
12492}
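/*
 * For example: dten_maxdesc grows 0 -> 1 -> 2 -> 4 -> ..., so adding n
 * descriptions costs O(n) amortized copying, and the zeroed tail of the
 * new array simply awaits future dtrace_enabling_add() calls.
 */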
12493
12494static void
12495dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12496 dtrace_probedesc_t *pd)
12497{
12498 dtrace_ecbdesc_t *new;
12499 dtrace_predicate_t *pred;
12500 dtrace_actdesc_t *act;
12501
12502 /*
12503 * We're going to create a new ECB description that matches the
12504 * specified ECB in every way, but has the specified probe description.
12505 */
12506 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12507
12508 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12509 dtrace_predicate_hold(pred);
12510
12511 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12512 dtrace_actdesc_hold(act);
12513
12514 new->dted_action = ecb->dted_action;
12515 new->dted_pred = ecb->dted_pred;
12516 new->dted_probe = *pd;
12517 new->dted_uarg = ecb->dted_uarg;
12518
12519 dtrace_enabling_add(enab, new);
12520}
12521
12522static void
12523dtrace_enabling_dump(dtrace_enabling_t *enab)
12524{
12525 int i;
12526
12527 for (i = 0; i < enab->dten_ndesc; i++) {
12528 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12529
12530 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12531 desc->dtpd_provider, desc->dtpd_mod,
12532 desc->dtpd_func, desc->dtpd_name);
12533 }
12534}
12535
12536static void
12537dtrace_enabling_destroy(dtrace_enabling_t *enab)
12538{
12539 int i;
12540 dtrace_ecbdesc_t *ep;
12541 dtrace_vstate_t *vstate = enab->dten_vstate;
12542
12543 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12544
12545 for (i = 0; i < enab->dten_ndesc; i++) {
12546 dtrace_actdesc_t *act, *next;
12547 dtrace_predicate_t *pred;
12548
12549 ep = enab->dten_desc[i];
12550
12551 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12552 dtrace_predicate_release(pred, vstate);
12553
12554 for (act = ep->dted_action; act != NULL; act = next) {
12555 next = act->dtad_next;
12556 dtrace_actdesc_release(act, vstate);
12557 }
12558
12559 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12560 }
12561
12562 kmem_free(enab->dten_desc,
12563 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12564
12565 /*
12566 * If this was a retained enabling, decrement the dts_nretained count
12567 * and take it off of the dtrace_retained list.
12568 */
12569 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12570 dtrace_retained == enab) {
12571 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12572 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12573 enab->dten_vstate->dtvs_state->dts_nretained--;
12574 dtrace_retained_gen++;
12575 }
12576
12577 if (enab->dten_prev == NULL) {
12578 if (dtrace_retained == enab) {
12579 dtrace_retained = enab->dten_next;
12580
12581 if (dtrace_retained != NULL)
12582 dtrace_retained->dten_prev = NULL;
12583 }
12584 } else {
12585 ASSERT(enab != dtrace_retained);
12586 ASSERT(dtrace_retained != NULL);
12587 enab->dten_prev->dten_next = enab->dten_next;
12588 }
12589
12590 if (enab->dten_next != NULL) {
12591 ASSERT(dtrace_retained != NULL);
12592 enab->dten_next->dten_prev = enab->dten_prev;
12593 }
12594
12595 kmem_free(enab, sizeof (dtrace_enabling_t));
12596}
12597
12598static int
12599dtrace_enabling_retain(dtrace_enabling_t *enab)
12600{
12601 dtrace_state_t *state;
12602
12603 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12604 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12605 ASSERT(enab->dten_vstate != NULL);
12606
12607 state = enab->dten_vstate->dtvs_state;
12608 ASSERT(state != NULL);
12609
12610 /*
12611 * We only allow each state to retain dtrace_retain_max enablings.
12612 */
12613 if (state->dts_nretained >= dtrace_retain_max)
12614 return (ENOSPC);
12615
12616 state->dts_nretained++;
12617 dtrace_retained_gen++;
12618
12619 if (dtrace_retained == NULL) {
12620 dtrace_retained = enab;
12621 return (0);
12622 }
12623
12624 enab->dten_next = dtrace_retained;
12625 dtrace_retained->dten_prev = enab;
12626 dtrace_retained = enab;
12627
12628 return (0);
12629}
12630
12631static int
12632dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12633 dtrace_probedesc_t *create)
12634{
12635 dtrace_enabling_t *new, *enab;
12636 int found = 0, err = ENOENT;
12637
12638 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12639 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12640 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12641 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12642 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12643
12644 new = dtrace_enabling_create(&state->dts_vstate);
12645
12646 /*
12647 * Iterate over all retained enablings, looking for enablings that
12648 * match the specified state.
12649 */
12650 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12651 int i;
12652
12653 /*
12654 * dtvs_state can only be NULL for helper enablings -- and
12655 * helper enablings can't be retained.
12656 */
12657 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12658
12659 if (enab->dten_vstate->dtvs_state != state)
12660 continue;
12661
12662 /*
12663 * Now iterate over each probe description; we're looking for
12664 * an exact match to the specified probe description.
12665 */
12666 for (i = 0; i < enab->dten_ndesc; i++) {
12667 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12668 dtrace_probedesc_t *pd = &ep->dted_probe;
12669
12670 /* APPLE NOTE: Darwin employs size-bounded string operations. */
12671 if (strncmp(pd->dtpd_provider, match->dtpd_provider, DTRACE_PROVNAMELEN))
12672 continue;
12673
12674 if (strncmp(pd->dtpd_mod, match->dtpd_mod, DTRACE_MODNAMELEN))
12675 continue;
12676
12677 if (strncmp(pd->dtpd_func, match->dtpd_func, DTRACE_FUNCNAMELEN))
12678 continue;
12679
12680 if (strncmp(pd->dtpd_name, match->dtpd_name, DTRACE_NAMELEN))
12681 continue;
12682
12683 /*
12684 * We have a winning probe! Add it to our growing
12685 * enabling.
12686 */
12687 found = 1;
12688 dtrace_enabling_addlike(new, ep, create);
12689 }
12690 }
12691
12692 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12693 dtrace_enabling_destroy(new);
12694 return (err);
12695 }
12696
12697 return (0);
12698}
12699
12700static void
12701dtrace_enabling_retract(dtrace_state_t *state)
12702{
12703 dtrace_enabling_t *enab, *next;
12704
12705 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12706
12707 /*
12708 * Iterate over all retained enablings, destroy the enablings retained
12709 * for the specified state.
12710 */
12711 for (enab = dtrace_retained; enab != NULL; enab = next) {
12712 next = enab->dten_next;
12713
12714 /*
12715 * dtvs_state can only be NULL for helper enablings -- and
12716 * helper enablings can't be retained.
12717 */
12718 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12719
12720 if (enab->dten_vstate->dtvs_state == state) {
12721 ASSERT(state->dts_nretained > 0);
12722 dtrace_enabling_destroy(enab);
12723 }
12724 }
12725
12726 ASSERT(state->dts_nretained == 0);
12727}
12728
12729static int
12730 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched, dtrace_match_cond_t *cond)
12731{
12732 int i = 0;
12733 int total_matched = 0, matched = 0;
12734
12735 LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12736 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12737
12738 for (i = 0; i < enab->dten_ndesc; i++) {
12739 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12740
12741 enab->dten_current = ep;
12742 enab->dten_error = 0;
12743
12744 /**
12745 * Before doing a dtrace_probe_enable, which is really
12746 * expensive, check that this enabling matches the matching precondition
12747 * if we have one
12748 */
12749 if (cond && (cond->dmc_func(&ep->dted_probe, cond->dmc_data) == 0)) {
12750 continue;
12751 }
12752 /*
12753 * If a provider failed to enable a probe then get out and
12754 * let the consumer know we failed.
12755 */
12756 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab, ep)) < 0)
12757 return (EBUSY);
12758
12759 total_matched += matched;
12760
12761 if (enab->dten_error != 0) {
12762 /*
12763 * If we get an error half-way through enabling the
12764 * probes, we kick out -- perhaps with some number of
12765 * them enabled. Leaving enabled probes enabled may
12766 * be slightly confusing for user-level, but we expect
12767 * that no one will attempt to actually drive on in
12768 * the face of such errors. If this is an anonymous
12769 * enabling (indicated with a NULL nmatched pointer),
12770 * we cmn_err() a message. We aren't expecting to
12771 * get such an error -- such as it can exist at all,
12772 * it would be a result of corrupted DOF in the driver
12773 * properties.
12774 */
12775 if (nmatched == NULL) {
12776 cmn_err(CE_WARN, "dtrace_enabling_match() "
12777 "error on %p: %d", (void *)ep,
12778 enab->dten_error);
12779 }
12780
12781 return (enab->dten_error);
12782 }
12783
12784 ep->dted_probegen = dtrace_probegen;
12785 }
12786
12787 if (nmatched != NULL)
12788 *nmatched = total_matched;
12789
12790 return (0);
12791}
12792
12793static void
12794 dtrace_enabling_matchall_with_cond(dtrace_match_cond_t *cond)
12795{
12796 dtrace_enabling_t *enab;
12797
12798 lck_mtx_lock(&cpu_lock);
12799 lck_mtx_lock(&dtrace_lock);
12800
12801 /*
12802 * Iterate over all retained enablings to see if any probes match
12803 * against them. We only perform this operation on enablings for which
12804 * we have sufficient permissions by virtue of being in the global zone
12805 * or in the same zone as the DTrace client. Because we can be called
12806 * after dtrace_detach() has been called, we cannot assert that there
12807 * are retained enablings. We can safely load from dtrace_retained,
12808 * however: the taskq_destroy() at the end of dtrace_detach() will
12809 * block pending our completion.
12810 */
12811
12812 /*
12813 * Darwin doesn't do zones.
12814 * Behave as if always in the "global" zone.
12815 */
12816 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12817 (void) dtrace_enabling_match(enab, NULL, cond);
12818 }
12819
12820 lck_mtx_unlock(&dtrace_lock);
12821 lck_mtx_unlock(&cpu_lock);
12822
12823}
12824
12825static void
12826dtrace_enabling_matchall(void)
12827{
12828 dtrace_enabling_matchall_with_cond(NULL);
12829}
12830
12831
12832
12833/*
12834 * If an enabling is to be enabled without having matched probes (that is, if
12835 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12836 * enabling must be _primed_ by creating an ECB for every ECB description.
12837 * This must be done to assure that we know the number of speculations, the
12838 * number of aggregations, the minimum buffer size needed, etc. before we
12839 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
12840 * enabling any probes, we create ECBs for every ECB description, but with a
12841 * NULL probe -- which is exactly what this function does.
12842 */
12843static void
12844dtrace_enabling_prime(dtrace_state_t *state)
12845{
12846 dtrace_enabling_t *enab;
12847 int i;
12848
12849 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12850 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12851
12852 if (enab->dten_vstate->dtvs_state != state)
12853 continue;
12854
12855 /*
12856 * We don't want to prime an enabling more than once, lest
12857 * we allow a malicious user to induce resource exhaustion.
12858 * (The ECBs that result from priming an enabling aren't
12859 * leaked -- but they also aren't deallocated until the
12860 * consumer state is destroyed.)
12861 */
12862 if (enab->dten_primed)
12863 continue;
12864
12865 for (i = 0; i < enab->dten_ndesc; i++) {
12866 enab->dten_current = enab->dten_desc[i];
12867 (void) dtrace_probe_enable(NULL, enab, NULL);
12868 }
12869
12870 enab->dten_primed = 1;
12871 }
12872}
12873
12874/*
12875 * Called to indicate that probes should be provided due to retained
12876 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12877 * must take an initial lap through the enabling calling the dtps_provide()
12878 * entry point explicitly to allow for autocreated probes.
12879 */
12880static void
12881dtrace_enabling_provide(dtrace_provider_t *prv)
12882{
12883 int i, all = 0;
12884 dtrace_probedesc_t desc;
12885 dtrace_genid_t gen;
12886
12887 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12888 LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
12889
12890 if (prv == NULL) {
12891 all = 1;
12892 prv = dtrace_provider;
12893 }
12894
12895 do {
12896 dtrace_enabling_t *enab;
12897 void *parg = prv->dtpv_arg;
12898
12899retry:
12900 gen = dtrace_retained_gen;
12901 for (enab = dtrace_retained; enab != NULL;
12902 enab = enab->dten_next) {
12903 for (i = 0; i < enab->dten_ndesc; i++) {
12904 desc = enab->dten_desc[i]->dted_probe;
12905 lck_mtx_unlock(&dtrace_lock);
12906 prv->dtpv_pops.dtps_provide(parg, &desc);
12907 lck_mtx_lock(&dtrace_lock);
12908 /*
12909 * Process the retained enablings again if
12910 * they have changed while we weren't holding
12911 * dtrace_lock.
12912 */
12913 if (gen != dtrace_retained_gen)
12914 goto retry;
12915 }
12916 }
12917 } while (all && (prv = prv->dtpv_next) != NULL);
12918
12919 lck_mtx_unlock(&dtrace_lock);
12920 dtrace_probe_provide(NULL, all ? NULL : prv);
12921 lck_mtx_lock(&dtrace_lock);
12922}
12923
12924/*
12925 * DTrace DOF Functions
12926 */
12927/*ARGSUSED*/
12928static void
12929dtrace_dof_error(dof_hdr_t *dof, const char *str)
12930{
12931#pragma unused(dof) /* __APPLE__ */
12932 if (dtrace_err_verbose)
12933 cmn_err(CE_WARN, "failed to process DOF: %s", str);
12934
12935#ifdef DTRACE_ERRDEBUG
12936 dtrace_errdebug(str);
12937#endif
12938}
12939
12940/*
12941 * Create DOF out of a currently enabled state. Right now, we only create
12942 * DOF containing the run-time options -- but this could be expanded to create
12943 * complete DOF representing the enabled state.
12944 */
12945static dof_hdr_t *
12946dtrace_dof_create(dtrace_state_t *state)
12947{
12948 dof_hdr_t *dof;
12949 dof_sec_t *sec;
12950 dof_optdesc_t *opt;
12951 int i, len = sizeof (dof_hdr_t) +
12952 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12953 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12954
12955 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12956
12957 dof = kmem_zalloc_aligned(len, 8, KM_SLEEP);
12958 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12959 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12960 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12961 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12962
12963 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12964 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12965 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12966 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12967 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12968 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12969
12970 dof->dofh_flags = 0;
12971 dof->dofh_hdrsize = sizeof (dof_hdr_t);
12972 dof->dofh_secsize = sizeof (dof_sec_t);
12973 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
12974 dof->dofh_secoff = sizeof (dof_hdr_t);
12975 dof->dofh_loadsz = len;
12976 dof->dofh_filesz = len;
12977 dof->dofh_pad = 0;
12978
12979 /*
12980 * Fill in the option section header...
12981 */
12982 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12983 sec->dofs_type = DOF_SECT_OPTDESC;
12984 sec->dofs_align = sizeof (uint64_t);
12985 sec->dofs_flags = DOF_SECF_LOAD;
12986 sec->dofs_entsize = sizeof (dof_optdesc_t);
12987
12988 opt = (dof_optdesc_t *)((uintptr_t)sec +
12989 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12990
12991 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12992 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12993
12994 for (i = 0; i < DTRACEOPT_MAX; i++) {
12995 opt[i].dofo_option = i;
12996 opt[i].dofo_strtab = DOF_SECIDX_NONE;
12997 opt[i].dofo_value = state->dts_options[i];
12998 }
12999
13000 return (dof);
13001}
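/*
 * Layout sketch (sizes assumed for LP64): the DOF built above is a 64-byte
 * dof_hdr_t, one dof_sec_t padded to an 8-byte boundary, then DTRACEOPT_MAX
 * 16-byte dof_optdesc_t entries -- a single loadable DOF_SECT_OPTDESC
 * section capturing every run-time option value.
 */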
13002
13003static dof_hdr_t *
13004dtrace_dof_copyin(user_addr_t uarg, int *errp)
13005{
13006 dof_hdr_t hdr, *dof;
13007
13008 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
13009
13010 /*
13011 * First, we're going to copyin() the sizeof (dof_hdr_t).
13012 */
13013 if (copyin(uarg, &hdr, sizeof (hdr)) != 0) {
13014 dtrace_dof_error(NULL, "failed to copyin DOF header");
13015 *errp = EFAULT;
13016 return (NULL);
13017 }
13018
13019 /*
13020 * Now we'll allocate the entire DOF and copy it in -- provided
13021 * that the length isn't outrageous.
13022 */
13023 if (hdr.dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13024 dtrace_dof_error(&hdr, "load size exceeds maximum");
13025 *errp = E2BIG;
13026 return (NULL);
13027 }
13028
13029 if (hdr.dofh_loadsz < sizeof (hdr)) {
13030 dtrace_dof_error(&hdr, "invalid load size");
13031 *errp = EINVAL;
13032 return (NULL);
13033 }
13034
13035 dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
13036
13037 if (copyin(uarg, dof, hdr.dofh_loadsz) != 0 ||
13038 dof->dofh_loadsz != hdr.dofh_loadsz) {
13039 kmem_free_aligned(dof, hdr.dofh_loadsz);
13040 *errp = EFAULT;
13041 return (NULL);
13042 }
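 /*
 * The dofh_loadsz recheck above guards against user memory changing
 * between the header copyin() and the full copyin(); a mismatch is
 * treated the same as a faulted copy.
 */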
13043
13044 return (dof);
13045}
13046
13047static dof_hdr_t *
13048dtrace_dof_copyin_from_proc(proc_t* p, user_addr_t uarg, int *errp)
13049{
13050 dof_hdr_t hdr, *dof;
13051
13052 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
13053
13054 /*
13055 * First, we're going to copyin() the sizeof (dof_hdr_t).
13056 */
13057 if (uread(p, &hdr, sizeof(hdr), uarg) != KERN_SUCCESS) {
13058 dtrace_dof_error(NULL, "failed to copyin DOF header");
13059 *errp = EFAULT;
13060 return (NULL);
13061 }
13062
13063 /*
13064 * Now we'll allocate the entire DOF and copy it in -- provided
13065 * that the length isn't outrageous.
13066 */
13067 if (hdr.dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13068 dtrace_dof_error(&hdr, "load size exceeds maximum");
13069 *errp = E2BIG;
13070 return (NULL);
13071 }
13072
13073 if (hdr.dofh_loadsz < sizeof (hdr)) {
13074 dtrace_dof_error(&hdr, "invalid load size");
13075 *errp = EINVAL;
13076 return (NULL);
13077 }
13078
13079 dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
13080
13081 if (uread(p, dof, hdr.dofh_loadsz, uarg) != KERN_SUCCESS) {
13082 kmem_free_aligned(dof, hdr.dofh_loadsz);
13083 *errp = EFAULT;
13084 return (NULL);
13085 }
13086
13087 return (dof);
13088}
13089
13090static void
13091dtrace_dof_destroy(dof_hdr_t *dof)
13092{
13093 kmem_free_aligned(dof, dof->dofh_loadsz);
13094}
13095
13096static dof_hdr_t *
13097dtrace_dof_property(const char *name)
13098{
13099 unsigned int len = 0;
13100 dof_hdr_t *dof;
13101
13102 if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
13103 return NULL;
13104 }
13105
13106 if (!PEReadNVRAMProperty(name, NULL, &len)) {
13107 return NULL;
13108 }
13109
13110 dof = kmem_alloc_aligned(len, 8, KM_SLEEP);
13111
13112 if (!PEReadNVRAMProperty(name, dof, &len)) {
13113 dtrace_dof_destroy(dof);
13114 dtrace_dof_error(NULL, "unreadable DOF");
13115 return NULL;
13116 }
2d21ac55
A
13117
13118 if (len < sizeof (dof_hdr_t)) {
a39ff7e2 13119 dtrace_dof_destroy(dof);
2d21ac55
A
13120 dtrace_dof_error(NULL, "truncated header");
13121 return (NULL);
13122 }
13123
a39ff7e2
A
13124 if (len < dof->dofh_loadsz) {
13125 dtrace_dof_destroy(dof);
2d21ac55
A
13126 dtrace_dof_error(NULL, "truncated DOF");
13127 return (NULL);
13128 }
13129
a39ff7e2
A
13130 if (len != dof->dofh_loadsz) {
13131 dtrace_dof_destroy(dof);
13132 dtrace_dof_error(NULL, "invalid DOF size");
2d21ac55
A
13133 return (NULL);
13134 }
13135
a39ff7e2
A
13136 if (dof->dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13137 dtrace_dof_destroy(dof);
13138 dtrace_dof_error(NULL, "oversized DOF");
13139 return (NULL);
13140 }
2d21ac55
A
13141
13142 return (dof);
13143}
13144
/*
 * Return the dof_sec_t pointer corresponding to a given section index. If the
 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
 * a type other than DOF_SECT_NONE is specified, the header is checked against
 * this type and NULL is returned if the types do not match.
 */
static dof_sec_t *
dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
{
	dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);

	if (i >= dof->dofh_secnum) {
		dtrace_dof_error(dof, "referenced section index is invalid");
		return (NULL);
	}

	if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
		dtrace_dof_error(dof, "referenced section is not loadable");
		return (NULL);
	}

	if (type != DOF_SECT_NONE && type != sec->dofs_type) {
		dtrace_dof_error(dof, "referenced section is the wrong type");
		return (NULL);
	}

	return (sec);
}

static dtrace_probedesc_t *
dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
{
	dof_probedesc_t *probe;
	dof_sec_t *strtab;
	uintptr_t daddr = (uintptr_t)dof;
	uintptr_t str;
	size_t size;

	if (sec->dofs_type != DOF_SECT_PROBEDESC) {
		dtrace_dof_error(dof, "invalid probe section");
		return (NULL);
	}

	if (sec->dofs_align != sizeof (dof_secidx_t)) {
		dtrace_dof_error(dof, "bad alignment in probe description");
		return (NULL);
	}

	if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
		dtrace_dof_error(dof, "truncated probe description");
		return (NULL);
	}

	probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);

	if (strtab == NULL)
		return (NULL);

	str = daddr + strtab->dofs_offset;
	size = strtab->dofs_size;

	if (probe->dofp_provider >= strtab->dofs_size) {
		dtrace_dof_error(dof, "corrupt probe provider");
		return (NULL);
	}

	(void) strncpy(desc->dtpd_provider,
	    (char *)(str + probe->dofp_provider),
	    MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));

	/* APPLE NOTE: Darwin employs size bounded string operation. */
	desc->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';

	if (probe->dofp_mod >= strtab->dofs_size) {
		dtrace_dof_error(dof, "corrupt probe module");
		return (NULL);
	}

	(void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
	    MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));

	/* APPLE NOTE: Darwin employs size bounded string operation. */
	desc->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';

	if (probe->dofp_func >= strtab->dofs_size) {
		dtrace_dof_error(dof, "corrupt probe function");
		return (NULL);
	}

	(void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
	    MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));

	/* APPLE NOTE: Darwin employs size bounded string operation. */
	desc->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';

	if (probe->dofp_name >= strtab->dofs_size) {
		dtrace_dof_error(dof, "corrupt probe name");
		return (NULL);
	}

	(void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
	    MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));

	/* APPLE NOTE: Darwin employs size bounded string operation. */
	desc->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

	return (desc);
}

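/*
 * Editor's note (illustrative, not in the original source): each strncpy()
 * above is doubly bounded -- by the destination field size and by the bytes
 * remaining in the DOF string table -- and the explicit '\0' store
 * guarantees termination even when the copy fills the field. For example,
 * with a 16-byte string table and dofp_provider == 12, at most 4 bytes are
 * copied regardless of DTRACE_PROVNAMELEN.
 */
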
static dtrace_difo_t *
dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_difo_t *dp;
	size_t ttl = 0;
	dof_difohdr_t *dofd;
	uintptr_t daddr = (uintptr_t)dof;
	size_t max_size = dtrace_difo_maxsize;
	uint_t i;
	int l, n;

	static const struct {
		int section;
		int bufoffs;
		int lenoffs;
		int entsize;
		int align;
		const char *msg;
	} difo[] = {
		{ DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
		offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
		sizeof (dif_instr_t), "multiple DIF sections" },

		{ DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
		offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
		sizeof (uint64_t), "multiple integer tables" },

		{ DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
		offsetof(dtrace_difo_t, dtdo_strlen), 0,
		sizeof (char), "multiple string tables" },

		{ DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
		offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
		sizeof (uint_t), "multiple variable tables" },

		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
	};

	if (sec->dofs_type != DOF_SECT_DIFOHDR) {
		dtrace_dof_error(dof, "invalid DIFO header section");
		return (NULL);
	}

	if (sec->dofs_align != sizeof (dof_secidx_t)) {
		dtrace_dof_error(dof, "bad alignment in DIFO header");
		return (NULL);
	}

	if (sec->dofs_size < sizeof (dof_difohdr_t) ||
	    sec->dofs_size % sizeof (dof_secidx_t)) {
		dtrace_dof_error(dof, "bad size in DIFO header");
		return (NULL);
	}

	dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
	n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;

	dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
	dp->dtdo_rtype = dofd->dofd_rtype;

	for (l = 0; l < n; l++) {
		dof_sec_t *subsec;
		void **bufp;
		uint32_t *lenp;

		if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
		    dofd->dofd_links[l])) == NULL)
			goto err; /* invalid section link */

		if (ttl + subsec->dofs_size > max_size) {
			dtrace_dof_error(dof, "exceeds maximum size");
			goto err;
		}

		ttl += subsec->dofs_size;

		for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {

			if (subsec->dofs_type != (uint32_t)difo[i].section)
				continue;

			if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
				dtrace_dof_error(dof, "section not loaded");
				goto err;
			}

			if (subsec->dofs_align != (uint32_t)difo[i].align) {
				dtrace_dof_error(dof, "bad alignment");
				goto err;
			}

			bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
			lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);

			if (*bufp != NULL) {
				dtrace_dof_error(dof, difo[i].msg);
				goto err;
			}

			if ((uint32_t)difo[i].entsize != subsec->dofs_entsize) {
				dtrace_dof_error(dof, "entry size mismatch");
				goto err;
			}

			if (subsec->dofs_entsize != 0 &&
			    (subsec->dofs_size % subsec->dofs_entsize) != 0) {
				dtrace_dof_error(dof, "corrupt entry size");
				goto err;
			}

			*lenp = subsec->dofs_size;
			*bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
			bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
			    *bufp, subsec->dofs_size);

			if (subsec->dofs_entsize != 0)
				*lenp /= subsec->dofs_entsize;

			break;
		}

		/*
		 * If we encounter a loadable DIFO sub-section that is not
		 * known to us, assume this is a broken program and fail.
		 */
		if (difo[i].section == DOF_SECT_NONE &&
		    (subsec->dofs_flags & DOF_SECF_LOAD)) {
			dtrace_dof_error(dof, "unrecognized DIFO subsection");
			goto err;
		}
	}

	if (dp->dtdo_buf == NULL) {
		/*
		 * We can't have a DIF object without DIF text.
		 */
		dtrace_dof_error(dof, "missing DIF text");
		goto err;
	}

	/*
	 * Before we validate the DIF object, run through the variable table
	 * looking for the strings -- if any of their sizes are 0, we'll set
	 * them to the system-wide default string size. Note that this should
	 * _not_ happen if the "strsize" option has been set -- in this case,
	 * the compiler should have set the size to reflect the setting of
	 * the option.
	 */
	for (i = 0; i < dp->dtdo_varlen; i++) {
		dtrace_difv_t *v = &dp->dtdo_vartab[i];
		dtrace_diftype_t *t = &v->dtdv_type;

		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
			continue;

		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
			t->dtdt_size = dtrace_strsize_default;
	}

	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
		goto err;

	dtrace_difo_init(dp, vstate);
	return (dp);

err:
	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));

	kmem_free(dp, sizeof (dtrace_difo_t));
	return (NULL);
}

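/*
 * Editor's note (illustrative, not in the original source): for sections
 * with a nonzero entry size, *lenp above is first set to the section's byte
 * size and then divided by dofs_entsize, so the stored length is a count of
 * entries rather than bytes. E.g. a DOF_SECT_VARTAB of 3 * sizeof
 * (dtrace_difv_t) bytes yields dtdo_varlen == 3, while the string table
 * (entsize 0) keeps its length in bytes.
 */
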
static dtrace_predicate_t *
dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_difo_t *dp;

	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
		return (NULL);

	return (dtrace_predicate_create(dp));
}

static dtrace_actdesc_t *
dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
	dof_actdesc_t *desc;
	dof_sec_t *difosec;
	size_t offs;
	uintptr_t daddr = (uintptr_t)dof;
	uint64_t arg;
	dtrace_actkind_t kind;

	if (sec->dofs_type != DOF_SECT_ACTDESC) {
		dtrace_dof_error(dof, "invalid action section");
		return (NULL);
	}

	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
		dtrace_dof_error(dof, "truncated action description");
		return (NULL);
	}

	if (sec->dofs_align != sizeof (uint64_t)) {
		dtrace_dof_error(dof, "bad alignment in action description");
		return (NULL);
	}

	if (sec->dofs_size < sec->dofs_entsize) {
		dtrace_dof_error(dof, "section entry size exceeds total size");
		return (NULL);
	}

	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
		dtrace_dof_error(dof, "bad entry size in action description");
		return (NULL);
	}

	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
		return (NULL);
	}

	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
		desc = (dof_actdesc_t *)(daddr +
		    (uintptr_t)sec->dofs_offset + offs);
		kind = (dtrace_actkind_t)desc->dofa_kind;

		if ((DTRACEACT_ISPRINTFLIKE(kind) &&
		    (kind != DTRACEACT_PRINTA || desc->dofa_strtab != DOF_SECIDX_NONE)) ||
		    (kind == DTRACEACT_DIFEXPR && desc->dofa_strtab != DOF_SECIDX_NONE))
		{
			dof_sec_t *strtab;
			char *str, *fmt;
			uint64_t i;

			/*
			 * The argument to these actions is an index into the
			 * DOF string table. For printf()-like actions, this
			 * is the format string. For print(), this is the
			 * CTF type of the expression result.
			 */
			if ((strtab = dtrace_dof_sect(dof,
			    DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
				goto err;

			str = (char *)((uintptr_t)dof +
			    (uintptr_t)strtab->dofs_offset);

			for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
				if (str[i] == '\0')
					break;
			}

			if (i >= strtab->dofs_size) {
				dtrace_dof_error(dof, "bogus format string");
				goto err;
			}

			if (i == desc->dofa_arg) {
				dtrace_dof_error(dof, "empty format string");
				goto err;
			}

			i -= desc->dofa_arg;
			fmt = kmem_alloc(i + 1, KM_SLEEP);
			bcopy(&str[desc->dofa_arg], fmt, i + 1);
			arg = (uint64_t)(uintptr_t)fmt;
		} else {
			if (kind == DTRACEACT_PRINTA) {
				ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
				arg = 0;
			} else {
				arg = desc->dofa_arg;
			}
		}

		act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
		    desc->dofa_uarg, arg);

		if (last != NULL) {
			last->dtad_next = act;
		} else {
			first = act;
		}

		last = act;

		if (desc->dofa_difo == DOF_SECIDX_NONE)
			continue;

		if ((difosec = dtrace_dof_sect(dof,
		    DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
			goto err;

		act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);

		if (act->dtad_difo == NULL)
			goto err;
	}

	ASSERT(first != NULL);
	return (first);

err:
	for (act = first; act != NULL; act = next) {
		next = act->dtad_next;
		dtrace_actdesc_release(act, vstate);
	}

	return (NULL);
}

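/*
 * Editor's note (illustrative, not in the original source): the scan above
 * walks from dofa_arg toward the end of the string table and insists on
 * finding a '\0' before running off the section, so a format string that is
 * unterminated ("bogus") or zero-length ("empty") is rejected before
 * kmem_alloc() ever sees an attacker-chosen length.
 */
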
static dtrace_ecbdesc_t *
dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_ecbdesc_t *ep;
	dof_ecbdesc_t *ecb;
	dtrace_probedesc_t *desc;
	dtrace_predicate_t *pred = NULL;

	if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
		dtrace_dof_error(dof, "truncated ECB description");
		return (NULL);
	}

	if (sec->dofs_align != sizeof (uint64_t)) {
		dtrace_dof_error(dof, "bad alignment in ECB description");
		return (NULL);
	}

	ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
	sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);

	if (sec == NULL)
		return (NULL);

	ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
	ep->dted_uarg = ecb->dofe_uarg;
	desc = &ep->dted_probe;

	if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
		goto err;

	if (ecb->dofe_pred != DOF_SECIDX_NONE) {
		if ((sec = dtrace_dof_sect(dof,
		    DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
			goto err;

		if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
			goto err;

		ep->dted_pred.dtpdd_predicate = pred;
	}

	if (ecb->dofe_actions != DOF_SECIDX_NONE) {
		if ((sec = dtrace_dof_sect(dof,
		    DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
			goto err;

		ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);

		if (ep->dted_action == NULL)
			goto err;
	}

	return (ep);

err:
	if (pred != NULL)
		dtrace_predicate_release(pred, vstate);
	kmem_free(ep, sizeof (dtrace_ecbdesc_t));
	return (NULL);
}

/*
 * APPLE NOTE: dyld handles dof relocation.
 * Darwin does not need dtrace_dof_relocate()
 */

/*
 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
 * header: it should be at the front of a memory region that is at least
 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
 * size. It need not be validated in any other way.
 */
static int
dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
    dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
{
#pragma unused(ubase) /* __APPLE__ */
	uint64_t len = dof->dofh_loadsz, seclen;
	uintptr_t daddr = (uintptr_t)dof;
	dtrace_ecbdesc_t *ep;
	dtrace_enabling_t *enab;
	uint_t i;

	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
	ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));

	/*
	 * Check the DOF header identification bytes. In addition to checking
	 * valid settings, we also verify that unused bits/bytes are zeroed so
	 * we can use them later without fear of regressing existing binaries.
	 */
	if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
	    DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
		dtrace_dof_error(dof, "DOF magic string mismatch");
		return (-1);
	}

	if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
	    dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
		dtrace_dof_error(dof, "DOF has invalid data model");
		return (-1);
	}

	if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
		dtrace_dof_error(dof, "DOF encoding mismatch");
		return (-1);
	}

	/*
	 * APPLE NOTE: Darwin only supports DOF_VERSION_3 for now.
	 */
	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_3) {
		dtrace_dof_error(dof, "DOF version mismatch");
		return (-1);
	}

	if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
		dtrace_dof_error(dof, "DOF uses unsupported instruction set");
		return (-1);
	}

	if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
		dtrace_dof_error(dof, "DOF uses too many integer registers");
		return (-1);
	}

	if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
		dtrace_dof_error(dof, "DOF uses too many tuple registers");
		return (-1);
	}

	for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
		if (dof->dofh_ident[i] != 0) {
			dtrace_dof_error(dof, "DOF has invalid ident byte set");
			return (-1);
		}
	}

	if (dof->dofh_flags & ~DOF_FL_VALID) {
		dtrace_dof_error(dof, "DOF has invalid flag bits set");
		return (-1);
	}

	if (dof->dofh_secsize < sizeof(dof_sec_t)) {
		dtrace_dof_error(dof, "invalid section header size");
		return (-1);
	}

	/*
	 * Check that the section headers don't exceed the amount of DOF
	 * data. Note that we cast the section size and number of sections
	 * to uint64_t's to prevent possible overflow in the multiplication.
	 */
	seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;

	if (dof->dofh_secoff > len || seclen > len ||
	    dof->dofh_secoff + seclen > len) {
		dtrace_dof_error(dof, "truncated section headers");
		return (-1);
	}
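
	/*
	 * Editor's note (illustrative, not in the original source): both
	 * operands above are 32-bit fields, so the widening casts matter.
	 * With dofh_secnum == 0x01000000 and dofh_secsize == 0x100, a
	 * 32-bit product would wrap to 0 and slip past the length check;
	 * the 64-bit product (0x100000000) is correctly rejected.
	 */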

	if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
		dtrace_dof_error(dof, "misaligned section headers");
		return (-1);
	}

	if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
		dtrace_dof_error(dof, "misaligned section size");
		return (-1);
	}

	/*
	 * Take an initial pass through the section headers to be sure that
	 * the headers don't have stray offsets. If the 'noprobes' flag is
	 * set, do not permit sections relating to providers, probes, or args.
	 */
	for (i = 0; i < dof->dofh_secnum; i++) {
		dof_sec_t *sec = (dof_sec_t *)(daddr +
		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);

		if (noprobes) {
			switch (sec->dofs_type) {
			case DOF_SECT_PROVIDER:
			case DOF_SECT_PROBES:
			case DOF_SECT_PRARGS:
			case DOF_SECT_PROFFS:
				dtrace_dof_error(dof, "illegal sections "
				    "for enabling");
				return (-1);
			}
		}

		if (!(sec->dofs_flags & DOF_SECF_LOAD))
			continue; /* just ignore non-loadable sections */

		if (sec->dofs_align & (sec->dofs_align - 1)) {
			dtrace_dof_error(dof, "bad section alignment");
			return (-1);
		}

		if (sec->dofs_offset & (sec->dofs_align - 1)) {
			dtrace_dof_error(dof, "misaligned section");
			return (-1);
		}

		if (sec->dofs_offset > len || sec->dofs_size > len ||
		    sec->dofs_offset + sec->dofs_size > len) {
			dtrace_dof_error(dof, "corrupt section header");
			return (-1);
		}

		if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
		    sec->dofs_offset + sec->dofs_size - 1) != '\0') {
			dtrace_dof_error(dof, "non-terminating string table");
			return (-1);
		}
	}

	/*
	 * APPLE NOTE: We have no further relocation to perform.
	 * All dof values are relative offsets.
	 */

	if ((enab = *enabp) == NULL)
		enab = *enabp = dtrace_enabling_create(vstate);

	for (i = 0; i < dof->dofh_secnum; i++) {
		dof_sec_t *sec = (dof_sec_t *)(daddr +
		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);

		if (sec->dofs_type != DOF_SECT_ECBDESC)
			continue;

		/*
		 * APPLE NOTE: Defend against gcc 4.0 botch on x86;
		 * not all paths out of inlined dtrace_dof_ecbdesc
		 * are checked for the NULL return value.
		 * Check for NULL explicitly here.
		 */
		ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr);
		if (ep == NULL) {
			dtrace_enabling_destroy(enab);
			*enabp = NULL;
			return (-1);
		}

		dtrace_enabling_add(enab, ep);
	}

	return (0);
}

/*
 * Process DOF for any options. This routine assumes that the DOF has been
 * at least processed by dtrace_dof_slurp().
 */
static int
dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
{
	uint_t i;
	int rval;
	uint32_t entsize;
	size_t offs;
	dof_optdesc_t *desc;

	for (i = 0; i < dof->dofh_secnum; i++) {
		dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);

		if (sec->dofs_type != DOF_SECT_OPTDESC)
			continue;

		if (sec->dofs_align != sizeof (uint64_t)) {
			dtrace_dof_error(dof, "bad alignment in "
			    "option description");
			return (EINVAL);
		}

		if ((entsize = sec->dofs_entsize) == 0) {
			dtrace_dof_error(dof, "zeroed option entry size");
			return (EINVAL);
		}

		if (entsize < sizeof (dof_optdesc_t)) {
			dtrace_dof_error(dof, "bad option entry size");
			return (EINVAL);
		}

		for (offs = 0; offs < sec->dofs_size; offs += entsize) {
			desc = (dof_optdesc_t *)((uintptr_t)dof +
			    (uintptr_t)sec->dofs_offset + offs);

			if (desc->dofo_strtab != DOF_SECIDX_NONE) {
				dtrace_dof_error(dof, "non-zero option string");
				return (EINVAL);
			}

			if (desc->dofo_value == (uint64_t)DTRACEOPT_UNSET) {
				dtrace_dof_error(dof, "unset option");
				return (EINVAL);
			}

			if ((rval = dtrace_state_option(state,
			    desc->dofo_option, desc->dofo_value)) != 0) {
				dtrace_dof_error(dof, "rejected option");
				return (rval);
			}
		}
	}

	return (0);
}

/*
 * DTrace Consumer State Functions
 */
static int
dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
{
	size_t hashsize, maxper, min_size, chunksize = dstate->dtds_chunksize;
	void *base;
	uintptr_t limit;
	dtrace_dynvar_t *dvar, *next, *start;
	size_t i;

	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
	ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);

	bzero(dstate, sizeof (dtrace_dstate_t));

	if ((dstate->dtds_chunksize = chunksize) == 0)
		dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;

	VERIFY(dstate->dtds_chunksize < (LONG_MAX - sizeof (dtrace_dynhash_t)));

	if (size < (min_size = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
		size = min_size;

	if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	dstate->dtds_size = size;
	dstate->dtds_base = base;
	dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
	bzero(dstate->dtds_percpu, (int)NCPU * sizeof (dtrace_dstate_percpu_t));

	hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));

	if (hashsize != 1 && (hashsize & 1))
		hashsize--;

	dstate->dtds_hashsize = hashsize;
	dstate->dtds_hash = dstate->dtds_base;

	/*
	 * Set all of our hash buckets to point to the single sink, and (if
	 * it hasn't already been set), set the sink's hash value to be the
	 * sink sentinel value. The sink is needed for dynamic variable
	 * lookups to know that they have iterated over an entire, valid hash
	 * chain.
	 */
	for (i = 0; i < hashsize; i++)
		dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;

	if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
		dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;

	/*
	 * Determine number of active CPUs. Divide free list evenly among
	 * active CPUs.
	 */
	start = (dtrace_dynvar_t *)
	    ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
	limit = (uintptr_t)base + size;

	VERIFY((uintptr_t)start < limit);
	VERIFY((uintptr_t)start >= (uintptr_t)base);

	maxper = (limit - (uintptr_t)start) / (int)NCPU;
	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;

	for (i = 0; i < NCPU; i++) {
		dstate->dtds_percpu[i].dtdsc_free = dvar = start;

		/*
		 * If we don't even have enough chunks to make it once through
		 * NCPUs, we're just going to allocate everything to the first
		 * CPU. And if we're on the last CPU, we're going to allocate
		 * whatever is left over. In either case, we set the limit to
		 * be the limit of the dynamic variable space.
		 */
		if (maxper == 0 || i == NCPU - 1) {
			limit = (uintptr_t)base + size;
			start = NULL;
		} else {
			limit = (uintptr_t)start + maxper;
			start = (dtrace_dynvar_t *)limit;
		}

		VERIFY(limit <= (uintptr_t)base + size);

		for (;;) {
			next = (dtrace_dynvar_t *)((uintptr_t)dvar +
			    dstate->dtds_chunksize);

			if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
				break;

			VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
			    (uintptr_t)dvar <= (uintptr_t)base + size);
			dvar->dtdv_next = next;
			dvar = next;
		}

		if (maxper == 0)
			break;
	}

	return (0);
}
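
/*
 * Editor's note (illustrative, not in the original source): the hash table
 * and the chunk free lists share one allocation, sized on the assumption of
 * roughly one bucket per chunk. With hypothetical numbers, size == 1MB and
 * dtds_chunksize == 256 gives hashsize near 1MB / (256 + sizeof
 * (dtrace_dynhash_t)), rounded down to an even count; the space after the
 * buckets is then carved into 256-byte chunks and split across the NCPU
 * per-CPU free lists in maxper-sized slices, with the last CPU taking any
 * remainder.
 */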

static void
dtrace_dstate_fini(dtrace_dstate_t *dstate)
{
	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);

	if (dstate->dtds_base == NULL)
		return;

	kmem_free(dstate->dtds_base, dstate->dtds_size);
	kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
}

static void
dtrace_vstate_fini(dtrace_vstate_t *vstate)
{
	/*
	 * Logical XOR, where are you?
	 */
	ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));

	if (vstate->dtvs_nglobals > 0) {
		kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
		    sizeof (dtrace_statvar_t *));
	}

	if (vstate->dtvs_ntlocals > 0) {
		kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
		    sizeof (dtrace_difv_t));
	}

	ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));

	if (vstate->dtvs_nlocals > 0) {
		kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
		    sizeof (dtrace_statvar_t *));
	}
}

static void
dtrace_state_clean(dtrace_state_t *state)
{
	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
		return;

	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
	dtrace_speculation_clean(state);
}

static void
dtrace_state_deadman(dtrace_state_t *state)
{
	hrtime_t now;

	dtrace_sync();

	now = dtrace_gethrtime();

	if (state != dtrace_anon.dta_state &&
	    now - state->dts_laststatus >= dtrace_deadman_user)
		return;

	/*
	 * We must be sure that dts_alive never appears to be less than the
	 * value upon entry to dtrace_state_deadman(), and because we lack a
	 * dtrace_cas64(), we cannot store to it atomically. We thus instead
	 * store INT64_MAX to it, followed by a memory barrier, followed by
	 * the new value. This assures that dts_alive never appears to be
	 * less than its true value, regardless of the order in which the
	 * stores to the underlying storage are issued.
	 */
	state->dts_alive = INT64_MAX;
	dtrace_membar_producer();
	state->dts_alive = now;
}
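
/*
 * Editor's note (illustrative, not in the original source): without a
 * 64-bit atomic store, the two halves of dts_alive can land separately, so
 * a reader could otherwise observe a torn value below the old timestamp
 * and wrongly declare the consumer dead. Parking the field at INT64_MAX
 * first, with a producer barrier before the real timestamp, means any torn
 * observation mixes halves of INT64_MAX and the new value and thus appears
 * too large rather than too small -- the safe direction for a liveness
 * check.
 */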

static int
dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state)
{
	minor_t minor;
	major_t major;
	char c[30];
	dtrace_state_t *state;
	dtrace_optval_t *opt;
	int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
	unsigned int cpu_it;

	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);

	/* Cause restart */
	*new_state = NULL;

	if (devp != NULL) {
		minor = getminor(*devp);
	}
	else {
		minor = DTRACE_NCLIENTS - 1;
	}

	state = dtrace_state_allocate(minor);
	if (NULL == state) {
		printf("dtrace_open: couldn't acquire minor number %d. This usually means that too many DTrace clients are in use at the moment", minor);
		return (ERESTART);	/* can't reacquire */
	}

	state->dts_epid = DTRACE_EPIDNONE + 1;

	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
	state->dts_aggid_arena = vmem_create(c, (void *)1, INT32_MAX, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);

	if (devp != NULL) {
		major = getemajor(*devp);
	} else {
		major = ddi_driver_major(dtrace_devi);
	}

	state->dts_dev = makedev(major, minor);

	if (devp != NULL)
		*devp = state->dts_dev;

	/*
	 * We allocate NCPU buffers. On the one hand, this can be quite
	 * a bit of memory per instance (nearly 36K on a Starcat). On the
	 * other hand, it saves an additional memory reference in the probe
	 * path.
	 */
	state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
	state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
	state->dts_buf_over_limit = 0;

	/*
	 * Allocate and initialise the per-process per-CPU random state.
	 * SI_SUB_RANDOM < SI_SUB_DTRACE_ANON therefore entropy device is
	 * assumed to be seeded at this point (if from Fortuna seed file).
	 */
	state->dts_rstate = kmem_zalloc(NCPU * sizeof(uint64_t*), KM_SLEEP);
	state->dts_rstate[0] = kmem_zalloc(2 * sizeof(uint64_t), KM_SLEEP);
	(void) read_random(state->dts_rstate[0], 2 * sizeof(uint64_t));
	for (cpu_it = 1; cpu_it < NCPU; cpu_it++) {
		state->dts_rstate[cpu_it] = kmem_zalloc(2 * sizeof(uint64_t), KM_SLEEP);
		/*
		 * Each CPU is assigned a 2^64 period, non-overlapping
		 * subsequence.
		 */
		dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1],
		    state->dts_rstate[cpu_it]);
	}
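
	/*
	 * Editor's note (illustrative, not in the original source): only
	 * CPU 0's 128-bit state is seeded from the entropy source; each
	 * subsequent CPU's state is derived by applying the xoroshiro128+
	 * jump function to its predecessor. The jump advances the generator
	 * by 2^64 steps, so the NCPU streams are disjoint windows of a
	 * single period rather than independently seeded (and possibly
	 * colliding) generators.
	 */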

	state->dts_cleaner = CYCLIC_NONE;
	state->dts_deadman = CYCLIC_NONE;
	state->dts_vstate.dtvs_state = state;

	for (i = 0; i < DTRACEOPT_MAX; i++)
		state->dts_options[i] = DTRACEOPT_UNSET;

	/*
	 * Set the default options.
	 */
	opt = state->dts_options;
	opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
	opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
	opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
	opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
	opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
	opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
	opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
	opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
	opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
	opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
	opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
	opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
	opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
	opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
	opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_default;

	/*
	 * Depending on the user credentials, we set flag bits which alter probe
	 * visibility or the amount of destructiveness allowed. In the case of
	 * actual anonymous tracing, or the possession of all privileges, all of
	 * the normal checks are bypassed.
	 */
#if defined(__APPLE__)
	if (cr != NULL) {
		kauth_cred_ref(cr);
		state->dts_cred.dcr_cred = cr;
	}
	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
		if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
			/*
			 * Allow only proc credentials when DTrace is
			 * restricted by the current security policy
			 */
			state->dts_cred.dcr_visible = DTRACE_CRV_ALLPROC;
			state->dts_cred.dcr_action = DTRACE_CRA_PROC | DTRACE_CRA_PROC_CONTROL | DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
		}
		else {
			state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
			state->dts_cred.dcr_action = DTRACE_CRA_ALL;
		}
	}

#else
	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
		state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
		state->dts_cred.dcr_action = DTRACE_CRA_ALL;
	}
	else {
		/*
		 * Set up the credentials for this instantiation. We take a
		 * hold on the credential to prevent it from disappearing on
		 * us; this in turn prevents the zone_t referenced by this
		 * credential from disappearing. This means that we can
		 * examine the credential and the zone from probe context.
		 */
		crhold(cr);
		state->dts_cred.dcr_cred = cr;

		/*
		 * CRA_PROC means "we have *some* privilege for dtrace" and
		 * unlocks the use of variables like pid, zonename, etc.
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
		    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
			state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
		}

		/*
		 * dtrace_user allows use of syscall and profile providers.
		 * If the user also has proc_owner and/or proc_zone, we
		 * extend the scope to include additional visibility and
		 * destructive power.
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
				state->dts_cred.dcr_visible |=
				    DTRACE_CRV_ALLPROC;

				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
			}

			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
				state->dts_cred.dcr_visible |=
				    DTRACE_CRV_ALLZONE;

				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
			}

			/*
			 * If we have all privs in whatever zone this is,
			 * we can do destructive things to processes which
			 * have altered credentials.
			 *
			 * APPLE NOTE: Darwin doesn't do zones.
			 * Behave as if zone always has destructive privs.
			 */
			state->dts_cred.dcr_action |=
			    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
		}

		/*
		 * Holding the dtrace_kernel privilege also implies that
		 * the user has the dtrace_user privilege from a visibility
		 * perspective. But without further privileges, some
		 * destructive actions are not available.
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
			/*
			 * Make all probes in all zones visible. However,
			 * this doesn't mean that all actions become available
			 * to all zones.
			 */
			state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
			    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;

			state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
			    DTRACE_CRA_PROC;
			/*
			 * Holding proc_owner means that destructive actions
			 * for *this* zone are allowed.
			 */
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;

			/*
			 * Holding proc_zone means that destructive actions
			 * for this user/group ID in all zones are allowed.
			 */
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;

			/*
			 * If we have all privs in whatever zone this is,
			 * we can do destructive things to processes which
			 * have altered credentials.
			 *
			 * APPLE NOTE: Darwin doesn't do zones.
			 * Behave as if zone always has destructive privs.
			 */
			state->dts_cred.dcr_action |=
			    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
		}

		/*
		 * Holding the dtrace_proc privilege gives control over fasttrap
		 * and pid providers. We need to grant wider destructive
		 * privileges in the event that the user has proc_owner and/or
		 * proc_zone.
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;

			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
		}
	}
#endif

	*new_state = state;
	return(0);	/* Success */
}

static int
dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
{
	dtrace_optval_t *opt = state->dts_options, size;
	processorid_t cpu = 0;
	size_t limit = buf->dtb_size;
	int flags = 0, rval;

	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
	ASSERT(which < DTRACEOPT_MAX);
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
	    (state == dtrace_anon.dta_state &&
	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));

	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
		return (0);

	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
		cpu = opt[DTRACEOPT_CPU];

	if (which == DTRACEOPT_SPECSIZE)
		flags |= DTRACEBUF_NOSWITCH;

	if (which == DTRACEOPT_BUFSIZE) {
		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
			flags |= DTRACEBUF_RING;

		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
			flags |= DTRACEBUF_FILL;

		if (state != dtrace_anon.dta_state ||
		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
			flags |= DTRACEBUF_INACTIVE;
	}

	for (size = opt[which]; (size_t)size >= sizeof (uint64_t); size >>= 1) {
		/*
		 * The size must be 8-byte aligned. If the size is not 8-byte
		 * aligned, drop it down by the difference.
		 */
		if (size & (sizeof (uint64_t) - 1))
			size -= size & (sizeof (uint64_t) - 1);

		if (size < state->dts_reserve) {
			/*
			 * Buffers always must be large enough to accommodate
			 * their prereserved space. We return E2BIG instead
			 * of ENOMEM in this case to allow for user-level
			 * software to differentiate the cases.
			 */
			return (E2BIG);
		}

		limit = opt[DTRACEOPT_BUFLIMIT] * size / 100;
		rval = dtrace_buffer_alloc(buf, limit, size, flags, cpu);

		if (rval != ENOMEM) {
			opt[which] = size;
			return (rval);
		}

		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
			return (rval);
	}

	return (ENOMEM);
}
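
/*
 * Editor's note (illustrative, not in the original source): the loop above
 * halves the requested size on each ENOMEM until an allocation succeeds or
 * the size falls below 8 bytes. Worked example with hypothetical numbers:
 * a 4MB request under memory pressure is retried at 2MB, 1MB, and so on;
 * with a buflimit of 90, each attempt passes a wakeup limit of 90% of the
 * attempted size (e.g. limit == 943718 for the 1MB try, using integer
 * division).
 */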

static int
dtrace_state_buffers(dtrace_state_t *state)
{
	dtrace_speculation_t *spec = state->dts_speculations;
	int rval, i;

	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
	    DTRACEOPT_BUFSIZE)) != 0)
		return (rval);

	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
	    DTRACEOPT_AGGSIZE)) != 0)
		return (rval);

	for (i = 0; i < state->dts_nspeculations; i++) {
		if ((rval = dtrace_state_buffer(state,
		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
			return (rval);
	}

	return (0);
}

static void
dtrace_state_prereserve(dtrace_state_t *state)
{
	dtrace_ecb_t *ecb;
	dtrace_probe_t *probe;

	state->dts_reserve = 0;

	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
		return;

	/*
	 * If our buffer policy is a "fill" buffer policy, we need to set the
	 * prereserved space to be the space required by the END probes.
	 */
	probe = dtrace_probes[dtrace_probeid_end - 1];
	ASSERT(probe != NULL);

	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
		if (ecb->dte_state != state)
			continue;

		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
	}
}

static int
dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
{
	dtrace_optval_t *opt = state->dts_options, sz, nspec;
	dtrace_speculation_t *spec;
	dtrace_buffer_t *buf;
	cyc_handler_t hdlr;
	cyc_time_t when;
	int rval = 0, i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
	dtrace_icookie_t cookie;

	lck_mtx_lock(&cpu_lock);
	lck_mtx_lock(&dtrace_lock);

	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
		rval = EBUSY;
		goto out;
	}

	/*
	 * Before we can perform any checks, we must prime all of the
	 * retained enablings that correspond to this state.
	 */
	dtrace_enabling_prime(state);

	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
		rval = EACCES;
		goto out;
	}

	dtrace_state_prereserve(state);

	/*
	 * Now we want to try to allocate our speculations.
	 * We do not automatically resize the number of speculations; if
	 * this fails, we will fail the operation.
	 */
	nspec = opt[DTRACEOPT_NSPEC];
	ASSERT(nspec != DTRACEOPT_UNSET);

	if (nspec > INT_MAX) {
		rval = ENOMEM;
		goto out;
	}

	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);

	if (spec == NULL) {
		rval = ENOMEM;
		goto out;
	}

	state->dts_speculations = spec;
	state->dts_nspeculations = (int)nspec;

	for (i = 0; i < nspec; i++) {
		if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
			rval = ENOMEM;
			goto err;
		}

		spec[i].dtsp_buffer = buf;
	}

	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
		if (dtrace_anon.dta_state == NULL) {
			rval = ENOENT;
			goto out;
		}

		if (state->dts_necbs != 0) {
			rval = EALREADY;
			goto out;
		}

		state->dts_anon = dtrace_anon_grab();
		ASSERT(state->dts_anon != NULL);
		state = state->dts_anon;

		/*
		 * We want "grabanon" to be set in the grabbed state, so we'll
		 * copy that option value from the grabbing state into the
		 * grabbed state.
		 */
		state->dts_options[DTRACEOPT_GRABANON] =
		    opt[DTRACEOPT_GRABANON];

		*cpu = dtrace_anon.dta_beganon;

		/*
		 * If the anonymous state is active (as it almost certainly
		 * is if the anonymous enabling ultimately matched anything),
		 * we don't allow any further option processing -- but we
		 * don't return failure.
		 */
		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
			goto out;
	}

	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
	    opt[DTRACEOPT_AGGSIZE] != 0) {
		if (state->dts_aggregations == NULL) {
			/*
			 * We're not going to create an aggregation buffer
			 * because we don't have any ECBs that contain
			 * aggregations -- set this option to 0.
			 */
			opt[DTRACEOPT_AGGSIZE] = 0;
		} else {
			/*
			 * If we have an aggregation buffer, we must also have
			 * a buffer to use as scratch.
			 */
			if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
			    (size_t)opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
				opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
			}
		}
	}

	if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
	    opt[DTRACEOPT_SPECSIZE] != 0) {
		if (!state->dts_speculates) {
			/*
			 * We're not going to create speculation buffers
			 * because we don't have any ECBs that actually
			 * speculate -- set the speculation size to 0.
			 */
			opt[DTRACEOPT_SPECSIZE] = 0;
		}
	}

	/*
	 * The bare minimum size for any buffer that we're actually going to
	 * do anything to is sizeof (uint64_t).
	 */
	sz = sizeof (uint64_t);

	if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
	    (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
	    (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
		/*
		 * A buffer size has been explicitly set to 0 (or to a size
		 * that will be adjusted to 0) and we need the space -- we
		 * need to return failure. We return ENOSPC to differentiate
		 * it from failing to allocate a buffer due to failure to meet
		 * the reserve (for which we return E2BIG).
		 */
		rval = ENOSPC;
		goto out;
	}

	if ((rval = dtrace_state_buffers(state)) != 0)
		goto err;

	if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
		sz = dtrace_dstate_defsize;

	do {
		rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);

		if (rval == 0)
			break;

		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
			goto err;
	} while (sz >>= 1);

	opt[DTRACEOPT_DYNVARSIZE] = sz;

	if (rval != 0)
		goto err;

	if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
		opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;

	if (opt[DTRACEOPT_CLEANRATE] == 0)
		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;

	if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;

	if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;

	if (opt[DTRACEOPT_STRSIZE] > dtrace_strsize_max)
		opt[DTRACEOPT_STRSIZE] = dtrace_strsize_max;

	if (opt[DTRACEOPT_STRSIZE] < dtrace_strsize_min)
		opt[DTRACEOPT_STRSIZE] = dtrace_strsize_min;

	if (opt[DTRACEOPT_BUFLIMIT] > dtrace_buflimit_max)
		opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_max;

	if (opt[DTRACEOPT_BUFLIMIT] < dtrace_buflimit_min)
		opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_min;

	hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
	hdlr.cyh_arg = state;
	hdlr.cyh_level = CY_LOW_LEVEL;

	when.cyt_when = 0;
	when.cyt_interval = opt[DTRACEOPT_CLEANRATE];

	state->dts_cleaner = cyclic_add(&hdlr, &when);

	hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
	hdlr.cyh_arg = state;
	hdlr.cyh_level = CY_LOW_LEVEL;

	when.cyt_when = 0;
	when.cyt_interval = dtrace_deadman_interval;

	state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
	state->dts_deadman = cyclic_add(&hdlr, &when);

	state->dts_activity = DTRACE_ACTIVITY_WARMUP;

	/*
	 * Now it's time to actually fire the BEGIN probe. We need to disable
	 * interrupts here both to record the CPU on which we fired the BEGIN
	 * probe (the data from this CPU will be processed first at user
	 * level) and to manually activate the buffer for this CPU.
	 */
	cookie = dtrace_interrupt_disable();
	*cpu = CPU->cpu_id;
	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;

	dtrace_probe(dtrace_probeid_begin,
	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
	dtrace_interrupt_enable(cookie);
	/*
	 * We may have had an exit action from a BEGIN probe; only change our
	 * state to ACTIVE if we're still in WARMUP.
	 */
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);

	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;

	/*
	 * Regardless of whether we're now in ACTIVE or DRAINING, we want
	 * each CPU to transition its principal buffer out of the INACTIVE
	 * state. Doing this assures that no CPU will suddenly begin
	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
	 * atomically transition from processing none of a state's ECBs to
	 * processing all of them.
	 */
	dtrace_xcall(DTRACE_CPUALL,
	    (dtrace_xcall_t)dtrace_buffer_activate, state);
	goto out;

err:
	dtrace_buffer_free(state->dts_buffer);
	dtrace_buffer_free(state->dts_aggbuffer);

	if ((nspec = state->dts_nspeculations) == 0) {
		ASSERT(state->dts_speculations == NULL);
		goto out;
	}

	spec = state->dts_speculations;
	ASSERT(spec != NULL);

	for (i = 0; i < state->dts_nspeculations; i++) {
		if ((buf = spec[i].dtsp_buffer) == NULL)
			break;

		dtrace_buffer_free(buf);
		kmem_free(buf, bufsize);
	}

	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
	state->dts_nspeculations = 0;
	state->dts_speculations = NULL;

out:
	lck_mtx_unlock(&dtrace_lock);
	lck_mtx_unlock(&cpu_lock);

	return (rval);
}

static int
dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
{
	dtrace_icookie_t cookie;

	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);

	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
		return (EINVAL);

	/*
	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
	 * to be sure that every CPU has seen it. See below for the details
	 * on why this is done.
	 */
	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
	dtrace_sync();

	/*
	 * By this point, it is impossible for any CPU to be still processing
	 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
	 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
	 * iff we're in the END probe.
	 */
	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
	dtrace_sync();
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);

	/*
	 * Finally, we can release the reserve and call the END probe. We
	 * disable interrupts across calling the END probe to allow us to
	 * return the CPU on which we actually called the END probe. This
	 * allows user-land to be sure that this CPU's principal buffer is
	 * processed last.
	 */
	state->dts_reserve = 0;

	cookie = dtrace_interrupt_disable();
	*cpu = CPU->cpu_id;
	dtrace_probe(dtrace_probeid_end,
	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
	dtrace_interrupt_enable(cookie);

	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
	dtrace_sync();

	return (0);
}
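
/*
 * Editor's note (illustrative, not in the original source): the two
 * back-to-back activity transitions above form a simple barrier protocol.
 * The first dtrace_sync() guarantees that no CPU still believes the state
 * is ACTIVE before COOLDOWN is published, so the COOLDOWN value can serve
 * as an unambiguous "we are inside the END probe" signal to dtrace_probe()
 * and its callees.
 */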

static int
dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
    dtrace_optval_t val)
{
	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);

	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
		return (EBUSY);

	if (option >= DTRACEOPT_MAX)
		return (EINVAL);

	if (option != DTRACEOPT_CPU && val < 0)
		return (EINVAL);

	switch (option) {
	case DTRACEOPT_DESTRUCTIVE:
		if (dtrace_destructive_disallow)
			return (EACCES);

		state->dts_cred.dcr_destructive = 1;
		break;

	case DTRACEOPT_BUFSIZE:
	case DTRACEOPT_DYNVARSIZE:
	case DTRACEOPT_AGGSIZE:
	case DTRACEOPT_SPECSIZE:
	case DTRACEOPT_STRSIZE:
		if (val < 0)
			return (EINVAL);

		if (val >= LONG_MAX) {
			/*
			 * If this is an otherwise negative value, set it to
			 * the highest multiple of 128m less than LONG_MAX.
			 * Technically, we're adjusting the size without
			 * regard to the buffer resizing policy, but in fact,
			 * this has no effect -- if we set the buffer size to
			 * ~LONG_MAX and the buffer policy is ultimately set to
			 * be "manual", the buffer allocation is guaranteed to
			 * fail, if only because the allocation requires two
			 * buffers. (We set the size to the highest multiple
			 * of 128m because it ensures that the size will
			 * remain a multiple of a megabyte when repeatedly
			 * halved -- all the way down to 15m.)
			 */
			val = LONG_MAX - (1 << 27) + 1;
		}
	}

	state->dts_options[option] = val;

	return (0);
}
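
/*
 * Editor's note (illustrative, not in the original source): on LP64,
 * LONG_MAX - (1 << 27) + 1 == 2^63 - 2^27 == 0x7FFFFFFFF8000000, the
 * largest multiple of 128MB representable in a long, which is exactly
 * what the comment above is computing.
 */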
14839
14840static void
14841dtrace_state_destroy(dtrace_state_t *state)
14842{
14843 dtrace_ecb_t *ecb;
14844 dtrace_vstate_t *vstate = &state->dts_vstate;
14845 minor_t minor = getminor(state->dts_dev);
c910b4d9 14846 int i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
2d21ac55
A
14847 dtrace_speculation_t *spec = state->dts_speculations;
14848 int nspec = state->dts_nspeculations;
14849 uint32_t match;
14850
5ba3f43e
A
14851 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14852 LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
14853
14854 /*
14855 * First, retract any retained enablings for this state.
14856 */
14857 dtrace_enabling_retract(state);
14858 ASSERT(state->dts_nretained == 0);
14859
14860 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14861 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14862 /*
14863 * We have managed to come into dtrace_state_destroy() on a
14864 * hot enabling -- almost certainly because of a disorderly
14865 * shutdown of a consumer. (That is, a consumer that is
14866 * exiting without having called dtrace_stop().) In this case,
14867 * we're going to set our activity to be KILLED, and then
14868 * issue a sync to be sure that everyone is out of probe
14869 * context before we start blowing away ECBs.
14870 */
14871 state->dts_activity = DTRACE_ACTIVITY_KILLED;
14872 dtrace_sync();
14873 }
14874
14875 /*
14876 * Release the credential hold we took in dtrace_state_create().
14877 */
14878 if (state->dts_cred.dcr_cred != NULL)
d9a64523 14879 kauth_cred_unref(&state->dts_cred.dcr_cred);
2d21ac55
A
14880
14881 /*
14882 * Now we can safely disable and destroy any enabled probes. Because
14883 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14884 * (especially if they're all enabled), we take two passes through the
14885 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14886 * in the second we disable whatever is left over.
14887 */
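	/*
	 * (The loop below runs at most twice: a first pass with match ==
	 * DTRACE_PRIV_KERNEL, then a second with match == 0 that matches
	 * every remaining ECB before breaking out.)
	 */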
14888 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14889 for (i = 0; i < state->dts_necbs; i++) {
14890 if ((ecb = state->dts_ecbs[i]) == NULL)
14891 continue;
14892
14893 if (match && ecb->dte_probe != NULL) {
14894 dtrace_probe_t *probe = ecb->dte_probe;
14895 dtrace_provider_t *prov = probe->dtpr_provider;
14896
14897 if (!(prov->dtpv_priv.dtpp_flags & match))
14898 continue;
14899 }
14900
14901 dtrace_ecb_disable(ecb);
14902 dtrace_ecb_destroy(ecb);
14903 }
14904
14905 if (!match)
14906 break;
14907 }
14908
14909 /*
14910 * Before we free the buffers, perform one more sync to assure that
14911 * every CPU is out of probe context.
14912 */
14913 dtrace_sync();
14914
14915 dtrace_buffer_free(state->dts_buffer);
14916 dtrace_buffer_free(state->dts_aggbuffer);
14917
cb323159
A
14918 for (i = 0; i < (int)NCPU; i++) {
14919 kmem_free(state->dts_rstate[i], 2 * sizeof(uint64_t));
14920 }
14921 kmem_free(state->dts_rstate, NCPU * sizeof(uint64_t*));
14922
2d21ac55
A
14923 for (i = 0; i < nspec; i++)
14924 dtrace_buffer_free(spec[i].dtsp_buffer);
14925
14926 if (state->dts_cleaner != CYCLIC_NONE)
14927 cyclic_remove(state->dts_cleaner);
14928
14929 if (state->dts_deadman != CYCLIC_NONE)
14930 cyclic_remove(state->dts_deadman);
14931
14932 dtrace_dstate_fini(&vstate->dtvs_dynvars);
14933 dtrace_vstate_fini(vstate);
14934 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14935
14936 if (state->dts_aggregations != NULL) {
b0d623f7 14937#if DEBUG
2d21ac55
A
14938 for (i = 0; i < state->dts_naggregations; i++)
14939 ASSERT(state->dts_aggregations[i] == NULL);
14940#endif
14941 ASSERT(state->dts_naggregations > 0);
14942 kmem_free(state->dts_aggregations,
14943 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14944 }
14945
14946 kmem_free(state->dts_buffer, bufsize);
14947 kmem_free(state->dts_aggbuffer, bufsize);
14948
14949 for (i = 0; i < nspec; i++)
14950 kmem_free(spec[i].dtsp_buffer, bufsize);
14951
14952 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14953
14954 dtrace_format_destroy(state);
14955
14956 vmem_destroy(state->dts_aggid_arena);
39037602 14957 dtrace_state_free(minor);
2d21ac55
A
14958}
14959
14960/*
14961 * DTrace Anonymous Enabling Functions
14962 */
d9a64523
A
14963
14964int
14965dtrace_keep_kernel_symbols(void)
14966{
14967 if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
14968 return 0;
14969 }
14970
14971 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL)
14972 return 1;
14973
14974 return 0;
14975}
14976
2d21ac55
A
14977static dtrace_state_t *
14978dtrace_anon_grab(void)
14979{
14980 dtrace_state_t *state;
14981
5ba3f43e 14982 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
14983
14984 if ((state = dtrace_anon.dta_state) == NULL) {
14985 ASSERT(dtrace_anon.dta_enabling == NULL);
14986 return (NULL);
14987 }
14988
14989 ASSERT(dtrace_anon.dta_enabling != NULL);
14990 ASSERT(dtrace_retained != NULL);
14991
14992 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14993 dtrace_anon.dta_enabling = NULL;
14994 dtrace_anon.dta_state = NULL;
14995
14996 return (state);
14997}
14998
14999static void
15000dtrace_anon_property(void)
15001{
15002 int i, rv;
15003 dtrace_state_t *state;
15004 dof_hdr_t *dof;
15005 char c[32]; /* enough for "dof-data-" + digits */
15006
5ba3f43e
A
15007 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15008 LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
15009
15010 for (i = 0; ; i++) {
15011 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
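		/*
		 * (This scans dof-data-0, dof-data-1, ... in order; the
		 * first missing property terminates the scan.)
		 */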
15012
15013 dtrace_err_verbose = 1;
15014
15015 if ((dof = dtrace_dof_property(c)) == NULL) {
15016 dtrace_err_verbose = 0;
15017 break;
15018 }
15019
d9a64523 15020#ifdef illumos
2d21ac55
A
15021 /*
15022 * We want to create anonymous state, so we need to transition
15023 * the kernel debugger to indicate that DTrace is active. If
15024 * this fails (e.g. because the debugger has modified text in
15025 * some way), we won't continue with the processing.
15026 */
15027 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15028 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
15029 "enabling ignored.");
15030 dtrace_dof_destroy(dof);
15031 break;
15032 }
d9a64523 15033#endif
2d21ac55
A
15034
15035 /*
15036 * If we haven't allocated an anonymous state, we'll do so now.
15037 */
15038 if ((state = dtrace_anon.dta_state) == NULL) {
b0d623f7
A
15039 rv = dtrace_state_create(NULL, NULL, &state);
15040 dtrace_anon.dta_state = state;
15041 if (rv != 0 || state == NULL) {
2d21ac55
A
15042 /*
15043 * This basically shouldn't happen: the only
15044 * failure mode from dtrace_state_create() is a
15045 * failure of ddi_soft_state_zalloc() that
15046 * itself should never happen. Still, the
15047 * interface allows for a failure mode, and
15048 * we want to fail as gracefully as possible:
15049 * we'll emit an error message and cease
15050 * processing anonymous state in this case.
15051 */
15052 cmn_err(CE_WARN, "failed to create "
15053 "anonymous state");
15054 dtrace_dof_destroy(dof);
15055 break;
15056 }
15057 }
15058
15059 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
15060 &dtrace_anon.dta_enabling, 0, B_TRUE);
15061
15062 if (rv == 0)
15063 rv = dtrace_dof_options(dof, state);
15064
15065 dtrace_err_verbose = 0;
15066 dtrace_dof_destroy(dof);
15067
15068 if (rv != 0) {
15069 /*
15070 * This is malformed DOF; chuck any anonymous state
15071 * that we created.
15072 */
15073 ASSERT(dtrace_anon.dta_enabling == NULL);
15074 dtrace_state_destroy(state);
15075 dtrace_anon.dta_state = NULL;
15076 break;
15077 }
15078
15079 ASSERT(dtrace_anon.dta_enabling != NULL);
15080 }
15081
15082 if (dtrace_anon.dta_enabling != NULL) {
15083 int rval;
15084
15085 /*
15086 * dtrace_enabling_retain() can only fail because we are
15087 * trying to retain more enablings than are allowed -- but
15088 * we only have one anonymous enabling, and we are guaranteed
15089 * to be allowed at least one retained enabling; we assert
15090 * that dtrace_enabling_retain() returns success.
15091 */
15092 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
15093 ASSERT(rval == 0);
15094
15095 dtrace_enabling_dump(dtrace_anon.dta_enabling);
15096 }
15097}
15098
15099/*
15100 * DTrace Helper Functions
15101 */
15102static void
15103dtrace_helper_trace(dtrace_helper_action_t *helper,
15104 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
15105{
b0d623f7
A
15106 uint32_t size, next, nnext;
15107 int i;
2d21ac55
A
15108 dtrace_helptrace_t *ent;
15109 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
15110
15111 if (!dtrace_helptrace_enabled)
15112 return;
15113
b0d623f7 15114 ASSERT((uint32_t)vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
2d21ac55
A
15115
15116 /*
15117 * What would a tracing framework be without its own tracing
15118 * framework? (Well, a hell of a lot simpler, for starters...)
15119 */
15120 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
15121 sizeof (uint64_t) - sizeof (uint64_t);
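	/*
	 * (A hedged note: the subtraction assumes dtrace_helptrace_t
	 * already embeds one dtht_locals element, as in the illumos
	 * definition, so only nlocals - 1 additional slots are needed.)
	 */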
15122
15123 /*
15124 * Iterate until we can allocate a slot in the trace buffer.
15125 */
15126 do {
15127 next = dtrace_helptrace_next;
15128
15129 if (next + size < dtrace_helptrace_bufsize) {
15130 nnext = next + size;
15131 } else {
15132 nnext = size;
15133 }
15134 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
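	/*
	 * Illustrative wrap case (hypothetical numbers): with
	 * dtrace_helptrace_bufsize == 4096 and size == 64, a caller at
	 * next == 4064 fails the bounds check, so nnext becomes 64 and
	 * the entry lands at offset 0 via the nnext == size test below.
	 */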
15135
15136 /*
15137 * We have our slot; fill it in.
15138 */
15139 if (nnext == size)
15140 next = 0;
15141
15142 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
15143 ent->dtht_helper = helper;
15144 ent->dtht_where = where;
15145 ent->dtht_nlocals = vstate->dtvs_nlocals;
15146
15147 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
15148 mstate->dtms_fltoffs : -1;
15149 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
15150 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
15151
15152 for (i = 0; i < vstate->dtvs_nlocals; i++) {
15153 dtrace_statvar_t *svar;
15154
15155 if ((svar = vstate->dtvs_locals[i]) == NULL)
15156 continue;
15157
c910b4d9 15158 ASSERT(svar->dtsv_size >= (int)NCPU * sizeof (uint64_t));
2d21ac55
A
15159 ent->dtht_locals[i] =
15160 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
15161 }
15162}
15163
f427ee49 15164__attribute__((noinline))
2d21ac55
A
15165static uint64_t
15166dtrace_helper(int which, dtrace_mstate_t *mstate,
15167 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
15168{
15169 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
15170 uint64_t sarg0 = mstate->dtms_arg[0];
15171 uint64_t sarg1 = mstate->dtms_arg[1];
c910b4d9 15172 uint64_t rval = 0;
2d21ac55
A
15173 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
15174 dtrace_helper_action_t *helper;
15175 dtrace_vstate_t *vstate;
15176 dtrace_difo_t *pred;
15177 int i, trace = dtrace_helptrace_enabled;
15178
15179 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
15180
15181 if (helpers == NULL)
15182 return (0);
15183
15184 if ((helper = helpers->dthps_actions[which]) == NULL)
15185 return (0);
15186
15187 vstate = &helpers->dthps_vstate;
15188 mstate->dtms_arg[0] = arg0;
15189 mstate->dtms_arg[1] = arg1;
15190
15191 /*
15192 * Now iterate over each helper. If its predicate evaluates to 'true',
15193 * we'll call the corresponding actions. Note that the below calls
15194 * to dtrace_dif_emulate() may set faults in machine state. This is
15195 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
15196 * the stored DIF offset with its own (which is the desired behavior).
15197 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
15198 * from machine state; this is okay, too.
15199 */
15200 for (; helper != NULL; helper = helper->dtha_next) {
15201 if ((pred = helper->dtha_predicate) != NULL) {
15202 if (trace)
15203 dtrace_helper_trace(helper, mstate, vstate, 0);
15204
15205 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
15206 goto next;
15207
15208 if (*flags & CPU_DTRACE_FAULT)
15209 goto err;
15210 }
15211
15212 for (i = 0; i < helper->dtha_nactions; i++) {
15213 if (trace)
15214 dtrace_helper_trace(helper,
15215 mstate, vstate, i + 1);
15216
15217 rval = dtrace_dif_emulate(helper->dtha_actions[i],
15218 mstate, vstate, state);
15219
15220 if (*flags & CPU_DTRACE_FAULT)
15221 goto err;
15222 }
15223
15224next:
15225 if (trace)
15226 dtrace_helper_trace(helper, mstate, vstate,
15227 DTRACE_HELPTRACE_NEXT);
15228 }
15229
15230 if (trace)
15231 dtrace_helper_trace(helper, mstate, vstate,
15232 DTRACE_HELPTRACE_DONE);
15233
15234 /*
15235 * Restore the arg0 that we saved upon entry.
15236 */
15237 mstate->dtms_arg[0] = sarg0;
15238 mstate->dtms_arg[1] = sarg1;
15239
15240 return (rval);
15241
15242err:
15243 if (trace)
15244 dtrace_helper_trace(helper, mstate, vstate,
15245 DTRACE_HELPTRACE_ERR);
15246
15247 /*
15248 * Restore the arg0 that we saved upon entry.
15249 */
15250 mstate->dtms_arg[0] = sarg0;
15251 mstate->dtms_arg[1] = sarg1;
15252
fe8ab488 15253 return (0);
2d21ac55
A
15254}
15255
15256static void
15257dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15258 dtrace_vstate_t *vstate)
15259{
15260 int i;
15261
15262 if (helper->dtha_predicate != NULL)
15263 dtrace_difo_release(helper->dtha_predicate, vstate);
15264
15265 for (i = 0; i < helper->dtha_nactions; i++) {
15266 ASSERT(helper->dtha_actions[i] != NULL);
15267 dtrace_difo_release(helper->dtha_actions[i], vstate);
15268 }
15269
15270 kmem_free(helper->dtha_actions,
15271 helper->dtha_nactions * sizeof (dtrace_difo_t *));
15272 kmem_free(helper, sizeof (dtrace_helper_action_t));
15273}
15274
2d21ac55
A
15275static int
15276dtrace_helper_destroygen(proc_t* p, int gen)
15277{
2d21ac55
A
15278 dtrace_helpers_t *help = p->p_dtrace_helpers;
15279 dtrace_vstate_t *vstate;
b0d623f7 15280 uint_t i;
2d21ac55 15281
d9a64523 15282 LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
5ba3f43e 15283 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
15284
15285 if (help == NULL || gen > help->dthps_generation)
15286 return (EINVAL);
15287
15288 vstate = &help->dthps_vstate;
15289
15290 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15291 dtrace_helper_action_t *last = NULL, *h, *next;
15292
15293 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15294 next = h->dtha_next;
15295
15296 if (h->dtha_generation == gen) {
15297 if (last != NULL) {
15298 last->dtha_next = next;
15299 } else {
15300 help->dthps_actions[i] = next;
15301 }
15302
15303 dtrace_helper_action_destroy(h, vstate);
15304 } else {
15305 last = h;
15306 }
15307 }
15308 }
15309
15310 /*
15311	 * Iterate until we've cleared out all helper providers with the
15312 * given generation number.
15313 */
15314 for (;;) {
c910b4d9 15315 dtrace_helper_provider_t *prov = NULL;
2d21ac55
A
15316
15317 /*
15318 * Look for a helper provider with the right generation. We
15319 * have to start back at the beginning of the list each time
15320 * because we drop dtrace_lock. It's unlikely that we'll make
15321 * more than two passes.
15322 */
15323 for (i = 0; i < help->dthps_nprovs; i++) {
15324 prov = help->dthps_provs[i];
15325
15326 if (prov->dthp_generation == gen)
15327 break;
15328 }
15329
15330 /*
15331 * If there were no matches, we're done.
15332 */
15333 if (i == help->dthps_nprovs)
15334 break;
15335
15336 /*
15337 * Move the last helper provider into this slot.
15338 */
15339 help->dthps_nprovs--;
15340 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15341 help->dthps_provs[help->dthps_nprovs] = NULL;
15342
15343 lck_mtx_unlock(&dtrace_lock);
15344
15345 /*
15346 * If we have a meta provider, remove this helper provider.
15347 */
2d21ac55
A
15348 if (dtrace_meta_pid != NULL) {
15349 ASSERT(dtrace_deferred_pid == NULL);
15350 dtrace_helper_provider_remove(&prov->dthp_prov,
d190cdc3 15351 p);
2d21ac55 15352 }
2d21ac55
A
15353
15354 dtrace_helper_provider_destroy(prov);
15355
15356 lck_mtx_lock(&dtrace_lock);
15357 }
15358
15359 return (0);
15360}
15361
15362static int
15363dtrace_helper_validate(dtrace_helper_action_t *helper)
15364{
15365 int err = 0, i;
15366 dtrace_difo_t *dp;
15367
15368 if ((dp = helper->dtha_predicate) != NULL)
15369 err += dtrace_difo_validate_helper(dp);
15370
15371 for (i = 0; i < helper->dtha_nactions; i++)
15372 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15373
15374 return (err == 0);
15375}
15376
2d21ac55
A
15377static int
15378dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep)
2d21ac55
A
15379{
15380 dtrace_helpers_t *help;
15381 dtrace_helper_action_t *helper, *last;
15382 dtrace_actdesc_t *act;
15383 dtrace_vstate_t *vstate;
15384 dtrace_predicate_t *pred;
15385 int count = 0, nactions = 0, i;
15386
15387 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15388 return (EINVAL);
15389
2d21ac55 15390 help = p->p_dtrace_helpers;
2d21ac55
A
15391 last = help->dthps_actions[which];
15392 vstate = &help->dthps_vstate;
15393
15394 for (count = 0; last != NULL; last = last->dtha_next) {
15395 count++;
15396 if (last->dtha_next == NULL)
15397 break;
15398 }
15399
15400 /*
15401 * If we already have dtrace_helper_actions_max helper actions for this
15402 * helper action type, we'll refuse to add a new one.
15403 */
15404 if (count >= dtrace_helper_actions_max)
15405 return (ENOSPC);
15406
15407 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15408 helper->dtha_generation = help->dthps_generation;
15409
15410 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15411 ASSERT(pred->dtp_difo != NULL);
15412 dtrace_difo_hold(pred->dtp_difo);
15413 helper->dtha_predicate = pred->dtp_difo;
15414 }
15415
15416 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15417 if (act->dtad_kind != DTRACEACT_DIFEXPR)
15418 goto err;
15419
15420 if (act->dtad_difo == NULL)
15421 goto err;
15422
15423 nactions++;
15424 }
15425
15426 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15427 (helper->dtha_nactions = nactions), KM_SLEEP);
15428
15429 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15430 dtrace_difo_hold(act->dtad_difo);
15431 helper->dtha_actions[i++] = act->dtad_difo;
15432 }
15433
15434 if (!dtrace_helper_validate(helper))
15435 goto err;
15436
15437 if (last == NULL) {
15438 help->dthps_actions[which] = helper;
15439 } else {
15440 last->dtha_next = helper;
15441 }
15442
b0d623f7 15443 if ((uint32_t)vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
2d21ac55
A
15444 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15445 dtrace_helptrace_next = 0;
15446 }
15447
15448 return (0);
15449err:
15450 dtrace_helper_action_destroy(helper, vstate);
15451 return (EINVAL);
15452}
15453
15454static void
15455dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15456 dof_helper_t *dofhp)
15457{
d9a64523 15458 LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
5ba3f43e 15459 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
2d21ac55 15460
2d21ac55
A
15461 lck_mtx_lock(&dtrace_lock);
15462
15463 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15464 /*
15465 * If the dtrace module is loaded but not attached, or if
15466	 * there isn't a meta provider registered to deal with
15467 * these provider descriptions, we need to postpone creating
15468 * the actual providers until later.
15469 */
15470
15471 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15472 dtrace_deferred_pid != help) {
15473 help->dthps_deferred = 1;
15474 help->dthps_pid = p->p_pid;
15475 help->dthps_next = dtrace_deferred_pid;
15476 help->dthps_prev = NULL;
15477 if (dtrace_deferred_pid != NULL)
15478 dtrace_deferred_pid->dthps_prev = help;
15479 dtrace_deferred_pid = help;
15480 }
15481
15482 lck_mtx_unlock(&dtrace_lock);
15483
15484 } else if (dofhp != NULL) {
15485 /*
15486 * If the dtrace module is loaded and we have a particular
15487 * helper provider description, pass that off to the
15488 * meta provider.
15489 */
15490
15491 lck_mtx_unlock(&dtrace_lock);
15492
d190cdc3 15493 dtrace_helper_provide(dofhp, p);
2d21ac55
A
15494
15495 } else {
15496 /*
15497 * Otherwise, just pass all the helper provider descriptions
15498 * off to the meta provider.
15499 */
15500
b0d623f7 15501 uint_t i;
2d21ac55
A
15502 lck_mtx_unlock(&dtrace_lock);
15503
15504 for (i = 0; i < help->dthps_nprovs; i++) {
15505 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
d190cdc3 15506 p);
2d21ac55
A
15507 }
15508 }
2d21ac55
A
15509}
15510
2d21ac55
A
15511static int
15512dtrace_helper_provider_add(proc_t* p, dof_helper_t *dofhp, int gen)
2d21ac55
A
15513{
15514 dtrace_helpers_t *help;
15515 dtrace_helper_provider_t *hprov, **tmp_provs;
15516 uint_t tmp_maxprovs, i;
15517
5ba3f43e 15518 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55 15519 help = p->p_dtrace_helpers;
2d21ac55
A
15520 ASSERT(help != NULL);
15521
15522 /*
15523 * If we already have dtrace_helper_providers_max helper providers,
15524	 * we'll refuse to add a new one.
15525 */
15526 if (help->dthps_nprovs >= dtrace_helper_providers_max)
15527 return (ENOSPC);
15528
15529 /*
15530 * Check to make sure this isn't a duplicate.
15531 */
15532 for (i = 0; i < help->dthps_nprovs; i++) {
15533 if (dofhp->dofhp_addr ==
15534 help->dthps_provs[i]->dthp_prov.dofhp_addr)
15535 return (EALREADY);
15536 }
15537
15538 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15539 hprov->dthp_prov = *dofhp;
15540 hprov->dthp_ref = 1;
15541 hprov->dthp_generation = gen;
15542
15543 /*
15544 * Allocate a bigger table for helper providers if it's already full.
15545 */
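	/*
	 * (Capacity grows 0 -> 2 -> 4 -> 8 -> ..., clamped to
	 * dtrace_helper_providers_max; the dthps_nprovs check above
	 * guarantees the clamped table still has room for this entry.)
	 */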
15546 if (help->dthps_maxprovs == help->dthps_nprovs) {
15547 tmp_maxprovs = help->dthps_maxprovs;
15548 tmp_provs = help->dthps_provs;
15549
15550 if (help->dthps_maxprovs == 0)
15551 help->dthps_maxprovs = 2;
15552 else
15553 help->dthps_maxprovs *= 2;
15554 if (help->dthps_maxprovs > dtrace_helper_providers_max)
15555 help->dthps_maxprovs = dtrace_helper_providers_max;
15556
15557 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15558
15559 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15560 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15561
15562 if (tmp_provs != NULL) {
15563 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15564 sizeof (dtrace_helper_provider_t *));
15565 kmem_free(tmp_provs, tmp_maxprovs *
15566 sizeof (dtrace_helper_provider_t *));
15567 }
15568 }
15569
15570 help->dthps_provs[help->dthps_nprovs] = hprov;
15571 help->dthps_nprovs++;
15572
15573 return (0);
15574}
15575
15576static void
15577dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15578{
15579 lck_mtx_lock(&dtrace_lock);
15580
15581 if (--hprov->dthp_ref == 0) {
15582 dof_hdr_t *dof;
15583 lck_mtx_unlock(&dtrace_lock);
15584 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15585 dtrace_dof_destroy(dof);
15586 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15587 } else {
15588 lck_mtx_unlock(&dtrace_lock);
15589 }
15590}
15591
15592static int
15593dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15594{
15595 uintptr_t daddr = (uintptr_t)dof;
15596 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15597 dof_provider_t *provider;
15598 dof_probe_t *probe;
15599 uint8_t *arg;
15600 char *strtab, *typestr;
15601 dof_stridx_t typeidx;
15602 size_t typesz;
15603 uint_t nprobes, j, k;
15604
15605 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15606
15607 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15608 dtrace_dof_error(dof, "misaligned section offset");
15609 return (-1);
15610 }
15611
15612 /*
15613 * The section needs to be large enough to contain the DOF provider
15614 * structure appropriate for the given version.
15615 */
15616 if (sec->dofs_size <
15617 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15618 offsetof(dof_provider_t, dofpv_prenoffs) :
15619 sizeof (dof_provider_t))) {
15620 dtrace_dof_error(dof, "provider section too small");
15621 return (-1);
15622 }
15623
15624 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15625 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15626 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15627 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15628 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15629
15630 if (str_sec == NULL || prb_sec == NULL ||
15631 arg_sec == NULL || off_sec == NULL)
15632 return (-1);
15633
15634 enoff_sec = NULL;
15635
15636 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15637 provider->dofpv_prenoffs != DOF_SECT_NONE &&
15638 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15639 provider->dofpv_prenoffs)) == NULL)
15640 return (-1);
15641
15642 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15643
15644 if (provider->dofpv_name >= str_sec->dofs_size ||
15645 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15646 dtrace_dof_error(dof, "invalid provider name");
15647 return (-1);
15648 }
15649
15650 if (prb_sec->dofs_entsize == 0 ||
15651 prb_sec->dofs_entsize > prb_sec->dofs_size) {
15652 dtrace_dof_error(dof, "invalid entry size");
15653 return (-1);
15654 }
15655
15656 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15657 dtrace_dof_error(dof, "misaligned entry size");
15658 return (-1);
15659 }
15660
15661 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15662 dtrace_dof_error(dof, "invalid entry size");
15663 return (-1);
15664 }
15665
15666 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15667 dtrace_dof_error(dof, "misaligned section offset");
15668 return (-1);
15669 }
15670
15671 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15672 dtrace_dof_error(dof, "invalid entry size");
15673 return (-1);
15674 }
15675
15676 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15677
15678 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15679
15680 /*
15681 * Take a pass through the probes to check for errors.
15682 */
15683 for (j = 0; j < nprobes; j++) {
15684 probe = (dof_probe_t *)(uintptr_t)(daddr +
15685 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15686
15687 if (probe->dofpr_func >= str_sec->dofs_size) {
15688 dtrace_dof_error(dof, "invalid function name");
15689 return (-1);
15690 }
15691
15692 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15693 dtrace_dof_error(dof, "function name too long");
15694 return (-1);
15695 }
15696
15697 if (probe->dofpr_name >= str_sec->dofs_size ||
15698 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15699 dtrace_dof_error(dof, "invalid probe name");
15700 return (-1);
15701 }
15702
15703 /*
15704 * The offset count must not wrap the index, and the offsets
15705 * must also not overflow the section's data.
15706 */
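		/*
		 * (Hypothetical wrap, assuming 32-bit index arithmetic:
		 * offidx == 0xfffffffe with noffs == 4 sums to 2, which
		 * would defeat the size check alone -- hence the explicit
		 * "sum < offidx" test below.)
		 */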
15707 if (probe->dofpr_offidx + probe->dofpr_noffs <
15708 probe->dofpr_offidx ||
15709 (probe->dofpr_offidx + probe->dofpr_noffs) *
15710 off_sec->dofs_entsize > off_sec->dofs_size) {
15711 dtrace_dof_error(dof, "invalid probe offset");
15712 return (-1);
15713 }
15714
15715 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15716 /*
15717 * If there's no is-enabled offset section, make sure
15718 * there aren't any is-enabled offsets. Otherwise
15719 * perform the same checks as for probe offsets
15720 * (immediately above).
15721 */
15722 if (enoff_sec == NULL) {
15723 if (probe->dofpr_enoffidx != 0 ||
15724 probe->dofpr_nenoffs != 0) {
15725 dtrace_dof_error(dof, "is-enabled "
15726 "offsets with null section");
15727 return (-1);
15728 }
15729 } else if (probe->dofpr_enoffidx +
15730 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15731 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15732 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15733 dtrace_dof_error(dof, "invalid is-enabled "
15734 "offset");
15735 return (-1);
15736 }
15737
15738 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15739 dtrace_dof_error(dof, "zero probe and "
15740 "is-enabled offsets");
15741 return (-1);
15742 }
15743 } else if (probe->dofpr_noffs == 0) {
15744 dtrace_dof_error(dof, "zero probe offsets");
15745 return (-1);
15746 }
15747
15748 if (probe->dofpr_argidx + probe->dofpr_xargc <
15749 probe->dofpr_argidx ||
15750 (probe->dofpr_argidx + probe->dofpr_xargc) *
15751 arg_sec->dofs_entsize > arg_sec->dofs_size) {
15752 dtrace_dof_error(dof, "invalid args");
15753 return (-1);
15754 }
15755
15756 typeidx = probe->dofpr_nargv;
15757 typestr = strtab + probe->dofpr_nargv;
15758 for (k = 0; k < probe->dofpr_nargc; k++) {
15759 if (typeidx >= str_sec->dofs_size) {
15760 dtrace_dof_error(dof, "bad "
15761 "native argument type");
15762 return (-1);
15763 }
15764
15765 typesz = strlen(typestr) + 1;
15766 if (typesz > DTRACE_ARGTYPELEN) {
15767 dtrace_dof_error(dof, "native "
15768 "argument type too long");
15769 return (-1);
15770 }
15771 typeidx += typesz;
15772 typestr += typesz;
15773 }
15774
15775 typeidx = probe->dofpr_xargv;
15776 typestr = strtab + probe->dofpr_xargv;
15777 for (k = 0; k < probe->dofpr_xargc; k++) {
15778 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15779 dtrace_dof_error(dof, "bad "
15780 "native argument index");
15781 return (-1);
15782 }
15783
15784 if (typeidx >= str_sec->dofs_size) {
15785 dtrace_dof_error(dof, "bad "
15786 "translated argument type");
15787 return (-1);
15788 }
15789
15790 typesz = strlen(typestr) + 1;
15791 if (typesz > DTRACE_ARGTYPELEN) {
15792 dtrace_dof_error(dof, "translated argument "
15793 "type too long");
15794 return (-1);
15795 }
15796
15797 typeidx += typesz;
15798 typestr += typesz;
15799 }
15800 }
15801
15802 return (0);
15803}
15804
2d21ac55
A
15805static int
15806dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp)
2d21ac55
A
15807{
15808 dtrace_helpers_t *help;
15809 dtrace_vstate_t *vstate;
15810 dtrace_enabling_t *enab = NULL;
15811 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15812 uintptr_t daddr = (uintptr_t)dof;
15813
d9a64523 15814 LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
5ba3f43e 15815 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55 15816
2d21ac55
A
15817 if ((help = p->p_dtrace_helpers) == NULL)
15818 help = dtrace_helpers_create(p);
2d21ac55
A
15819
15820 vstate = &help->dthps_vstate;
15821
15822 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15823 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15824 dtrace_dof_destroy(dof);
15825 return (rv);
15826 }
15827
15828 /*
15829 * Look for helper providers and validate their descriptions.
15830 */
15831 if (dhp != NULL) {
b0d623f7 15832 for (i = 0; (uint32_t)i < dof->dofh_secnum; i++) {
2d21ac55
A
15833 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15834 dof->dofh_secoff + i * dof->dofh_secsize);
15835
15836 if (sec->dofs_type != DOF_SECT_PROVIDER)
15837 continue;
15838
15839 if (dtrace_helper_provider_validate(dof, sec) != 0) {
15840 dtrace_enabling_destroy(enab);
15841 dtrace_dof_destroy(dof);
15842 return (-1);
15843 }
15844
15845 nprovs++;
15846 }
15847 }
15848
15849 /*
15850 * Now we need to walk through the ECB descriptions in the enabling.
15851 */
15852 for (i = 0; i < enab->dten_ndesc; i++) {
15853 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15854 dtrace_probedesc_t *desc = &ep->dted_probe;
15855
fe8ab488 15856 /* APPLE NOTE: Darwin employs size bounded string operation. */
b0d623f7
A
15857 if (!LIT_STRNEQL(desc->dtpd_provider, "dtrace"))
15858 continue;
2d21ac55 15859
b0d623f7
A
15860 if (!LIT_STRNEQL(desc->dtpd_mod, "helper"))
15861 continue;
15862
15863 if (!LIT_STRNEQL(desc->dtpd_func, "ustack"))
15864 continue;
b0d623f7 15865
b0d623f7
A
15866 if ((rv = dtrace_helper_action_add(p, DTRACE_HELPER_ACTION_USTACK,
15867 ep)) != 0) {
b0d623f7 15868 /*
2d21ac55
A
15869 * Adding this helper action failed -- we are now going
15870 * to rip out the entire generation and return failure.
15871 */
2d21ac55 15872 (void) dtrace_helper_destroygen(p, help->dthps_generation);
2d21ac55
A
15873 dtrace_enabling_destroy(enab);
15874 dtrace_dof_destroy(dof);
15875 return (-1);
15876 }
15877
15878 nhelpers++;
15879 }
15880
15881 if (nhelpers < enab->dten_ndesc)
15882 dtrace_dof_error(dof, "unmatched helpers");
15883
15884 gen = help->dthps_generation++;
15885 dtrace_enabling_destroy(enab);
15886
15887 if (dhp != NULL && nprovs > 0) {
15888 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
2d21ac55 15889 if (dtrace_helper_provider_add(p, dhp, gen) == 0) {
2d21ac55 15890 lck_mtx_unlock(&dtrace_lock);
2d21ac55 15891 dtrace_helper_provider_register(p, help, dhp);
2d21ac55
A
15892 lck_mtx_lock(&dtrace_lock);
15893
15894 destroy = 0;
15895 }
15896 }
15897
15898 if (destroy)
15899 dtrace_dof_destroy(dof);
15900
15901 return (gen);
15902}
15903
2d21ac55 15904/*
fe8ab488 15905 * APPLE NOTE: DTrace lazy dof implementation
2d21ac55
A
15906 *
15907 * DTrace user static probes (USDT probes) and helper actions are loaded
15908 * in a process by processing dof sections. The dof sections are passed
15909 * into the kernel by dyld, in a dof_ioctl_data_t block. It is rather
15910 * expensive to process dof for a process that will never use it. There
15911 * is a memory cost (allocating the providers/probes), and a cpu cost
15912 * (creating the providers/probes).
15913 *
15914 * To reduce this cost, we use "lazy dof". The normal procedure for
15915 * dof processing is to copyin the dof(s) pointed to by the dof_ioctl_data_t
15916 * block, and invoke dtrace_helper_slurp() on them. When "lazy dof" is
15917 * used, each process retains the dof_ioctl_data_t block, instead of
15918 * copying in the data it points to.
15919 *
15920 * The dof_ioctl_data_t blocks are managed as if they were the actual
15921 * processed dof; on fork the block is copied to the child, on exec and
15922 * exit the block is freed.
15923 *
15924 * If the process loads library(s) containing additional dof, the
15925 * new dof_ioctl_data_t is merged with the existing block.
15926 *
15927 * There are a few catches that make this slightly more difficult.
15928 * When dyld registers dof_ioctl_data_t blocks, it expects a unique
15929 * identifier value for each dof in the block. In non-lazy dof terms,
15930 * this is the generation that dof was loaded in. If we hand back
15931 * a UID for a lazy dof, that same UID must be able to unload the
15932 * dof once it has become non-lazy. To meet this requirement, the
15933 * code that loads lazy dof requires that the UID's for dof(s) in
15934 * the lazy dof be sorted in ascending order. It is okay to skip
15935 * UIDs, e.g., 1 -> 5 -> 6 is legal.
15936 *
15937 * Once a process has become non-lazy, it will stay non-lazy. All
15938 * future dof operations for that process will be non-lazy, even
15939 * if the dof mode transitions back to lazy.
15940 *
15941 * Always do lazy dof checks before non-lazy ones (i.e., in fork, exit, and exec).
15942 * That way if the lazy check fails due to transitioning to non-lazy, the
15943 * right thing is done with the newly faulted in dof.
15944 */
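
/*
 * A minimal sketch of the lazy dof bookkeeping described above (the
 * addresses A0..A2 are hypothetical): a process that lazily registered
 * three dofs might hold
 *
 *	dof_ioctl_data_t {
 *		dofiod_count = 3,
 *		dofiod_helpers = {
 *			{ .dofhp_addr = A0, .dofhp_dof = 1 },	generation 1
 *			{ .dofhp_addr = A1, .dofhp_dof = 5 },	generation 5
 *			{ .dofhp_addr = A2, .dofhp_dof = 6 },	generation 6
 *		}
 *	}
 *
 * While lazy, dofhp_dof carries the generation; when the dof is finally
 * faulted in, dtrace_lazy_dofs_process() restores dofhp_dof from
 * dofhp_addr before slurping.
 */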
15945
15946/*
15947 * This method is a bit squicky. It must handle:
15948 *
15949 * dof should not be lazy.
15950 * dof should have been handled lazily, but there was an error.
15951 * dof was handled lazily, and needs to be freed.
15952 * dof was handled lazily, and must not be freed.
15953 *
15954 *
15955 * Returns EACCES if dof should be handled non-lazily.
15956 *
15957 * KERN_SUCCESS and all other return codes indicate lazy handling of dof.
15958 *
15959 * If the dofs data is claimed by this method, dofs_claimed will be set.
15960 * Callers should not free claimed dofs.
15961 */
b0d623f7 15962static int
2d21ac55
A
15963dtrace_lazy_dofs_add(proc_t *p, dof_ioctl_data_t* incoming_dofs, int *dofs_claimed)
15964{
15965 ASSERT(p);
15966 ASSERT(incoming_dofs && incoming_dofs->dofiod_count > 0);
15967
15968 int rval = 0;
15969 *dofs_claimed = 0;
15970
15971 lck_rw_lock_shared(&dtrace_dof_mode_lock);
15972
2d21ac55
A
15973 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
15974 ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
15975
15976 /*
15977 * Any existing helpers force non-lazy behavior.
15978 */
15979 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
d9a64523 15980 dtrace_sprlock(p);
2d21ac55
A
15981
15982 dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
15983 unsigned int existing_dofs_count = (existing_dofs) ? existing_dofs->dofiod_count : 0;
15984 unsigned int i, merged_dofs_count = incoming_dofs->dofiod_count + existing_dofs_count;
15985
15986 /*
15987 * Range check...
15988 */
15989 if (merged_dofs_count == 0 || merged_dofs_count > 1024) {
15990 dtrace_dof_error(NULL, "lazy_dofs_add merged_dofs_count out of range");
15991 rval = EINVAL;
15992 goto unlock;
15993 }
15994
15995 /*
15996 * Each dof being added must be assigned a unique generation.
15997 */
15998 uint64_t generation = (existing_dofs) ? existing_dofs->dofiod_helpers[existing_dofs_count - 1].dofhp_dof + 1 : 1;
15999 for (i=0; i<incoming_dofs->dofiod_count; i++) {
16000 /*
16001 * We rely on these being the same so we can overwrite dofhp_dof and not lose info.
16002 */
16003 ASSERT(incoming_dofs->dofiod_helpers[i].dofhp_dof == incoming_dofs->dofiod_helpers[i].dofhp_addr);
16004 incoming_dofs->dofiod_helpers[i].dofhp_dof = generation++;
16005 }
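		/*
		 * (E.g., if the last existing entry carried generation 7,
		 * three incoming dofs are stamped 8, 9, and 10, which keeps
		 * the ascending order checked in the DEBUG walk below.)
		 */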
16006
16007
16008 if (existing_dofs) {
16009 /*
16010 * Merge the existing and incoming dofs
16011 */
16012 size_t merged_dofs_size = DOF_IOCTL_DATA_T_SIZE(merged_dofs_count);
16013 dof_ioctl_data_t* merged_dofs = kmem_alloc(merged_dofs_size, KM_SLEEP);
16014
16015 bcopy(&existing_dofs->dofiod_helpers[0],
16016 &merged_dofs->dofiod_helpers[0],
16017 sizeof(dof_helper_t) * existing_dofs_count);
16018 bcopy(&incoming_dofs->dofiod_helpers[0],
16019 &merged_dofs->dofiod_helpers[existing_dofs_count],
16020 sizeof(dof_helper_t) * incoming_dofs->dofiod_count);
16021
16022 merged_dofs->dofiod_count = merged_dofs_count;
16023
16024 kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
16025
16026 p->p_dtrace_lazy_dofs = merged_dofs;
16027 } else {
16028 /*
16029 * Claim the incoming dofs
16030 */
16031 *dofs_claimed = 1;
16032 p->p_dtrace_lazy_dofs = incoming_dofs;
16033 }
16034
16035#if DEBUG
16036 dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
16037 for (i=0; i<all_dofs->dofiod_count-1; i++) {
16038 ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
16039 }
b0d623f7 16040#endif /* DEBUG */
2d21ac55
A
16041
16042unlock:
d9a64523 16043 dtrace_sprunlock(p);
2d21ac55
A
16044 } else {
16045 rval = EACCES;
16046 }
16047
16048 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16049
16050 return rval;
16051}
16052
16053/*
16054 * Returns:
16055 *
16056 * EINVAL: lazy dof is enabled, but the requested generation was not found.
16057 * EACCES: This removal needs to be handled non-lazily.
16058 */
b0d623f7 16059static int
2d21ac55
A
16060dtrace_lazy_dofs_remove(proc_t *p, int generation)
16061{
16062 int rval = EINVAL;
16063
16064 lck_rw_lock_shared(&dtrace_dof_mode_lock);
16065
2d21ac55
A
16066 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16067 ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
16068
16069 /*
16070 * Any existing helpers force non-lazy behavior.
16071 */
16072 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
d9a64523 16073 dtrace_sprlock(p);
2d21ac55
A
16074
16075 dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
16076
16077 if (existing_dofs) {
16078 int index, existing_dofs_count = existing_dofs->dofiod_count;
16079 for (index=0; index<existing_dofs_count; index++) {
16080 if ((int)existing_dofs->dofiod_helpers[index].dofhp_dof == generation) {
16081 dof_ioctl_data_t* removed_dofs = NULL;
16082
16083 /*
16084	 * If this is the only dof, we'll free the whole block and swap in NULL.
16085 */
16086 if (existing_dofs_count > 1) {
16087 int removed_dofs_count = existing_dofs_count - 1;
16088 size_t removed_dofs_size = DOF_IOCTL_DATA_T_SIZE(removed_dofs_count);
16089
16090 removed_dofs = kmem_alloc(removed_dofs_size, KM_SLEEP);
16091 removed_dofs->dofiod_count = removed_dofs_count;
16092
16093 /*
16094 * copy the remaining data.
16095 */
16096 if (index > 0) {
16097 bcopy(&existing_dofs->dofiod_helpers[0],
16098 &removed_dofs->dofiod_helpers[0],
16099 index * sizeof(dof_helper_t));
16100 }
16101
16102 if (index < existing_dofs_count-1) {
16103 bcopy(&existing_dofs->dofiod_helpers[index+1],
16104 &removed_dofs->dofiod_helpers[index],
16105 (existing_dofs_count - index - 1) * sizeof(dof_helper_t));
16106 }
16107 }
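					/*
					 * (E.g., removing index 1 from a
					 * 3-entry block copies entry 0 and
					 * entry 2 into a new 2-entry block.)
					 */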
16108
16109 kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
16110
16111 p->p_dtrace_lazy_dofs = removed_dofs;
16112
16113 rval = KERN_SUCCESS;
16114
16115 break;
16116 }
16117 }
16118
16119#if DEBUG
16120 dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
16121 if (all_dofs) {
16122 unsigned int i;
16123 for (i=0; i<all_dofs->dofiod_count-1; i++) {
16124 ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
16125 }
16126 }
16127#endif
16128
16129 }
d9a64523
A
16130 dtrace_sprunlock(p);
16131 } else {
2d21ac55
A
16132 rval = EACCES;
16133 }
16134
16135 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
39037602 16136
2d21ac55
A
16137 return rval;
16138}
16139
16140void
16141dtrace_lazy_dofs_destroy(proc_t *p)
16142{
16143 lck_rw_lock_shared(&dtrace_dof_mode_lock);
d9a64523 16144 dtrace_sprlock(p);
2d21ac55 16145
2d21ac55
A
16146 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16147
16148 dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
16149 p->p_dtrace_lazy_dofs = NULL;
16150
d9a64523 16151 dtrace_sprunlock(p);
2d21ac55
A
16152 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16153
16154 if (lazy_dofs) {
16155 kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
16156 }
16157}
16158
2d21ac55
A
16159static int
16160dtrace_lazy_dofs_proc_iterate_filter(proc_t *p, void* ignored)
16161{
16162#pragma unused(ignored)
16163 /*
16164 * Okay to NULL test without taking the sprlock.
16165 */
16166 return p->p_dtrace_lazy_dofs != NULL;
16167}
16168
39037602
A
16169static void
16170 dtrace_lazy_dofs_process(proc_t *p)
{
2d21ac55
A
16171 /*
16172 * It is possible this process may exit during our attempt to
16173 * fault in the dof. We could fix this by holding locks longer,
16174 * but the errors are benign.
16175 */
d9a64523 16176 dtrace_sprlock(p);
2d21ac55 16177
39037602 16178
2d21ac55
A
16179 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16180 ASSERT(dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF);
16181
2d21ac55
A
16182 dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
16183 p->p_dtrace_lazy_dofs = NULL;
16184
d9a64523
A
16185 dtrace_sprunlock(p);
16186 lck_mtx_lock(&dtrace_meta_lock);
2d21ac55
A
16187 /*
16188 * Process each dof_helper_t
16189 */
16190 if (lazy_dofs != NULL) {
16191 unsigned int i;
16192 int rval;
16193
16194 for (i=0; i<lazy_dofs->dofiod_count; i++) {
16195 /*
16196 * When loading lazy dof, we depend on the generations being sorted in ascending order.
16197 */
16198 ASSERT(i >= (lazy_dofs->dofiod_count - 1) || lazy_dofs->dofiod_helpers[i].dofhp_dof < lazy_dofs->dofiod_helpers[i+1].dofhp_dof);
16199
16200 dof_helper_t *dhp = &lazy_dofs->dofiod_helpers[i];
16201
16202 /*
16203 * We stored the generation in dofhp_dof. Save it, and restore the original value.
16204 */
16205 int generation = dhp->dofhp_dof;
16206 dhp->dofhp_dof = dhp->dofhp_addr;
16207
16208 dof_hdr_t *dof = dtrace_dof_copyin_from_proc(p, dhp->dofhp_dof, &rval);
39037602 16209
2d21ac55
A
16210 if (dof != NULL) {
16211 dtrace_helpers_t *help;
16212
16213 lck_mtx_lock(&dtrace_lock);
16214
16215 /*
16216 * This must be done with the dtrace_lock held
16217 */
16218 if ((help = p->p_dtrace_helpers) == NULL)
16219 help = dtrace_helpers_create(p);
16220
16221 /*
16222 * If the generation value has been bumped, someone snuck in
16223	 * when we released the dtrace lock. We have to dump this generation;
16224	 * there is no safe way to load it.
16225 */
16226 if (help->dthps_generation <= generation) {
16227 help->dthps_generation = generation;
16228
16229 /*
16230 * dtrace_helper_slurp() takes responsibility for the dof --
16231 * it may free it now or it may save it and free it later.
16232 */
16233 if ((rval = dtrace_helper_slurp(p, dof, dhp)) != generation) {
16234 dtrace_dof_error(NULL, "returned value did not match expected generation");
16235 }
16236 }
16237
16238 lck_mtx_unlock(&dtrace_lock);
16239 }
16240 }
d9a64523 16241 lck_mtx_unlock(&dtrace_meta_lock);
2d21ac55 16242 kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
d9a64523
A
16243 } else {
16244 lck_mtx_unlock(&dtrace_meta_lock);
2d21ac55 16245 }
39037602
A
16246}
16247
16248static int
16249dtrace_lazy_dofs_proc_iterate_doit(proc_t *p, void* ignored)
16250{
16251#pragma unused(ignored)
16252
16253 dtrace_lazy_dofs_process(p);
2d21ac55
A
16254
16255 return PROC_RETURNED;
16256}
16257
39037602
A
16258#define DTRACE_LAZY_DOFS_DUPLICATED 1
16259
16260static int
16261dtrace_lazy_dofs_duplicate(proc_t *parent, proc_t *child)
16262{
5ba3f43e
A
16263 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
16264 LCK_MTX_ASSERT(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
16265 LCK_MTX_ASSERT(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
39037602
A
16266
16267 lck_rw_lock_shared(&dtrace_dof_mode_lock);
d9a64523 16268 dtrace_sprlock(parent);
39037602
A
16269
16270 /*
16271 * We need to make sure that the transition to lazy dofs -> helpers
16272 * was atomic for our parent
16273 */
16274 ASSERT(parent->p_dtrace_lazy_dofs == NULL || parent->p_dtrace_helpers == NULL);
16275 /*
16276 * In theory we should hold the child sprlock, but this is safe...
16277 */
16278 ASSERT(child->p_dtrace_lazy_dofs == NULL && child->p_dtrace_helpers == NULL);
16279
16280 dof_ioctl_data_t* parent_dofs = parent->p_dtrace_lazy_dofs;
16281 dof_ioctl_data_t* child_dofs = NULL;
16282 if (parent_dofs) {
16283 size_t parent_dofs_size = DOF_IOCTL_DATA_T_SIZE(parent_dofs->dofiod_count);
16284 child_dofs = kmem_alloc(parent_dofs_size, KM_SLEEP);
16285 bcopy(parent_dofs, child_dofs, parent_dofs_size);
16286 }
16287
d9a64523 16288 dtrace_sprunlock(parent);
39037602
A
16289
16290 if (child_dofs) {
d9a64523 16291 dtrace_sprlock(child);
39037602 16292 child->p_dtrace_lazy_dofs = child_dofs;
d9a64523 16293 dtrace_sprunlock(child);
39037602
A
16294 /**
16295 * We process the DOF at this point if the mode is set to
16296 * LAZY_OFF. This can happen if DTrace is still processing the
16297	 * DOF of another process (which can happen because the
16298	 * protected pager can have a huge latency)
16299	 * but has not processed our parent yet.
16300 */
16301 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) {
16302 dtrace_lazy_dofs_process(child);
16303 }
16304 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16305
16306 return DTRACE_LAZY_DOFS_DUPLICATED;
16307 }
16308 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16309
16310 return 0;
16311}
16312
2d21ac55
A
16313static dtrace_helpers_t *
16314dtrace_helpers_create(proc_t *p)
16315{
16316 dtrace_helpers_t *help;
16317
5ba3f43e 16318 LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
2d21ac55
A
16319 ASSERT(p->p_dtrace_helpers == NULL);
16320
16321 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16322 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16323 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16324
16325 p->p_dtrace_helpers = help;
16326 dtrace_helpers++;
16327
16328 return (help);
16329}
16330
2d21ac55
A
16331static void
16332dtrace_helpers_destroy(proc_t* p)
16333{
2d21ac55
A
16334 dtrace_helpers_t *help;
16335 dtrace_vstate_t *vstate;
b0d623f7 16336 uint_t i;
2d21ac55 16337
d9a64523 16338 lck_mtx_lock(&dtrace_meta_lock);
2d21ac55
A
16339 lck_mtx_lock(&dtrace_lock);
16340
16341 ASSERT(p->p_dtrace_helpers != NULL);
16342 ASSERT(dtrace_helpers > 0);
16343
16344 help = p->p_dtrace_helpers;
16345 vstate = &help->dthps_vstate;
16346
16347 /*
16348 * We're now going to lose the help from this process.
16349 */
16350 p->p_dtrace_helpers = NULL;
16351 dtrace_sync();
16352
16353 /*
16354	 * Destroy the helper actions.
16355 */
16356 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16357 dtrace_helper_action_t *h, *next;
16358
16359 for (h = help->dthps_actions[i]; h != NULL; h = next) {
16360 next = h->dtha_next;
16361 dtrace_helper_action_destroy(h, vstate);
16362 h = next;
16363 }
16364 }
16365
16366 lck_mtx_unlock(&dtrace_lock);
16367
16368 /*
16369 * Destroy the helper providers.
16370 */
16371 if (help->dthps_maxprovs > 0) {
2d21ac55
A
16372 if (dtrace_meta_pid != NULL) {
16373 ASSERT(dtrace_deferred_pid == NULL);
16374
16375 for (i = 0; i < help->dthps_nprovs; i++) {
16376 dtrace_helper_provider_remove(
d190cdc3 16377 &help->dthps_provs[i]->dthp_prov, p);
2d21ac55
A
16378 }
16379 } else {
16380 lck_mtx_lock(&dtrace_lock);
16381 ASSERT(help->dthps_deferred == 0 ||
16382 help->dthps_next != NULL ||
16383 help->dthps_prev != NULL ||
16384 help == dtrace_deferred_pid);
16385
16386 /*
16387 * Remove the helper from the deferred list.
16388 */
16389 if (help->dthps_next != NULL)
16390 help->dthps_next->dthps_prev = help->dthps_prev;
16391 if (help->dthps_prev != NULL)
16392 help->dthps_prev->dthps_next = help->dthps_next;
16393 if (dtrace_deferred_pid == help) {
16394 dtrace_deferred_pid = help->dthps_next;
16395 ASSERT(help->dthps_prev == NULL);
16396 }
16397
16398 lck_mtx_unlock(&dtrace_lock);
16399 }
16400
2d21ac55
A
16401
16402 for (i = 0; i < help->dthps_nprovs; i++) {
16403 dtrace_helper_provider_destroy(help->dthps_provs[i]);
16404 }
16405
16406 kmem_free(help->dthps_provs, help->dthps_maxprovs *
16407 sizeof (dtrace_helper_provider_t *));
16408 }
16409
16410 lck_mtx_lock(&dtrace_lock);
16411
16412 dtrace_vstate_fini(&help->dthps_vstate);
16413 kmem_free(help->dthps_actions,
16414 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16415 kmem_free(help, sizeof (dtrace_helpers_t));
16416
16417 --dtrace_helpers;
16418 lck_mtx_unlock(&dtrace_lock);
d9a64523 16419 lck_mtx_unlock(&dtrace_meta_lock);
2d21ac55
A
16420}
16421
16422static void
16423dtrace_helpers_duplicate(proc_t *from, proc_t *to)
16424{
16425 dtrace_helpers_t *help, *newhelp;
16426 dtrace_helper_action_t *helper, *new, *last;
16427 dtrace_difo_t *dp;
16428 dtrace_vstate_t *vstate;
b0d623f7
A
16429 uint_t i;
16430 int j, sz, hasprovs = 0;
2d21ac55 16431
d9a64523 16432 lck_mtx_lock(&dtrace_meta_lock);
2d21ac55
A
16433 lck_mtx_lock(&dtrace_lock);
16434 ASSERT(from->p_dtrace_helpers != NULL);
16435 ASSERT(dtrace_helpers > 0);
16436
16437 help = from->p_dtrace_helpers;
16438 newhelp = dtrace_helpers_create(to);
16439 ASSERT(to->p_dtrace_helpers != NULL);
16440
16441 newhelp->dthps_generation = help->dthps_generation;
16442 vstate = &newhelp->dthps_vstate;
16443
16444 /*
16445 * Duplicate the helper actions.
16446 */
16447 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16448 if ((helper = help->dthps_actions[i]) == NULL)
16449 continue;
16450
16451 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16452 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16453 KM_SLEEP);
16454 new->dtha_generation = helper->dtha_generation;
16455
16456 if ((dp = helper->dtha_predicate) != NULL) {
16457 dp = dtrace_difo_duplicate(dp, vstate);
16458 new->dtha_predicate = dp;
16459 }
16460
16461 new->dtha_nactions = helper->dtha_nactions;
16462 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
16463 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
16464
b0d623f7
A
16465 for (j = 0; j < new->dtha_nactions; j++) {
16466 dtrace_difo_t *dpj = helper->dtha_actions[j];
16467
16468 ASSERT(dpj != NULL);
16469 dpj = dtrace_difo_duplicate(dpj, vstate);
16470 new->dtha_actions[j] = dpj;
16471 }
2d21ac55
A
16472
16473 if (last != NULL) {
16474 last->dtha_next = new;
16475 } else {
16476 newhelp->dthps_actions[i] = new;
16477 }
16478
16479 last = new;
16480 }
16481 }
16482
16483 /*
16484 * Duplicate the helper providers and register them with the
16485 * DTrace framework.
16486 */
16487 if (help->dthps_nprovs > 0) {
16488 newhelp->dthps_nprovs = help->dthps_nprovs;
16489 newhelp->dthps_maxprovs = help->dthps_nprovs;
16490 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16491 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16492 for (i = 0; i < newhelp->dthps_nprovs; i++) {
16493 newhelp->dthps_provs[i] = help->dthps_provs[i];
16494 newhelp->dthps_provs[i]->dthp_ref++;
16495 }
16496
16497 hasprovs = 1;
16498 }
16499
16500 lck_mtx_unlock(&dtrace_lock);
16501
16502 if (hasprovs)
16503 dtrace_helper_provider_register(to, newhelp, NULL);
d9a64523
A
16504
16505 lck_mtx_unlock(&dtrace_meta_lock);
2d21ac55
A
16506}
16507
39037602
A
16508/**
16509 * DTrace Process functions
16510 */
16511
16512void
16513dtrace_proc_fork(proc_t *parent_proc, proc_t *child_proc, int spawn)
16514{
16515 /*
16516	 * This code applies to new processes that are copying the task
16517 * and thread state and address spaces of their parent process.
16518 */
16519 if (!spawn) {
16520 /*
16521 * APPLE NOTE: Solaris does a sprlock() and drops the
16522 * proc_lock here. We're cheating a bit and only taking
16523 * the p_dtrace_sprlock lock. A full sprlock would
16524 * task_suspend the parent.
16525 */
d9a64523 16526 dtrace_sprlock(parent_proc);
39037602
A
16527
16528 /*
16529 * Remove all DTrace tracepoints from the child process. We
16530 * need to do this _before_ duplicating USDT providers since
16531 * any associated probes may be immediately enabled.
16532 */
16533 if (parent_proc->p_dtrace_count > 0) {
16534 dtrace_fasttrap_fork(parent_proc, child_proc);
16535 }
16536
d9a64523 16537 dtrace_sprunlock(parent_proc);
39037602
A
16538
16539 /*
16540 * Duplicate any lazy dof(s). This must be done while NOT
16541 * holding the parent sprlock! Lock ordering is
16542 * dtrace_dof_mode_lock, then sprlock. It is imperative we
16543 * always call dtrace_lazy_dofs_duplicate, rather than null
16544 * check and call if !NULL. If we NULL test, during lazy dof
16545 * faulting we can race with the faulting code and proceed
16546 * from here to beyond the helpers copy. The lazy dof
16547 * faulting will then fail to copy the helpers to the child
16548 * process. We return if we duplicated lazy dofs as a process
16549 * can only have one at the same time to avoid a race between
16550 * a dtrace client and dtrace_proc_fork where a process would
16551 * end up with both lazy dofs and helpers.
16552 */
16553 if (dtrace_lazy_dofs_duplicate(parent_proc, child_proc) == DTRACE_LAZY_DOFS_DUPLICATED) {
16554 return;
16555 }
16556
16557 /*
16558 * Duplicate any helper actions and providers if they haven't
16559 * already.
16560 */
16561#if !defined(__APPLE__)
16562 /*
16563 * The SFORKING
16564 * we set above informs the code to enable USDT probes that
16565 * sprlock() may fail because the child is being forked.
16566 */
16567#endif
16568 /*
16569 * APPLE NOTE: As best I can tell, Apple's sprlock() equivalent
16570 * never fails to find the child. We do not set SFORKING.
16571 */
16572 if (parent_proc->p_dtrace_helpers != NULL && dtrace_helpers_fork) {
16573 (*dtrace_helpers_fork)(parent_proc, child_proc);
16574 }
16575 }
16576}
16577
16578void
16579dtrace_proc_exec(proc_t *p)
16580{
16581 /*
16582 * Invalidate any predicate evaluation already cached for this thread by DTrace.
16583 * That's because we've just stored to p_comm and DTrace refers to that when it
16584 * evaluates the "execname" special variable. uid and gid may have changed as well.
16585 */
16586 dtrace_set_thread_predcache(current_thread(), 0);
16587
16588 /*
16589 * Free any outstanding lazy dof entries. It is imperative we
16590 * always call dtrace_lazy_dofs_destroy, rather than null check
16591 * and call if !NULL. If we NULL test, during lazy dof faulting
16592 * we can race with the faulting code and proceed from here to
16593 * beyond the helpers cleanup. The lazy dof faulting will then
16594 * install new helpers which no longer belong to this process!
16595 */
16596 dtrace_lazy_dofs_destroy(p);
16597
16598
16599 /*
16600 * Clean up any DTrace helpers for the process.
16601 */
16602 if (p->p_dtrace_helpers != NULL && dtrace_helpers_cleanup) {
16603 (*dtrace_helpers_cleanup)(p);
16604 }
16605
16606 /*
16607 * Cleanup the DTrace provider associated with this process.
16608 */
16609 proc_lock(p);
16610 if (p->p_dtrace_probes && dtrace_fasttrap_exec_ptr) {
16611 (*dtrace_fasttrap_exec_ptr)(p);
16612 }
16613 proc_unlock(p);
16614}
16615
16616void
16617dtrace_proc_exit(proc_t *p)
16618{
16619 /*
16620 * Free any outstanding lazy dof entries. It is imperative we
16621 * always call dtrace_lazy_dofs_destroy, rather than null check
16622 * and call if !NULL. If we NULL test, during lazy dof faulting
16623 * we can race with the faulting code and proceed from here to
16624 * beyond the helpers cleanup. The lazy dof faulting will then
16625 * install new helpers which will never be cleaned up, and leak.
16626 */
16627 dtrace_lazy_dofs_destroy(p);
16628
16629 /*
16630 * Clean up any DTrace helper actions or probes for the process.
16631 */
16632 if (p->p_dtrace_helpers != NULL) {
16633 (*dtrace_helpers_cleanup)(p);
16634 }
16635
16636 /*
16637 * Clean up any DTrace probes associated with this process.
16638 */
16639 /*
16640 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
16641 * call this after dtrace_helpers_cleanup()
16642 */
16643 proc_lock(p);
16644 if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
16645 (*dtrace_fasttrap_exit_ptr)(p);
16646 }
16647 proc_unlock(p);
16648}
16649
2d21ac55
A
16650/*
16651 * DTrace Hook Functions
16652 */
6d2010ae 16653
6d2010ae 16654/*
fe8ab488
A
16655 * APPLE NOTE: dtrace_modctl_* routines for kext support.
16656 * Used to manipulate the modctl list within dtrace xnu.
16657 */
16658
16659modctl_t *dtrace_modctl_list;
16660
16661static void
16662dtrace_modctl_add(struct modctl * newctl)
16663{
16664 struct modctl *nextp, *prevp;
16665
16666 ASSERT(newctl != NULL);
16667	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16668
16669	// Insert the new module at the front of the list.
16670
16671 newctl->mod_next = dtrace_modctl_list;
16672 dtrace_modctl_list = newctl;
16673
16674 /*
16675 * If a module exists with the same name, then that module
16676 * must have been unloaded with enabled probes. We will move
16677 * the unloaded module to the new module's stale chain and
16678 * then stop traversing the list.
16679 */
16680
16681 prevp = newctl;
16682 nextp = newctl->mod_next;
16683
16684 while (nextp != NULL) {
16685 if (nextp->mod_loaded) {
16686 /* This is a loaded module. Keep traversing. */
16687 prevp = nextp;
16688 nextp = nextp->mod_next;
16689 continue;
16690 }
16691 else {
16692 /* Found an unloaded module */
16693 if (strncmp (newctl->mod_modname, nextp->mod_modname, KMOD_MAX_NAME)) {
16694 /* Names don't match. Keep traversing. */
16695 prevp = nextp;
16696 nextp = nextp->mod_next;
16697 continue;
16698 }
16699 else {
16700 /* We found a stale entry, move it. We're done. */
16701 prevp->mod_next = nextp->mod_next;
16702 newctl->mod_stale = nextp;
16703 nextp->mod_next = NULL;
16704 break;
16705 }
16706 }
16707 }
16708}
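
/*
 * Illustrative sketch (not part of the build): after a kext is reloaded,
 * the list built above has the new modctl at the head, with the unloaded
 * entry hanging off its mod_stale chain. A hypothetical diagnostic walk,
 * assuming mod_lock is held, could look like this:
 */
#if 0
static void
dtrace_modctl_walk_sketch(void)
{
	struct modctl *ctl, *stale;

	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);

	for (ctl = dtrace_modctl_list; ctl != NULL; ctl = ctl->mod_next) {
		printf("modctl '%s' (id %u) loaded=%d\n",
		    ctl->mod_modname, (uint_t)ctl->mod_id, ctl->mod_loaded);
		for (stale = ctl->mod_stale; stale != NULL; stale = stale->mod_stale) {
			printf("  stale '%s' (id %u)\n",
			    stale->mod_modname, (uint_t)stale->mod_id);
		}
	}
}
#endif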
16709
16710static modctl_t *
16711dtrace_modctl_lookup(struct kmod_info * kmod)
16712{
16713	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16714
16715 struct modctl * ctl;
16716
16717 for (ctl = dtrace_modctl_list; ctl; ctl=ctl->mod_next) {
16718 if (ctl->mod_id == kmod->id)
16719 return(ctl);
16720 }
16721 return (NULL);
16722}
16723
16724/*
16725 * This routine is called from dtrace_module_unloaded().
16726 * It removes a modctl structure and its stale chain
16727 * from the kext shadow list.
16728 */
16729static void
16730dtrace_modctl_remove(struct modctl * ctl)
16731{
16732 ASSERT(ctl != NULL);
16733	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16734 modctl_t *prevp, *nextp, *curp;
16735
16736 // Remove stale chain first
16737 for (curp=ctl->mod_stale; curp != NULL; curp=nextp) {
16738 nextp = curp->mod_stale;
16739 /* There should NEVER be user symbols allocated at this point */
16740 ASSERT(curp->mod_user_symbols == NULL);
16741 kmem_free(curp, sizeof(modctl_t));
16742 }
16743
16744 prevp = NULL;
16745 curp = dtrace_modctl_list;
16746
16747 while (curp != ctl) {
16748 prevp = curp;
16749 curp = curp->mod_next;
16750 }
16751
16752 if (prevp != NULL) {
16753 prevp->mod_next = ctl->mod_next;
16754 }
16755 else {
16756 dtrace_modctl_list = ctl->mod_next;
16757 }
16758
16759 /* There should NEVER be user symbols allocated at this point */
16760 ASSERT(ctl->mod_user_symbols == NULL);
16761
16762 kmem_free (ctl, sizeof(modctl_t));
16763}
16764
16765/*
16766 * APPLE NOTE: The kext loader will call dtrace_module_loaded
16767 * when the kext is loaded in memory, but before calling the
16768 * kext's start routine.
16769 *
16770 * Return 0 on success
16771 * Return -1 on failure
16772 */
16773
16774static int
16775dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag)
16776{
16777 dtrace_provider_t *prv;
16778
16779 /*
16780	 * If kernel symbols have been disabled, return immediately.
16781	 * DTRACE_KERNEL_SYMBOLS_NEVER is a permanent mode; it is safe to test without holding locks.
16782 */
16783 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER)
16784 return 0;
16785
16786 struct modctl *ctl = NULL;
16787 if (!kmod || kmod->address == 0 || kmod->size == 0)
16788 return(-1);
16789
16790 lck_mtx_lock(&dtrace_provider_lock);
16791 lck_mtx_lock(&mod_lock);
16792
16793 /*
16794 * Have we seen this kext before?
16795 */
16796
16797 ctl = dtrace_modctl_lookup(kmod);
16798
16799 if (ctl != NULL) {
16800 /* bail... we already have this kext in the modctl list */
16801 lck_mtx_unlock(&mod_lock);
16802 lck_mtx_unlock(&dtrace_provider_lock);
16803 if (dtrace_err_verbose)
16804 cmn_err(CE_WARN, "dtrace load module already exists '%s %u' is failing against '%s %u'", kmod->name, (uint_t)kmod->id, ctl->mod_modname, ctl->mod_id);
16805 return(-1);
16806 }
16807 else {
16808 ctl = kmem_alloc(sizeof(struct modctl), KM_SLEEP);
16809 if (ctl == NULL) {
16810 if (dtrace_err_verbose)
16811 cmn_err(CE_WARN, "dtrace module load '%s %u' is failing ", kmod->name, (uint_t)kmod->id);
16812 lck_mtx_unlock(&mod_lock);
16813 lck_mtx_unlock(&dtrace_provider_lock);
16814 return (-1);
16815 }
16816 ctl->mod_next = NULL;
16817 ctl->mod_stale = NULL;
16818 strlcpy (ctl->mod_modname, kmod->name, sizeof(ctl->mod_modname));
16819 ctl->mod_loadcnt = kmod->id;
16820 ctl->mod_nenabled = 0;
16821 ctl->mod_address = kmod->address;
16822 ctl->mod_size = kmod->size;
16823 ctl->mod_id = kmod->id;
16824 ctl->mod_loaded = 1;
16825 ctl->mod_flags = 0;
16826 ctl->mod_user_symbols = NULL;
16827
16828 /*
16829 * Find the UUID for this module, if it has one
16830 */
16831 kernel_mach_header_t* header = (kernel_mach_header_t *)ctl->mod_address;
16832 struct load_command* load_cmd = (struct load_command *)&header[1];
16833 uint32_t i;
16834 for (i = 0; i < header->ncmds; i++) {
16835 if (load_cmd->cmd == LC_UUID) {
16836 struct uuid_command* uuid_cmd = (struct uuid_command *)load_cmd;
16837 memcpy(ctl->mod_uuid, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));
16838 ctl->mod_flags |= MODCTL_HAS_UUID;
16839 break;
16840 }
16841 load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
16842 }
16843
16844 if (ctl->mod_address == g_kernel_kmod_info.address) {
16845 ctl->mod_flags |= MODCTL_IS_MACH_KERNEL;
16846 memcpy(dtrace_kerneluuid, ctl->mod_uuid, sizeof(dtrace_kerneluuid));
16847 }
16848 /*
16849 * Static kexts have a UUID that is not used for symbolication, as all their
16850	 * symbols are in the kernel.
16851 */
16852 else if ((flag & KMOD_DTRACE_STATIC_KEXT) == KMOD_DTRACE_STATIC_KEXT) {
16853 memcpy(ctl->mod_uuid, dtrace_kerneluuid, sizeof(dtrace_kerneluuid));
16854 ctl->mod_flags |= MODCTL_IS_STATIC_KEXT;
16855 }
16856 }
16857 dtrace_modctl_add(ctl);
16858
16859 /*
16860 * We must hold the dtrace_lock to safely test non permanent dtrace_fbt_symbol_mode(s)
16861 */
16862 lck_mtx_lock(&dtrace_lock);
16863
16864 /*
16865 * DTrace must decide if it will instrument modules lazily via
16866 * userspace symbols (default mode), or instrument immediately via
16867 * kernel symbols (non-default mode)
16868 *
16869 * When in default/lazy mode, DTrace will only support modules
16870 * built with a valid UUID.
16871 *
16872 * Overriding the default can be done explicitly in one of
16873 * the following two ways.
16874 *
16875 * A module can force symbols from kernel space using the plist key,
16876 * OSBundleForceDTraceInit (see kmod.h). If this per kext state is set,
16877 * we fall through and instrument this module now.
16878 *
16879 * Or, the boot-arg, dtrace_kernel_symbol_mode, can be set to force symbols
16880 * from kernel space (see dtrace_impl.h). If this system state is set
16881 * to a non-userspace mode, we fall through and instrument the module now.
16882	 */
16883
16884 if ((dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) &&
16885 (!(flag & KMOD_DTRACE_FORCE_INIT)))
16886 {
16887 /* We will instrument the module lazily -- this is the default */
16888 lck_mtx_unlock(&dtrace_lock);
16889 lck_mtx_unlock(&mod_lock);
16890 lck_mtx_unlock(&dtrace_provider_lock);
16891 return 0;
16892 }
16893
16894	/* We will instrument the module immediately using kernel symbols */
16895 if (!(flag & KMOD_DTRACE_NO_KERNEL_SYMS)) {
16896 ctl->mod_flags |= MODCTL_HAS_KERNEL_SYMBOLS;
16897 }
16898
16899 lck_mtx_unlock(&dtrace_lock);
16900
16901 /*
16902 * We're going to call each providers per-module provide operation
16903 * specifying only this module.
16904 */
16905 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16906 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16907
16908	/*
16909 * APPLE NOTE: The contract with the kext loader is that once this function
16910 * has completed, it may delete kernel symbols at will.
16911 * We must set this while still holding the mod_lock.
16912 */
16913 ctl->mod_flags &= ~MODCTL_HAS_KERNEL_SYMBOLS;
16914
16915 lck_mtx_unlock(&mod_lock);
16916 lck_mtx_unlock(&dtrace_provider_lock);
16917
16918 /*
16919 * If we have any retained enablings, we need to match against them.
16920 * Enabling probes requires that cpu_lock be held, and we cannot hold
16921 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16922 * module. (In particular, this happens when loading scheduling
16923 * classes.) So if we have any retained enablings, we need to dispatch
16924 * our task queue to do the match for us.
16925 */
16926 lck_mtx_lock(&dtrace_lock);
16927
16928 if (dtrace_retained == NULL) {
16929 lck_mtx_unlock(&dtrace_lock);
16930		return 0;
16931	}
16932
16933	/* APPLE NOTE!
16934	 *
16935	 * The cpu_lock mentioned above is only held by dtrace code; Apple's xnu never actually
16936 * holds it for any reason. Thus the comment above is invalid, we can directly invoke
16937 * dtrace_enabling_matchall without jumping through all the hoops, and we can avoid
16938 * the delay call as well.
16939 */
16940 lck_mtx_unlock(&dtrace_lock);
16941
16942 dtrace_enabling_matchall();
16943
16944 return 0;
16945}
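
/*
 * Illustrative example (an assumption, not code from this file): a kext
 * that wants immediate instrumentation can opt out of the lazy default
 * with the OSBundleForceDTraceInit Info.plist key mentioned above:
 *
 *	<key>OSBundleForceDTraceInit</key>
 *	<true/>
 *
 * which results in KMOD_DTRACE_FORCE_INIT being set in `flag` here.
 */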
16946
16947/*
16948 * Return 0 on success
16949 * Return -1 on failure
16950 */
16951static int
16952dtrace_module_unloaded(struct kmod_info *kmod)
16953{
16954 dtrace_probe_t template, *probe, *first, *next;
16955 dtrace_provider_t *prov;
16956 struct modctl *ctl = NULL;
16957 struct modctl *syncctl = NULL;
16958 struct modctl *nextsyncctl = NULL;
16959 int syncmode = 0;
16960
16961 lck_mtx_lock(&dtrace_provider_lock);
16962 lck_mtx_lock(&mod_lock);
16963 lck_mtx_lock(&dtrace_lock);
16964
16965 if (kmod == NULL) {
16966 syncmode = 1;
16967 }
16968 else {
16969 ctl = dtrace_modctl_lookup(kmod);
16970 if (ctl == NULL)
16971 {
16972 lck_mtx_unlock(&dtrace_lock);
16973 lck_mtx_unlock(&mod_lock);
16974 lck_mtx_unlock(&dtrace_provider_lock);
16975 return (-1);
16976 }
16977 ctl->mod_loaded = 0;
16978 ctl->mod_address = 0;
16979 ctl->mod_size = 0;
16980 }
16981
16982 if (dtrace_bymod == NULL) {
16983 /*
16984 * The DTrace module is loaded (obviously) but not attached;
16985 * we don't have any work to do.
16986 */
16987 if (ctl != NULL)
16988 (void)dtrace_modctl_remove(ctl);
16989		lck_mtx_unlock(&dtrace_lock);
16990 lck_mtx_unlock(&mod_lock);
16991 lck_mtx_unlock(&dtrace_provider_lock);
16992 return(0);
16993 }
16994
16995	/* Syncmode set means we target and traverse the entire modctl list. */
16996 if (syncmode)
16997 nextsyncctl = dtrace_modctl_list;
16998
16999syncloop:
17000 if (syncmode)
17001 {
17002 /* find a stale modctl struct */
17003 for (syncctl = nextsyncctl; syncctl != NULL; syncctl=syncctl->mod_next) {
17004 if (syncctl->mod_address == 0)
17005 break;
17006 }
17007 if (syncctl==NULL)
17008 {
17009 /* We have no more work to do */
17010			lck_mtx_unlock(&dtrace_lock);
17011 lck_mtx_unlock(&mod_lock);
17012 lck_mtx_unlock(&dtrace_provider_lock);
17013 return(0);
17014 }
17015 else {
17016 /* keep track of next syncctl in case this one is removed */
17017 nextsyncctl = syncctl->mod_next;
17018 ctl = syncctl;
17019 }
17020 }
17021
17022 template.dtpr_mod = ctl->mod_modname;
17023
17024 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
17025 probe != NULL; probe = probe->dtpr_nextmod) {
17026 if (probe->dtpr_ecb != NULL) {
17027 /*
17028 * This shouldn't _actually_ be possible -- we're
17029 * unloading a module that has an enabled probe in it.
17030 * (It's normally up to the provider to make sure that
17031 * this can't happen.) However, because dtps_enable()
17032 * doesn't have a failure mode, there can be an
17033 * enable/unload race. Upshot: we don't want to
17034 * assert, but we're not going to disable the
17035 * probe, either.
17036 */
17037
17038
17039 if (syncmode) {
17040 /* We're syncing, let's look at next in list */
17041 goto syncloop;
17042 }
17043
17044			lck_mtx_unlock(&dtrace_lock);
17045 lck_mtx_unlock(&mod_lock);
17046 lck_mtx_unlock(&dtrace_provider_lock);
17047
17048 if (dtrace_err_verbose) {
17049 cmn_err(CE_WARN, "unloaded module '%s' had "
17050 "enabled probes", ctl->mod_modname);
17051 }
17052 return(-1);
17053 }
17054 }
17055
17056 probe = first;
17057
17058 for (first = NULL; probe != NULL; probe = next) {
17059 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
17060
17061 dtrace_probes[probe->dtpr_id - 1] = NULL;
17062		probe->dtpr_provider->dtpv_probe_count--;
17063
17064 next = probe->dtpr_nextmod;
17065		dtrace_hash_remove(dtrace_byprov, probe);
17066 dtrace_hash_remove(dtrace_bymod, probe);
17067 dtrace_hash_remove(dtrace_byfunc, probe);
17068 dtrace_hash_remove(dtrace_byname, probe);
17069
17070 if (first == NULL) {
17071 first = probe;
17072 probe->dtpr_nextmod = NULL;
17073 } else {
17074 probe->dtpr_nextmod = first;
17075 first = probe;
17076 }
17077 }
17078
17079 /*
17080 * We've removed all of the module's probes from the hash chains and
17081 * from the probe array. Now issue a dtrace_sync() to be sure that
17082 * everyone has cleared out from any probe array processing.
17083 */
17084 dtrace_sync();
17085
17086 for (probe = first; probe != NULL; probe = first) {
17087 first = probe->dtpr_nextmod;
17088 prov = probe->dtpr_provider;
17089 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
17090 probe->dtpr_arg);
17091 dtrace_strunref(probe->dtpr_mod);
17092 dtrace_strunref(probe->dtpr_func);
17093 dtrace_strunref(probe->dtpr_name);
17094 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
17095
17096 zfree(dtrace_probe_t_zone, probe);
17097 }
17098
17099 dtrace_modctl_remove(ctl);
17100
17101 if (syncmode)
17102 goto syncloop;
17103
17104 lck_mtx_unlock(&dtrace_lock);
17105 lck_mtx_unlock(&mod_lock);
17106 lck_mtx_unlock(&dtrace_provider_lock);
17107
17108 return(0);
17109}
17110
17111void
17112dtrace_suspend(void)
17113{
17114 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
17115}
17116
17117void
17118dtrace_resume(void)
17119{
17120 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
17121}
17122
17123static int
17124dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
17125{
17126	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17127 lck_mtx_lock(&dtrace_lock);
17128
17129 switch (what) {
17130 case CPU_CONFIG: {
17131 dtrace_state_t *state;
17132 dtrace_optval_t *opt, rs, c;
17133
17134 /*
17135 * For now, we only allocate a new buffer for anonymous state.
17136 */
17137 if ((state = dtrace_anon.dta_state) == NULL)
17138 break;
17139
17140 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
17141 break;
17142
17143 opt = state->dts_options;
17144 c = opt[DTRACEOPT_CPU];
17145
17146 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
17147 break;
17148
17149 /*
17150 * Regardless of what the actual policy is, we're going to
17151 * temporarily set our resize policy to be manual. We're
17152 * also going to temporarily set our CPU option to denote
17153 * the newly configured CPU.
17154 */
17155 rs = opt[DTRACEOPT_BUFRESIZE];
17156 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
17157 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
17158
17159 (void) dtrace_state_buffers(state);
17160
17161 opt[DTRACEOPT_BUFRESIZE] = rs;
17162 opt[DTRACEOPT_CPU] = c;
17163
17164 break;
17165 }
17166
17167 case CPU_UNCONFIG:
17168 /*
17169 * We don't free the buffer in the CPU_UNCONFIG case. (The
17170 * buffer will be freed when the consumer exits.)
17171 */
17172 break;
17173
17174 default:
17175 break;
17176 }
17177
17178 lck_mtx_unlock(&dtrace_lock);
17179 return (0);
17180}
17181
17182static void
17183dtrace_cpu_setup_initial(processorid_t cpu)
17184{
17185 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
17186}
17187
17188static void
17189dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
17190{
17191 if (dtrace_toxranges >= dtrace_toxranges_max) {
17192 int osize, nsize;
17193 dtrace_toxrange_t *range;
17194
17195 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17196
17197 if (osize == 0) {
17198 ASSERT(dtrace_toxrange == NULL);
17199 ASSERT(dtrace_toxranges_max == 0);
17200 dtrace_toxranges_max = 1;
17201 } else {
17202 dtrace_toxranges_max <<= 1;
17203 }
17204
17205 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17206 range = kmem_zalloc(nsize, KM_SLEEP);
17207
17208 if (dtrace_toxrange != NULL) {
17209 ASSERT(osize != 0);
17210 bcopy(dtrace_toxrange, range, osize);
17211 kmem_free(dtrace_toxrange, osize);
17212 }
17213
17214 dtrace_toxrange = range;
17215 }
17216
17217 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
17218 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
17219
17220 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
17221 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
17222 dtrace_toxranges++;
17223}
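
/*
 * Illustrative sketch (hypothetical addresses): dtrace_toxic_ranges(),
 * called from dtrace_attach() below, invokes this callback once per
 * unsafe region, e.g.
 *
 *	dtrace_toxrange_add((uintptr_t)0xFFFFF000, (uintptr_t)0xFFFFFFFF);
 *
 * The doubling above grows dtrace_toxranges_max 1 -> 2 -> 4 -> ..., so
 * repeated registrations stay amortized O(1) per range.
 */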
17224
17225/*
17226 * DTrace Driver Cookbook Functions
17227 */
17228/*ARGSUSED*/
17229static int
17230dtrace_attach(dev_info_t *devi)
17231{
17232 dtrace_provider_id_t id;
17233 dtrace_state_t *state = NULL;
17234 dtrace_enabling_t *enab;
17235
17236 lck_mtx_lock(&cpu_lock);
17237 lck_mtx_lock(&dtrace_provider_lock);
17238 lck_mtx_lock(&dtrace_lock);
17239
17240	/* Darwin uses BSD cloning device driver to automagically obtain minor device number. */
17241 dtrace_devi = devi;
17242
17243 dtrace_modload = dtrace_module_loaded;
17244 dtrace_modunload = dtrace_module_unloaded;
17245 dtrace_cpu_init = dtrace_cpu_setup_initial;
17246 dtrace_helpers_cleanup = dtrace_helpers_destroy;
17247 dtrace_helpers_fork = dtrace_helpers_duplicate;
17248 dtrace_cpustart_init = dtrace_suspend;
17249 dtrace_cpustart_fini = dtrace_resume;
17250 dtrace_debugger_init = dtrace_suspend;
17251 dtrace_debugger_fini = dtrace_resume;
17252
17253 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17254
17255	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17256
17257	dtrace_arena = vmem_create("dtrace", (void *)1, INT32_MAX, 1,
17258	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
17259
17260 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
17261	    sizeof (dtrace_dstate_percpu_t) * (int)NCPU, DTRACE_STATE_ALIGN,
17262 NULL, NULL, NULL, NULL, NULL, 0);
17263
17264	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17265
17266 dtrace_nprobes = dtrace_nprobes_default;
17267 dtrace_probes = kmem_zalloc(sizeof(dtrace_probe_t*) * dtrace_nprobes,
17268 KM_SLEEP);
17269
17270 dtrace_byprov = dtrace_hash_create(dtrace_strkey_probe_provider,
17271 0, /* unused */
17272 offsetof(dtrace_probe_t, dtpr_nextprov),
17273 offsetof(dtrace_probe_t, dtpr_prevprov));
17274
17275 dtrace_bymod = dtrace_hash_create(dtrace_strkey_deref_offset,
17276 offsetof(dtrace_probe_t, dtpr_mod),
17277 offsetof(dtrace_probe_t, dtpr_nextmod),
17278 offsetof(dtrace_probe_t, dtpr_prevmod));
17279
17280 dtrace_byfunc = dtrace_hash_create(dtrace_strkey_deref_offset,
17281 offsetof(dtrace_probe_t, dtpr_func),
17282 offsetof(dtrace_probe_t, dtpr_nextfunc),
17283 offsetof(dtrace_probe_t, dtpr_prevfunc));
17284
17285 dtrace_byname = dtrace_hash_create(dtrace_strkey_deref_offset,
17286 offsetof(dtrace_probe_t, dtpr_name),
17287 offsetof(dtrace_probe_t, dtpr_nextname),
17288 offsetof(dtrace_probe_t, dtpr_prevname));
17289
17290 if (dtrace_retain_max < 1) {
17291 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
17292 "setting to 1", dtrace_retain_max);
17293 dtrace_retain_max = 1;
17294 }
17295
17296 /*
17297 * Now discover our toxic ranges.
17298 */
17299 dtrace_toxic_ranges(dtrace_toxrange_add);
17300
17301 /*
17302 * Before we register ourselves as a provider to our own framework,
17303 * we would like to assert that dtrace_provider is NULL -- but that's
17304 * not true if we were loaded as a dependency of a DTrace provider.
17305 * Once we've registered, we can assert that dtrace_provider is our
17306 * pseudo provider.
17307 */
17308 (void) dtrace_register("dtrace", &dtrace_provider_attr,
17309 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
17310
17311 ASSERT(dtrace_provider != NULL);
17312 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
17313
17314#if defined (__x86_64__)
17315 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17316 dtrace_provider, NULL, NULL, "BEGIN", 1, NULL);
17317 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17318 dtrace_provider, NULL, NULL, "END", 0, NULL);
17319 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17320 dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
17321#elif (defined(__arm__) || defined(__arm64__))
17322 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17323 dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
17324 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17325 dtrace_provider, NULL, NULL, "END", 1, NULL);
17326 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17327 dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
17328#else
17329#error Unknown Architecture
17330#endif
17331
17332 dtrace_anon_property();
17333 lck_mtx_unlock(&cpu_lock);
17334
17335 /*
17336 * If DTrace helper tracing is enabled, we need to allocate the
17337 * trace buffer and initialize the values.
17338 */
17339 if (dtrace_helptrace_enabled) {
17340 ASSERT(dtrace_helptrace_buffer == NULL);
17341 dtrace_helptrace_buffer =
17342 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
17343 dtrace_helptrace_next = 0;
17344 }
17345
17346 /*
17347 * If there are already providers, we must ask them to provide their
17348 * probes, and then match any anonymous enabling against them. Note
17349 * that there should be no other retained enablings at this time:
17350 * the only retained enablings at this time should be the anonymous
17351 * enabling.
17352 */
17353 if (dtrace_anon.dta_enabling != NULL) {
17354 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
17355
17356		/*
17357		 * APPLE NOTE: if handling anonymous dof, switch symbol modes.
17358 */
17359 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) {
17360 dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL;
17361 }
17362
17363 dtrace_enabling_provide(NULL);
17364 state = dtrace_anon.dta_state;
17365
17366 /*
17367 * We couldn't hold cpu_lock across the above call to
17368 * dtrace_enabling_provide(), but we must hold it to actually
17369 * enable the probes. We have to drop all of our locks, pick
17370 * up cpu_lock, and regain our locks before matching the
17371 * retained anonymous enabling.
17372 */
17373 lck_mtx_unlock(&dtrace_lock);
17374 lck_mtx_unlock(&dtrace_provider_lock);
17375
17376 lck_mtx_lock(&cpu_lock);
17377 lck_mtx_lock(&dtrace_provider_lock);
17378 lck_mtx_lock(&dtrace_lock);
17379
17380 if ((enab = dtrace_anon.dta_enabling) != NULL)
17381			(void) dtrace_enabling_match(enab, NULL, NULL);
17382
17383 lck_mtx_unlock(&cpu_lock);
17384 }
17385
17386 lck_mtx_unlock(&dtrace_lock);
17387 lck_mtx_unlock(&dtrace_provider_lock);
17388
17389 if (state != NULL) {
17390 /*
17391 * If we created any anonymous state, set it going now.
17392 */
17393 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
17394 }
17395
17396 return (DDI_SUCCESS);
17397}
17398
17399/*ARGSUSED*/
17400static int
17401dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
17402{
17403#pragma unused(flag, otyp)
17404 dtrace_state_t *state;
17405 uint32_t priv;
17406 uid_t uid;
17407 zoneid_t zoneid;
17408	int rv;
17409
17410	/* APPLE: Darwin puts Helper on its own major device. */
17411
17412 /*
17413 * If no DTRACE_PRIV_* bits are set in the credential, then the
17414 * caller lacks sufficient permission to do anything with DTrace.
17415 */
17416 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
17417 if (priv == DTRACE_PRIV_NONE)
17418 return (EACCES);
17419
17420	/*
17421	 * APPLE NOTE: We delay the initialization of fasttrap as late as possible.
17422 * It certainly can't be later than now!
17423 */
17424 fasttrap_init();
17425
17426 /*
17427 * Ask all providers to provide all their probes.
17428 */
17429 lck_mtx_lock(&dtrace_provider_lock);
17430 dtrace_probe_provide(NULL, NULL);
17431 lck_mtx_unlock(&dtrace_provider_lock);
17432
17433 lck_mtx_lock(&cpu_lock);
17434 lck_mtx_lock(&dtrace_lock);
17435 dtrace_opens++;
17436 dtrace_membar_producer();
17437
17438#ifdef illumos
17439 /*
17440 * If the kernel debugger is active (that is, if the kernel debugger
17441 * modified text in some way), we won't allow the open.
17442 */
17443 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
17444 dtrace_opens--;
17445		lck_mtx_unlock(&dtrace_lock);
17446 lck_mtx_unlock(&cpu_lock);
17447 return (EBUSY);
17448 }
17449#endif
17450
17451 rv = dtrace_state_create(devp, cred_p, &state);
17452 lck_mtx_unlock(&cpu_lock);
17453
17454	if (rv != 0 || state == NULL) {
17455 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
17456#ifdef illumos
17457			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17458#endif
17459 }
17460 lck_mtx_unlock(&dtrace_lock);
17461 /* propagate EAGAIN or ERESTART */
17462 return (rv);
17463 }
17464
17465 lck_mtx_unlock(&dtrace_lock);
17466
17467	lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
17468
17469 /*
17470 * If we are currently lazy, transition states.
17471 *
17472 * Unlike dtrace_close, we do not need to check the
17473 * value of dtrace_opens, as any positive value (and
17474 * we count as 1) means we transition states.
17475 */
17476 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON) {
17477 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_OFF;
17478 /*
17479 * We do not need to hold the exclusive lock while processing
17480 * DOF on processes. We do need to make sure the mode does not get
17481 * changed to DTRACE_DOF_MODE_LAZY_ON during that stage though
17482 * (which should not happen anyway since it only happens in
17483		 * dtrace_close). There is no way incomplete USDT probes can be
17484		 * activated by any DTrace clients here since they all have to
17485		 * call dtrace_open and be blocked on dtrace_dof_mode_lock.
17486 */
17487 lck_rw_lock_exclusive_to_shared(&dtrace_dof_mode_lock);
17488 /*
17489 * Iterate all existing processes and load lazy dofs.
17490 */
17491 proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
17492 dtrace_lazy_dofs_proc_iterate_doit,
17493 NULL,
17494 dtrace_lazy_dofs_proc_iterate_filter,
17495 NULL);
17496
17497 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
17498 }
17499 else {
17500 lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
17501	}
17502
17503
17504 /*
17505 * Update kernel symbol state.
17506 *
17507 * We must own the provider and dtrace locks.
17508 *
17509	 * NOTE! It may appear there is a race created by setting this value so late
17510 * after dtrace_probe_provide. However, any kext loaded after the
17511 * call to probe provide and before we set LAZY_OFF will be marked as
17512 * eligible for symbols from userspace. The same dtrace that is currently
17513 * calling dtrace_open() (this call!) will get a list of kexts needing
17514 * symbols and fill them in, thus closing the race window.
17515 *
17516	 * We want to set this value only after it is certain it will succeed, as
17517 * this significantly reduces the complexity of error exits.
17518 */
17519 lck_mtx_lock(&dtrace_lock);
17520 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) {
17521 dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL;
17522	}
17523	lck_mtx_unlock(&dtrace_lock);
17524
17525 return (0);
17526}
17527
17528/*ARGSUSED*/
17529static int
17530dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
17531{
17532#pragma unused(flag, otyp, cred_p) /* __APPLE__ */
17533 minor_t minor = getminor(dev);
17534 dtrace_state_t *state;
17535
17536	/* APPLE NOTE: Darwin puts Helper on its own major device. */
17537	state = dtrace_state_get(minor);
17538
17539 lck_mtx_lock(&cpu_lock);
17540 lck_mtx_lock(&dtrace_lock);
17541
17542	if (state->dts_anon) {
17543		/*
17544		 * There is anonymous state. Destroy that first.
17545		 */
17546 ASSERT(dtrace_anon.dta_state == NULL);
17547 dtrace_state_destroy(state->dts_anon);
17548 }
17549
17550 dtrace_state_destroy(state);
17551 ASSERT(dtrace_opens > 0);
17552
17553 /*
17554 * Only relinquish control of the kernel debugger interface when there
17555 * are no consumers and no anonymous enablings.
17556 */
17557 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
17558#ifdef illumos
17559		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17560#endif
17561 }
17562
17563 lck_mtx_unlock(&dtrace_lock);
17564 lck_mtx_unlock(&cpu_lock);
17565
17566 /*
17567 * Lock ordering requires the dof mode lock be taken before
17568 * the dtrace_lock.
17569 */
17570 lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
17571 lck_mtx_lock(&dtrace_lock);
17572
17573 if (dtrace_opens == 0) {
17574 /*
17575 * If we are currently lazy-off, and this is the last close, transition to
17576 * lazy state.
17577 */
17578 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) {
17579 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
17580 }
17581
17582 /*
17583 * If we are the last dtrace client, switch back to lazy (from userspace) symbols
17584 */
17585 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_KERNEL) {
17586 dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
17587 }
17588	}
17589
17590 lck_mtx_unlock(&dtrace_lock);
17591 lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
17592
17593 /*
17594 * Kext probes may be retained past the end of the kext's lifespan. The
17595 * probes are kept until the last reference to them has been removed.
17596 * Since closing an active dtrace context is likely to drop that last reference,
17597 * lets take a shot at cleaning out the orphaned probes now.
17598 */
17599 dtrace_module_unloaded(NULL);
17600
17601	return (0);
17602}
17603
17604/*ARGSUSED*/
17605static int
17606dtrace_ioctl_helper(u_long cmd, caddr_t arg, int *rv)
17607{
17608#pragma unused(rv)
17609 /*
17610 * Safe to check this outside the dof mode lock
17611 */
17612 if (dtrace_dof_mode == DTRACE_DOF_MODE_NEVER)
17613 return KERN_SUCCESS;
17614
17615 switch (cmd) {
17616#if defined (__arm64__)
17617 case DTRACEHIOC_ADDDOF_U32:
17618 case DTRACEHIOC_ADDDOF_U64:
17619#else
17620	case DTRACEHIOC_ADDDOF:
17621#endif /* __arm64__*/
17622	{
17623 dof_helper_t *dhp = NULL;
17624 size_t dof_ioctl_data_size;
17625 dof_ioctl_data_t* multi_dof;
17626 unsigned int i;
17627 int rval = 0;
17628 user_addr_t user_address = *(user_addr_t*)arg;
17629 uint64_t dof_count;
17630 int multi_dof_claimed = 0;
17631 proc_t* p = current_proc();
17632
17633 /*
17634 * If this is a restricted process and dtrace is restricted,
17635 * do not allow DOFs to be registered
17636 */
17637 if (dtrace_is_restricted() &&
17638 !dtrace_are_restrictions_relaxed() &&
17639 !dtrace_can_attach_to_proc(current_proc())) {
17640 return (EACCES);
17641 }
17642
17643 /*
17644 * Read the number of DOF sections being passed in.
17645 */
17646 if (copyin(user_address + offsetof(dof_ioctl_data_t, dofiod_count),
17647 &dof_count,
17648 sizeof(dof_count))) {
17649 dtrace_dof_error(NULL, "failed to copyin dofiod_count");
17650 return (EFAULT);
17651 }
17652
17653 /*
17654 * Range check the count.
17655 */
17656 if (dof_count == 0 || dof_count > 1024) {
17657 dtrace_dof_error(NULL, "dofiod_count is not valid");
17658 return (EINVAL);
17659 }
17660
17661 /*
17662 * Allocate a correctly sized structure and copyin the data.
17663 */
17664 dof_ioctl_data_size = DOF_IOCTL_DATA_T_SIZE(dof_count);
17665 if ((multi_dof = kmem_alloc(dof_ioctl_data_size, KM_SLEEP)) == NULL)
17666 return (ENOMEM);
17667
17668 /* NOTE! We can no longer exit this method via return */
17669 if (copyin(user_address, multi_dof, dof_ioctl_data_size) != 0) {
17670 dtrace_dof_error(NULL, "failed copyin of dof_ioctl_data_t");
17671 rval = EFAULT;
17672 goto cleanup;
17673 }
17674
17675 /*
17676 * Check that the count didn't change between the first copyin and the second.
17677 */
17678 if (multi_dof->dofiod_count != dof_count) {
17679 rval = EINVAL;
17680 goto cleanup;
17681 }
17682
17683 /*
17684 * Try to process lazily first.
17685 */
17686 rval = dtrace_lazy_dofs_add(p, multi_dof, &multi_dof_claimed);
17687
17688 /*
17689 * If rval is EACCES, we must be non-lazy.
17690 */
17691 if (rval == EACCES) {
17692 rval = 0;
17693 /*
17694 * Process each dof_helper_t
17695 */
17696 i = 0;
17697 do {
17698 dhp = &multi_dof->dofiod_helpers[i];
17699
17700 dof_hdr_t *dof = dtrace_dof_copyin(dhp->dofhp_dof, &rval);
17701
17702 if (dof != NULL) {
17703				lck_mtx_lock(&dtrace_meta_lock);
17704 lck_mtx_lock(&dtrace_lock);
17705
17706 /*
17707 * dtrace_helper_slurp() takes responsibility for the dof --
17708 * it may free it now or it may save it and free it later.
17709 */
17710 if ((dhp->dofhp_dof = (uint64_t)dtrace_helper_slurp(p, dof, dhp)) == -1ULL) {
17711 rval = EINVAL;
17712 }
17713
17714 lck_mtx_unlock(&dtrace_lock);
17715				lck_mtx_unlock(&dtrace_meta_lock);
17716 }
17717 } while (++i < multi_dof->dofiod_count && rval == 0);
17718 }
17719
17720 /*
17721 * We need to copyout the multi_dof struct, because it contains
17722		 * the generation (unique id) values needed to call DTRACEHIOC_REMOVE.
17723 *
17724 * This could certainly be better optimized.
17725 */
17726 if (copyout(multi_dof, user_address, dof_ioctl_data_size) != 0) {
17727 dtrace_dof_error(NULL, "failed copyout of dof_ioctl_data_t");
17728 /* Don't overwrite pre-existing error code */
17729 if (rval == 0) rval = EFAULT;
17730 }
17731
17732 cleanup:
17733 /*
17734 * If we had to allocate struct memory, free it.
17735 */
17736 if (multi_dof != NULL && !multi_dof_claimed) {
17737 kmem_free(multi_dof, dof_ioctl_data_size);
17738 }
17739
17740 return rval;
17741 }
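
	/*
	 * Illustrative userspace sketch of the ADDDOF flow above. Assumptions:
	 * `dof` points at a valid DOF image, the helper device is named
	 * /dev/dtracehelper, and error handling is elided; this is a sketch,
	 * not the exact libdtrace sequence.
	 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int
register_helper_dof_sketch(void *dof)
{
	int fd = open("/dev/dtracehelper", O_RDWR);
	dof_ioctl_data_t *data = calloc(1, DOF_IOCTL_DATA_T_SIZE(1));
	int generation = -1;

	data->dofiod_count = 1;
	data->dofiod_helpers[0].dofhp_addr = (uint64_t)(uintptr_t)dof;
	data->dofiod_helpers[0].dofhp_dof = (uint64_t)(uintptr_t)dof;

	/* The ioctl argument is the address of the pointer, as read above. */
	if (ioctl(fd, DTRACEHIOC_ADDDOF, &data) == 0) {
		/* dofhp_dof now carries the generation id for DTRACEHIOC_REMOVE. */
		generation = (int)data->dofiod_helpers[0].dofhp_dof;
	}

	free(data);
	close(fd);
	return generation;
}
#endif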
17742
17743 case DTRACEHIOC_REMOVE: {
17744 int generation = *(int*)arg;
17745 proc_t* p = current_proc();
17746
17747 /*
17748 * Try lazy first.
17749 */
17750 int rval = dtrace_lazy_dofs_remove(p, generation);
17751
17752 /*
17753 * EACCES means non-lazy
17754 */
17755 if (rval == EACCES) {
17756			lck_mtx_lock(&dtrace_meta_lock);
17757 lck_mtx_lock(&dtrace_lock);
17758 rval = dtrace_helper_destroygen(p, generation);
17759 lck_mtx_unlock(&dtrace_lock);
17760			lck_mtx_unlock(&dtrace_meta_lock);
17761 }
17762
17763 return (rval);
17764 }
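
	/*
	 * Illustrative counterpart (same assumptions as the ADDDOF sketch
	 * above): the generation id returned at registration time identifies
	 * the helpers to remove, matching the *(int*)arg read here.
	 */
#if 0
	int gen = generation;	/* value saved from the ADDDOF sketch */
	(void) ioctl(fd, DTRACEHIOC_REMOVE, &gen);
#endif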
17765
17766 default:
17767 break;
17768 }
17769
17770 return ENOTTY;
17771}
17772
17773/*ARGSUSED*/
17774static int
17775dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
17776{
17777#pragma unused(md)
17778 minor_t minor = getminor(dev);
17779 dtrace_state_t *state;
17780 int rval;
17781
17782 /* Darwin puts Helper on its own major device. */
17783
17784	state = dtrace_state_get(minor);
17785
17786 if (state->dts_anon) {
17787 ASSERT(dtrace_anon.dta_state == NULL);
17788 state = state->dts_anon;
17789 }
17790
17791 switch (cmd) {
17792 case DTRACEIOC_PROVIDER: {
17793 dtrace_providerdesc_t pvd;
17794 dtrace_provider_t *pvp;
17795
17796 if (copyin(arg, &pvd, sizeof (pvd)) != 0)
17797 return (EFAULT);
17798
17799 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
17800 lck_mtx_lock(&dtrace_provider_lock);
17801
17802 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
17803 if (strncmp(pvp->dtpv_name, pvd.dtvd_name, DTRACE_PROVNAMELEN) == 0)
17804 break;
17805 }
17806
17807 lck_mtx_unlock(&dtrace_provider_lock);
17808
17809 if (pvp == NULL)
17810 return (ESRCH);
17811
17812 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
17813 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
17814 if (copyout(&pvd, arg, sizeof (pvd)) != 0)
17815 return (EFAULT);
17816
17817 return (0);
17818 }
17819
17820 case DTRACEIOC_EPROBE: {
17821 dtrace_eprobedesc_t epdesc;
17822 dtrace_ecb_t *ecb;
17823 dtrace_action_t *act;
17824 void *buf;
17825 size_t size;
17826 uintptr_t dest;
17827 int nrecs;
17828
17829 if (copyin(arg, &epdesc, sizeof (epdesc)) != 0)
17830 return (EFAULT);
17831
17832 lck_mtx_lock(&dtrace_lock);
17833
17834 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
17835 lck_mtx_unlock(&dtrace_lock);
17836 return (EINVAL);
17837 }
17838
17839 if (ecb->dte_probe == NULL) {
17840 lck_mtx_unlock(&dtrace_lock);
17841 return (EINVAL);
17842 }
17843
17844 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
17845 epdesc.dtepd_uarg = ecb->dte_uarg;
17846 epdesc.dtepd_size = ecb->dte_size;
17847
17848 nrecs = epdesc.dtepd_nrecs;
17849 epdesc.dtepd_nrecs = 0;
17850 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17851 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17852 continue;
17853
17854 epdesc.dtepd_nrecs++;
17855 }
17856
17857 /*
17858 * Now that we have the size, we need to allocate a temporary
17859 * buffer in which to store the complete description. We need
17860 * the temporary buffer to be able to drop dtrace_lock()
17861 * across the copyout(), below.
17862 */
17863 size = sizeof (dtrace_eprobedesc_t) +
17864 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
17865
17866 buf = kmem_alloc(size, KM_SLEEP);
17867 dest = (uintptr_t)buf;
17868
17869 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
17870 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
17871
17872 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17873 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17874 continue;
17875
17876 if (nrecs-- == 0)
17877 break;
17878
17879 bcopy(&act->dta_rec, (void *)dest,
17880 sizeof (dtrace_recdesc_t));
17881 dest += sizeof (dtrace_recdesc_t);
17882 }
17883
17884 lck_mtx_unlock(&dtrace_lock);
17885
17886 if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
17887 kmem_free(buf, size);
17888 return (EFAULT);
17889 }
17890
17891 kmem_free(buf, size);
17892 return (0);
17893 }
17894
17895 case DTRACEIOC_AGGDESC: {
17896 dtrace_aggdesc_t aggdesc;
17897 dtrace_action_t *act;
17898 dtrace_aggregation_t *agg;
17899 int nrecs;
17900 uint32_t offs;
17901 dtrace_recdesc_t *lrec;
17902 void *buf;
17903 size_t size;
17904 uintptr_t dest;
17905
17906 if (copyin(arg, &aggdesc, sizeof (aggdesc)) != 0)
17907 return (EFAULT);
17908
17909 lck_mtx_lock(&dtrace_lock);
17910
17911 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
17912 lck_mtx_unlock(&dtrace_lock);
17913 return (EINVAL);
17914 }
17915
17916 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
17917
17918 nrecs = aggdesc.dtagd_nrecs;
17919 aggdesc.dtagd_nrecs = 0;
17920
17921 offs = agg->dtag_base;
17922 lrec = &agg->dtag_action.dta_rec;
17923 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
17924
17925 for (act = agg->dtag_first; ; act = act->dta_next) {
17926 ASSERT(act->dta_intuple ||
17927 DTRACEACT_ISAGG(act->dta_kind));
17928
17929 /*
17930 * If this action has a record size of zero, it
17931 * denotes an argument to the aggregating action.
17932 * Because the presence of this record doesn't (or
17933 * shouldn't) affect the way the data is interpreted,
17934 * we don't copy it out to save user-level the
17935 * confusion of dealing with a zero-length record.
17936 */
17937 if (act->dta_rec.dtrd_size == 0) {
17938 ASSERT(agg->dtag_hasarg);
17939 continue;
17940 }
17941
17942 aggdesc.dtagd_nrecs++;
17943
17944 if (act == &agg->dtag_action)
17945 break;
17946 }
17947
17948 /*
17949 * Now that we have the size, we need to allocate a temporary
17950 * buffer in which to store the complete description. We need
17951 * the temporary buffer to be able to drop dtrace_lock()
17952 * across the copyout(), below.
17953 */
17954 size = sizeof (dtrace_aggdesc_t) +
17955 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
17956
17957 buf = kmem_alloc(size, KM_SLEEP);
17958 dest = (uintptr_t)buf;
17959
17960 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
17961 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
17962
17963 for (act = agg->dtag_first; ; act = act->dta_next) {
17964 dtrace_recdesc_t rec = act->dta_rec;
17965
17966 /*
17967 * See the comment in the above loop for why we pass
17968 * over zero-length records.
17969 */
17970 if (rec.dtrd_size == 0) {
17971 ASSERT(agg->dtag_hasarg);
17972 continue;
17973 }
17974
17975 if (nrecs-- == 0)
17976 break;
17977
17978 rec.dtrd_offset -= offs;
17979 bcopy(&rec, (void *)dest, sizeof (rec));
17980 dest += sizeof (dtrace_recdesc_t);
17981
17982 if (act == &agg->dtag_action)
17983 break;
17984 }
17985
17986 lck_mtx_unlock(&dtrace_lock);
17987
17988 if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
17989 kmem_free(buf, size);
17990 return (EFAULT);
17991 }
17992
17993 kmem_free(buf, size);
17994 return (0);
17995 }
17996
17997 case DTRACEIOC_ENABLE: {
17998 dof_hdr_t *dof;
17999 dtrace_enabling_t *enab = NULL;
18000 dtrace_vstate_t *vstate;
18001 int err = 0;
18002
18003 *rv = 0;
18004
18005 /*
18006 * If a NULL argument has been passed, we take this as our
18007 * cue to reevaluate our enablings.
18008 */
18009		if (arg == 0) {
18010 dtrace_enabling_matchall();
18011
18012 return (0);
18013 }
18014
18015 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
18016 return (rval);
18017
18018 lck_mtx_lock(&cpu_lock);
18019 lck_mtx_lock(&dtrace_lock);
18020 vstate = &state->dts_vstate;
18021
18022 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
18023 lck_mtx_unlock(&dtrace_lock);
18024 lck_mtx_unlock(&cpu_lock);
18025 dtrace_dof_destroy(dof);
18026 return (EBUSY);
18027 }
18028
18029 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
18030 lck_mtx_unlock(&dtrace_lock);
18031 lck_mtx_unlock(&cpu_lock);
18032 dtrace_dof_destroy(dof);
18033 return (EINVAL);
18034 }
18035
18036 if ((rval = dtrace_dof_options(dof, state)) != 0) {
18037 dtrace_enabling_destroy(enab);
18038 lck_mtx_unlock(&dtrace_lock);
18039 lck_mtx_unlock(&cpu_lock);
18040 dtrace_dof_destroy(dof);
18041 return (rval);
18042 }
18043
18044		if ((err = dtrace_enabling_match(enab, rv, NULL)) == 0) {
18045 err = dtrace_enabling_retain(enab);
18046 } else {
18047 dtrace_enabling_destroy(enab);
18048 }
18049
18050		lck_mtx_unlock(&dtrace_lock);
18051		lck_mtx_unlock(&cpu_lock);
18052 dtrace_dof_destroy(dof);
18053
18054 return (err);
18055 }
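
	/*
	 * Illustrative consumer-side note (hypothetical `fd` open on
	 * /dev/dtrace): a zero argument is the cue described above to
	 * re-match all retained enablings instead of slurping new DOF:
	 */
#if 0
	(void) ioctl(fd, DTRACEIOC_ENABLE, NULL);	/* reevaluate enablings */
#endif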
18056
18057 case DTRACEIOC_REPLICATE: {
18058 dtrace_repldesc_t desc;
18059 dtrace_probedesc_t *match = &desc.dtrpd_match;
18060 dtrace_probedesc_t *create = &desc.dtrpd_create;
18061 int err;
18062
18063 if (copyin(arg, &desc, sizeof (desc)) != 0)
18064 return (EFAULT);
18065
18066 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18067 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18068 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18069 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18070
18071 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18072 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18073 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18074 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18075
18076 lck_mtx_lock(&dtrace_lock);
18077 err = dtrace_enabling_replicate(state, match, create);
18078 lck_mtx_unlock(&dtrace_lock);
18079
18080 return (err);
18081 }
18082
18083 case DTRACEIOC_PROBEMATCH:
18084 case DTRACEIOC_PROBES: {
18085 dtrace_probe_t *probe = NULL;
18086 dtrace_probedesc_t desc;
18087 dtrace_probekey_t pkey;
18088 dtrace_id_t i;
18089 int m = 0;
18090 uint32_t priv;
18091 uid_t uid;
18092 zoneid_t zoneid;
18093
18094 if (copyin(arg, &desc, sizeof (desc)) != 0)
18095 return (EFAULT);
18096
18097 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18098 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18099 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18100 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18101
18102 /*
18103 * Before we attempt to match this probe, we want to give
18104 * all providers the opportunity to provide it.
18105 */
18106 if (desc.dtpd_id == DTRACE_IDNONE) {
18107 lck_mtx_lock(&dtrace_provider_lock);
18108 dtrace_probe_provide(&desc, NULL);
18109 lck_mtx_unlock(&dtrace_provider_lock);
18110 desc.dtpd_id++;
18111 }
18112
18113 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
18114
18115 lck_mtx_lock(&dtrace_lock);
18116
18117 if (cmd == DTRACEIOC_PROBEMATCH) {
18118 dtrace_probekey(&desc, &pkey);
18119 pkey.dtpk_id = DTRACE_IDNONE;
18120
18121 /* Quiet compiler warning */
18122 for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
18123 if ((probe = dtrace_probes[i - 1]) != NULL &&
18124 (m = dtrace_match_probe(probe, &pkey,
18125 priv, uid, zoneid)) != 0)
18126 break;
18127 }
18128
18129 if (m < 0) {
18130 lck_mtx_unlock(&dtrace_lock);
18131 return (EINVAL);
18132 }
18133			dtrace_probekey_release(&pkey);
18134
18135 } else {
18136 /* Quiet compiler warning */
18137 for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
18138 if ((probe = dtrace_probes[i - 1]) != NULL &&
18139 dtrace_match_priv(probe, priv, uid, zoneid))
18140 break;
18141 }
18142 }
18143
18144 if (probe == NULL) {
18145 lck_mtx_unlock(&dtrace_lock);
18146 return (ESRCH);
18147 }
18148
18149 dtrace_probe_description(probe, &desc);
18150 lck_mtx_unlock(&dtrace_lock);
18151
18152 if (copyout(&desc, arg, sizeof (desc)) != 0)
18153 return (EFAULT);
18154
18155 return (0);
18156 }
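
	/*
	 * Illustrative consumer loop for DTRACEIOC_PROBES (hypothetical `fd`).
	 * It relies on the resume convention above: the kernel returns the
	 * first probe with id >= dtpd_id that passes the privilege checks.
	 */
#if 0
	dtrace_probedesc_t pd;

	bzero(&pd, sizeof (pd));
	pd.dtpd_id = DTRACE_IDNONE;
	while (ioctl(fd, DTRACEIOC_PROBES, &pd) == 0) {
		printf("%6u %s:%s:%s:%s\n", pd.dtpd_id, pd.dtpd_provider,
		    pd.dtpd_mod, pd.dtpd_func, pd.dtpd_name);
		pd.dtpd_id++;	/* resume after the probe just returned */
	}
#endif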
18157
18158 case DTRACEIOC_PROBEARG: {
18159 dtrace_argdesc_t desc;
18160 dtrace_probe_t *probe;
18161 dtrace_provider_t *prov;
18162
18163 if (copyin(arg, &desc, sizeof (desc)) != 0)
18164 return (EFAULT);
18165
18166 if (desc.dtargd_id == DTRACE_IDNONE)
18167 return (EINVAL);
18168
18169 if (desc.dtargd_ndx == DTRACE_ARGNONE)
18170 return (EINVAL);
18171
18172 lck_mtx_lock(&dtrace_provider_lock);
18173 lck_mtx_lock(&mod_lock);
18174 lck_mtx_lock(&dtrace_lock);
18175
18176 /* Quiet compiler warning */
18177 if (desc.dtargd_id > (dtrace_id_t)dtrace_nprobes) {
18178 lck_mtx_unlock(&dtrace_lock);
18179 lck_mtx_unlock(&mod_lock);
18180 lck_mtx_unlock(&dtrace_provider_lock);
18181 return (EINVAL);
18182 }
18183
18184 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
18185 lck_mtx_unlock(&dtrace_lock);
18186 lck_mtx_unlock(&mod_lock);
18187 lck_mtx_unlock(&dtrace_provider_lock);
18188 return (EINVAL);
18189 }
18190
18191 lck_mtx_unlock(&dtrace_lock);
18192
18193 prov = probe->dtpr_provider;
18194
18195 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
18196 /*
18197 * There isn't any typed information for this probe.
18198 * Set the argument number to DTRACE_ARGNONE.
18199 */
18200 desc.dtargd_ndx = DTRACE_ARGNONE;
18201 } else {
18202 desc.dtargd_native[0] = '\0';
18203 desc.dtargd_xlate[0] = '\0';
18204 desc.dtargd_mapping = desc.dtargd_ndx;
18205
18206 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
18207 probe->dtpr_id, probe->dtpr_arg, &desc);
18208 }
18209
18210 lck_mtx_unlock(&mod_lock);
18211 lck_mtx_unlock(&dtrace_provider_lock);
18212
18213 if (copyout(&desc, arg, sizeof (desc)) != 0)
18214 return (EFAULT);
18215
18216 return (0);
18217 }
18218
18219 case DTRACEIOC_GO: {
18220 processorid_t cpuid;
18221 rval = dtrace_state_go(state, &cpuid);
18222
18223 if (rval != 0)
18224 return (rval);
18225
18226 if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
18227 return (EFAULT);
18228
18229 return (0);
18230 }
18231
18232 case DTRACEIOC_STOP: {
18233 processorid_t cpuid;
18234
18235 lck_mtx_lock(&dtrace_lock);
18236 rval = dtrace_state_stop(state, &cpuid);
18237 lck_mtx_unlock(&dtrace_lock);
18238
18239 if (rval != 0)
18240 return (rval);
18241
18242 if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
18243 return (EFAULT);
18244
18245 return (0);
18246 }
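
	/*
	 * Illustrative start/stop pairing (hypothetical `fd`): both calls copy
	 * out the CPU on which the BEGIN or END probe fired, as seen above.
	 */
#if 0
	processorid_t cpu;

	if (ioctl(fd, DTRACEIOC_GO, &cpu) == 0) {
		/* ... trace and consume ... */
		(void) ioctl(fd, DTRACEIOC_STOP, &cpu);
	}
#endif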
18247
18248 case DTRACEIOC_DOFGET: {
18249 dof_hdr_t hdr, *dof;
18250 uint64_t len;
18251
18252 if (copyin(arg, &hdr, sizeof (hdr)) != 0)
18253 return (EFAULT);
18254
18255 lck_mtx_lock(&dtrace_lock);
18256 dof = dtrace_dof_create(state);
18257 lck_mtx_unlock(&dtrace_lock);
18258
18259 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
18260 rval = copyout(dof, arg, len);
18261 dtrace_dof_destroy(dof);
18262
18263 return (rval == 0 ? 0 : EFAULT);
18264 }
18265
18266 case DTRACEIOC_SLEEP: {
18267 int64_t time;
18268 uint64_t abstime;
18269 uint64_t rvalue = DTRACE_WAKE_TIMEOUT;
18270
18271 if (copyin(arg, &time, sizeof(time)) != 0)
18272 return (EFAULT);
18273
18274 nanoseconds_to_absolutetime((uint64_t)time, &abstime);
18275 clock_absolutetime_interval_to_deadline(abstime, &abstime);
18276
18277 if (assert_wait_deadline(state, THREAD_ABORTSAFE, abstime) == THREAD_WAITING) {
18278 if (state->dts_buf_over_limit > 0) {
18279 clear_wait(current_thread(), THREAD_INTERRUPTED);
18280 rvalue = DTRACE_WAKE_BUF_LIMIT;
18281 } else {
18282 thread_block(THREAD_CONTINUE_NULL);
18283 if (state->dts_buf_over_limit > 0) {
18284 rvalue = DTRACE_WAKE_BUF_LIMIT;
18285 }
18286 }
18287 }
18288
18289 if (copyout(&rvalue, arg, sizeof(rvalue)) != 0)
18290 return (EFAULT);
18291
18292 return (0);
18293 }
18294
18295 case DTRACEIOC_SIGNAL: {
18296 wakeup(state);
18297 return (0);
18298 }
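
	/*
	 * Illustrative sleep/wake handshake (hypothetical `fd`; the timeout is
	 * in nanoseconds, and the same 8-byte argument carries the timeout in
	 * and the wake reason out, matching the copyin/copyout above):
	 */
#if 0
	uint64_t arg = 1000000000ULL;	/* sleep for at most one second */

	if (ioctl(fd, DTRACEIOC_SLEEP, &arg) == 0 &&
	    arg == DTRACE_WAKE_BUF_LIMIT) {
		/* A per-CPU buffer crossed its limit; snapshot it promptly. */
	}
	/* Another thread can cut the sleep short with DTRACEIOC_SIGNAL. */
#endif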
18299
18300 case DTRACEIOC_AGGSNAP:
18301 case DTRACEIOC_BUFSNAP: {
18302 dtrace_bufdesc_t desc;
18303 caddr_t cached;
18304		boolean_t over_limit;
18305 dtrace_buffer_t *buf;
18306
18307 if (copyin(arg, &desc, sizeof (desc)) != 0)
18308 return (EFAULT);
18309
18310 if ((int)desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
18311 return (EINVAL);
18312
18313 lck_mtx_lock(&dtrace_lock);
18314
18315 if (cmd == DTRACEIOC_BUFSNAP) {
18316 buf = &state->dts_buffer[desc.dtbd_cpu];
18317 } else {
18318 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
18319 }
18320
18321 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
18322 size_t sz = buf->dtb_offset;
18323
18324 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
18325 lck_mtx_unlock(&dtrace_lock);
18326 return (EBUSY);
18327 }
18328
18329 /*
18330 * If this buffer has already been consumed, we're
18331 * going to indicate that there's nothing left here
18332 * to consume.
18333 */
18334 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
18335 lck_mtx_unlock(&dtrace_lock);
18336
18337 desc.dtbd_size = 0;
18338 desc.dtbd_drops = 0;
18339 desc.dtbd_errors = 0;
18340 desc.dtbd_oldest = 0;
18341 sz = sizeof (desc);
18342
18343 if (copyout(&desc, arg, sz) != 0)
18344 return (EFAULT);
18345
18346 return (0);
18347 }
18348
18349 /*
18350 * If this is a ring buffer that has wrapped, we want
18351 * to copy the whole thing out.
18352 */
18353 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
18354 dtrace_buffer_polish(buf);
18355 sz = buf->dtb_size;
18356 }
18357
18358 if (copyout(buf->dtb_tomax, (user_addr_t)desc.dtbd_data, sz) != 0) {
18359 lck_mtx_unlock(&dtrace_lock);
18360 return (EFAULT);
18361 }
18362
18363 desc.dtbd_size = sz;
18364 desc.dtbd_drops = buf->dtb_drops;
18365 desc.dtbd_errors = buf->dtb_errors;
18366 desc.dtbd_oldest = buf->dtb_xamot_offset;
18367			desc.dtbd_timestamp = dtrace_gethrtime();
18368
18369 lck_mtx_unlock(&dtrace_lock);
18370
18371 if (copyout(&desc, arg, sizeof (desc)) != 0)
18372 return (EFAULT);
18373
18374 buf->dtb_flags |= DTRACEBUF_CONSUMED;
18375
18376 return (0);
18377 }
18378
18379 if (buf->dtb_tomax == NULL) {
18380 ASSERT(buf->dtb_xamot == NULL);
18381 lck_mtx_unlock(&dtrace_lock);
18382 return (ENOENT);
18383 }
18384
18385 cached = buf->dtb_tomax;
18386 over_limit = buf->dtb_cur_limit == buf->dtb_size;
18387
18388 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
18389
18390 dtrace_xcall(desc.dtbd_cpu,
18391 (dtrace_xcall_t)dtrace_buffer_switch, buf);
18392
18393 state->dts_errors += buf->dtb_xamot_errors;
18394
18395 /*
18396 * If the buffers did not actually switch, then the cross call
18397 * did not take place -- presumably because the given CPU is
18398 * not in the ready set. If this is the case, we'll return
18399 * ENOENT.
18400 */
18401 if (buf->dtb_tomax == cached) {
18402 ASSERT(buf->dtb_xamot != cached);
18403 lck_mtx_unlock(&dtrace_lock);
18404 return (ENOENT);
18405 }
18406
18407 ASSERT(cached == buf->dtb_xamot);
18408		/*
18409		 * At this point we know the buffers have switched, so we
18410		 * can decrement the over limit count if the buffer was over
18411		 * its limit. The new buffer might already be over its limit,
18412		 * but we don't care since we're guaranteed not to be
18413		 * checking the buffer over limit count at this point.
18414		 */
18415 if (over_limit) {
18416			uint32_t old = os_atomic_dec_orig(&state->dts_buf_over_limit, relaxed);
18417 #pragma unused(old)
18418
18419 /*
18420 * Verify that we didn't underflow the value
18421 */
18422 ASSERT(old != 0);
18423 }
18424
18425 /*
18426 * We have our snapshot; now copy it out.
18427 */
18428 if (dtrace_buffer_copyout(buf->dtb_xamot,
18429 (user_addr_t)desc.dtbd_data,
18430 buf->dtb_xamot_offset) != 0) {
18431 lck_mtx_unlock(&dtrace_lock);
18432 return (EFAULT);
18433 }
18434
18435 desc.dtbd_size = buf->dtb_xamot_offset;
18436 desc.dtbd_drops = buf->dtb_xamot_drops;
18437 desc.dtbd_errors = buf->dtb_xamot_errors;
18438 desc.dtbd_oldest = 0;
18439		desc.dtbd_timestamp = buf->dtb_switched;
18440
18441 lck_mtx_unlock(&dtrace_lock);
18442
18443 /*
18444 * Finally, copy out the buffer description.
18445 */
18446 if (copyout(&desc, arg, sizeof (desc)) != 0)
18447 return (EFAULT);
18448
18449 return (0);
18450 }
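
	/*
	 * Illustrative snapshot request (hypothetical `fd` and `snap`; the
	 * dtbd_data destination must be at least as large as the in-kernel
	 * buffer, since up to dtb_size bytes are copied out above):
	 */
#if 0
	dtrace_bufdesc_t desc;

	bzero(&desc, sizeof (desc));
	desc.dtbd_cpu = 0;		/* snapshot CPU 0 */
	desc.dtbd_data = snap;		/* hypothetical destination buffer */
	if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == 0) {
		/* desc.dtbd_size bytes of records are now in snap. */
	}
#endif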
18451
18452 case DTRACEIOC_CONF: {
18453 dtrace_conf_t conf;
18454
18455 bzero(&conf, sizeof (conf));
18456 conf.dtc_difversion = DIF_VERSION;
18457 conf.dtc_difintregs = DIF_DIR_NREGS;
18458 conf.dtc_diftupregs = DIF_DTR_NREGS;
18459 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
18460
18461 if (copyout(&conf, arg, sizeof (conf)) != 0)
18462 return (EFAULT);
18463
18464 return (0);
18465 }
18466
18467 case DTRACEIOC_STATUS: {
18468 dtrace_status_t stat;
18469 dtrace_dstate_t *dstate;
18470 int i, j;
18471 uint64_t nerrs;
18472
18473 /*
18474 * See the comment in dtrace_state_deadman() for the reason
18475 * for setting dts_laststatus to INT64_MAX before setting
18476 * it to the correct value.
18477 */
18478 state->dts_laststatus = INT64_MAX;
18479 dtrace_membar_producer();
18480 state->dts_laststatus = dtrace_gethrtime();
18481
18482 bzero(&stat, sizeof (stat));
18483
18484 lck_mtx_lock(&dtrace_lock);
18485
18486 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
18487 lck_mtx_unlock(&dtrace_lock);
18488 return (ENOENT);
18489 }
18490
18491 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
18492 stat.dtst_exiting = 1;
18493
18494 nerrs = state->dts_errors;
18495 dstate = &state->dts_vstate.dtvs_dynvars;
18496
18497 for (i = 0; i < (int)NCPU; i++) {
18498 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
18499
18500 stat.dtst_dyndrops += dcpu->dtdsc_drops;
18501 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
18502 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
18503
18504 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
18505 stat.dtst_filled++;
18506
18507 nerrs += state->dts_buffer[i].dtb_errors;
18508
18509 for (j = 0; j < state->dts_nspeculations; j++) {
18510 dtrace_speculation_t *spec;
18511 dtrace_buffer_t *buf;
18512
18513 spec = &state->dts_speculations[j];
18514 buf = &spec->dtsp_buffer[i];
18515 stat.dtst_specdrops += buf->dtb_xamot_drops;
18516 }
18517 }
18518
18519 stat.dtst_specdrops_busy = state->dts_speculations_busy;
18520 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
18521 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
18522 stat.dtst_dblerrors = state->dts_dblerrors;
18523 stat.dtst_killed =
18524 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
18525 stat.dtst_errors = nerrs;
18526
18527 lck_mtx_unlock(&dtrace_lock);
18528
18529 if (copyout(&stat, arg, sizeof (stat)) != 0)
18530 return (EFAULT);
18531
18532 return (0);
18533 }
18534
18535 case DTRACEIOC_FORMAT: {
18536 dtrace_fmtdesc_t fmt;
18537 char *str;
18538 int len;
18539
18540 if (copyin(arg, &fmt, sizeof (fmt)) != 0)
18541 return (EFAULT);
18542
18543 lck_mtx_lock(&dtrace_lock);
18544
18545 if (fmt.dtfd_format == 0 ||
18546 fmt.dtfd_format > state->dts_nformats) {
18547 lck_mtx_unlock(&dtrace_lock);
18548 return (EINVAL);
18549 }
18550
18551 /*
18552 * Format strings are allocated contiguously and they are
18553 * never freed; if a format index is less than the number
18554 * of formats, we can assert that the format map is non-NULL
18555 * and that the format for the specified index is non-NULL.
18556 */
18557 ASSERT(state->dts_formats != NULL);
4ba76501 18558 str = state->dts_formats[fmt.dtfd_format - 1]->dtf_str;
18559 ASSERT(str != NULL);
18560
18561 len = strlen(str) + 1;
18562
18563 if (len > fmt.dtfd_length) {
18564 fmt.dtfd_length = len;
18565
18566 if (copyout(&fmt, arg, sizeof (fmt)) != 0) {
18567 lck_mtx_unlock(&dtrace_lock);
18568 return (EINVAL);
18569 }
18570 } else {
18571 if (copyout(str, (user_addr_t)fmt.dtfd_string, len) != 0) {
18572 lck_mtx_unlock(&dtrace_lock);
18573 return (EINVAL);
18574 }
18575 }
18576
18577 lck_mtx_unlock(&dtrace_lock);
18578 return (0);
18579 }
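	/*
	 * Illustrative sketch (editorial, not in the original source): the
	 * two-call protocol a consumer follows against the format path above.
	 * A first call with dtfd_length too small succeeds but rewrites
	 * dtfd_length to the required size; a second call with a large enough
	 * buffer copies the format string out. dtfd and format are
	 * hypothetical, and the Darwin pointer-indirection shim is again
	 * elided.
	 */
#if 0	/* userspace sketch, not compiled into the kernel */
	dtrace_fmtdesc_t fmt;

	bzero(&fmt, sizeof (fmt));
	fmt.dtfd_format = format;	/* 1-based format index from a record */

	if (ioctl(dtfd, DTRACEIOC_FORMAT, &fmt) == 0 && fmt.dtfd_length != 0) {
		fmt.dtfd_string = malloc(fmt.dtfd_length);
		if (ioctl(dtfd, DTRACEIOC_FORMAT, &fmt) == 0)
			printf("format string: %s\n", fmt.dtfd_string);
	}
#endif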
18580
18581 case DTRACEIOC_MODUUIDSLIST: {
18582 size_t module_uuids_list_size;
18583 dtrace_module_uuids_list_t* uuids_list;
18584 uint64_t dtmul_count;
18585
18586 /*
18587	 * Security restrictions can make this operation illegal; if they are in
18588	 * effect, DTrace must refuse to provide any fbt probes.
18589 */
3e170ce0 18590 if (dtrace_fbt_probes_restricted()) {
18591 cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_MODUUIDSLIST");
18592 return (EPERM);
18593 }
18594
18595 /*
18596 * Fail if the kernel symbol mode makes this operation illegal.
18597	 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states; it is legal to check
18598	 * for them without holding the dtrace_lock.
18599 */
18600 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
18601 dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) {
18602 cmn_err(CE_WARN, "dtrace_kernel_symbol_mode of %u disallows DTRACEIOC_MODUUIDSLIST", dtrace_kernel_symbol_mode);
18603 return (EPERM);
18604 }
18605
18606 /*
18607	 * Read the number of module UUID entries being passed in.
18608 */
18609 if (copyin(arg + offsetof(dtrace_module_uuids_list_t, dtmul_count),
18610 &dtmul_count,
18611 sizeof(dtmul_count))) {
18612 cmn_err(CE_WARN, "failed to copyin dtmul_count");
18613 return (EFAULT);
18614 }
18615
18616 /*
18617 * Range check the count. More than 2k kexts is probably an error.
18618 */
18619 if (dtmul_count > 2048) {
18620 cmn_err(CE_WARN, "dtmul_count is not valid");
18621 return (EINVAL);
18622 }
18623
18624 /*
18625 * For all queries, we return EINVAL when the user specified
18626 * count does not match the actual number of modules we find
18627 * available.
18628 *
18629 * If the user specified count is zero, then this serves as a
18630 * simple query to count the available modules in need of symbols.
18631 */
18632
18633 rval = 0;
18634
18635 if (dtmul_count == 0)
18636 {
18637 lck_mtx_lock(&mod_lock);
18638 struct modctl* ctl = dtrace_modctl_list;
18639 while (ctl) {
18640 ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
d9a64523 18641 if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
18642 dtmul_count++;
18643 rval = EINVAL;
18644 }
18645 ctl = ctl->mod_next;
18646 }
18647 lck_mtx_unlock(&mod_lock);
18648
18649 if (copyout(&dtmul_count, arg, sizeof (dtmul_count)) != 0)
18650 return (EFAULT);
18651 else
18652 return (rval);
18653 }
18654
18655 /*
18656 * If we reach this point, then we have a request for full list data.
18657 * Allocate a correctly sized structure and copyin the data.
18658 */
18659 module_uuids_list_size = DTRACE_MODULE_UUIDS_LIST_SIZE(dtmul_count);
18660 if ((uuids_list = kmem_alloc(module_uuids_list_size, KM_SLEEP)) == NULL)
18661 return (ENOMEM);
18662
18663 /* NOTE! We can no longer exit this method via return */
18664 if (copyin(arg, uuids_list, module_uuids_list_size) != 0) {
18665 cmn_err(CE_WARN, "failed copyin of dtrace_module_uuids_list_t");
18666 rval = EFAULT;
18667 goto moduuidslist_cleanup;
18668 }
18669
18670 /*
18671 * Check that the count didn't change between the first copyin and the second.
18672 */
18673 if (uuids_list->dtmul_count != dtmul_count) {
18674 rval = EINVAL;
18675 goto moduuidslist_cleanup;
18676 }
18677
18678 /*
18679	 * Build the list of UUIDs that need symbols
18680 */
18681 lck_mtx_lock(&mod_lock);
18682
18683 dtmul_count = 0;
18684
18685 struct modctl* ctl = dtrace_modctl_list;
18686 while (ctl) {
18687 /*
18688 * We assume that userspace symbols will be "better" than kernel level symbols,
18689 * as userspace can search for dSYM(s) and symbol'd binaries. Even if kernel syms
18690 * are available, add user syms if the module might use them.
18691 */
18692 ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
d9a64523 18693 if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
18694 UUID* uuid = &uuids_list->dtmul_uuid[dtmul_count];
18695 if (dtmul_count++ < uuids_list->dtmul_count) {
18696 memcpy(uuid, ctl->mod_uuid, sizeof(UUID));
18697 }
18698 }
18699 ctl = ctl->mod_next;
18700 }
18701
18702 lck_mtx_unlock(&mod_lock);
18703
18704 if (uuids_list->dtmul_count < dtmul_count)
18705 rval = EINVAL;
18706
18707 uuids_list->dtmul_count = dtmul_count;
18708
18709 /*
18710 * Copyout the symbols list (or at least the count!)
18711 */
18712 if (copyout(uuids_list, arg, module_uuids_list_size) != 0) {
18713	 cmn_err(CE_WARN, "failed copyout of dtrace_module_uuids_list_t");
18714 rval = EFAULT;
18715 }
18716
18717 moduuidslist_cleanup:
18718 /*
18719 * If we had to allocate struct memory, free it.
18720 */
18721 if (uuids_list != NULL) {
18722 kmem_free(uuids_list, module_uuids_list_size);
18723 }
18724
18725 return rval;
18726 }
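	/*
	 * Illustrative sketch (editorial, not in the original source): the
	 * count-then-fetch protocol a userspace symbol provider follows against
	 * the path above. A first call with dtmul_count == 0 returns EINVAL
	 * with the count filled in; the caller then allocates a correctly
	 * sized list and repeats the call. dtfd is hypothetical and the Darwin
	 * pointer-indirection shim is elided.
	 */
#if 0	/* userspace sketch, not compiled into the kernel */
	dtrace_module_uuids_list_t query, *list;
	uint64_t count;

	bzero(&query, sizeof (query));
	(void) ioctl(dtfd, DTRACEIOC_MODUUIDSLIST, &query);	/* pass 1: count */
	count = query.dtmul_count;

	list = calloc(1, DTRACE_MODULE_UUIDS_LIST_SIZE(count));
	list->dtmul_count = count;
	if (ioctl(dtfd, DTRACEIOC_MODUUIDSLIST, list) == 0) {	/* pass 2: fetch */
		/* list->dtmul_uuid[0 .. count-1] name the kexts needing symbols */
	}
#endif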
18727
18728 case DTRACEIOC_PROVMODSYMS: {
18729 size_t module_symbols_size;
18730 dtrace_module_symbols_t* module_symbols;
18731 uint64_t dtmodsyms_count;
18732
18733 /*
18734	 * Security restrictions can make this operation illegal; if they are in
18735	 * effect, DTrace must refuse to provide any fbt probes.
18736 */
3e170ce0 18737 if (dtrace_fbt_probes_restricted()) {
18738	 cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_PROVMODSYMS");
18739 return (EPERM);
18740 }
18741
18742 /*
18743 * Fail if the kernel symbol mode makes this operation illegal.
18744	 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states; it is legal to check
18745	 * for them without holding the dtrace_lock.
18746 */
18747 if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
18748 dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) {
18749 cmn_err(CE_WARN, "dtrace_kernel_symbol_mode of %u disallows DTRACEIOC_PROVMODSYMS", dtrace_kernel_symbol_mode);
18750 return (EPERM);
18751 }
18752
18753 /*
18754 * Read the number of module symbols structs being passed in.
18755 */
18756 if (copyin(arg + offsetof(dtrace_module_symbols_t, dtmodsyms_count),
18757 &dtmodsyms_count,
18758 sizeof(dtmodsyms_count))) {
18759 cmn_err(CE_WARN, "failed to copyin dtmodsyms_count");
18760 return (EFAULT);
18761 }
18762
18763 /*
18764	 * Range check the count: zero is invalid, and the total size is
18765	 * bounded against the copy maximum just below.
18766 */
ea3f0419 18767 if (dtmodsyms_count == 0) {
18768 cmn_err(CE_WARN, "dtmodsyms_count is not valid");
18769 return (EINVAL);
18770 }
18771
18772 /*
18773 * Allocate a correctly sized structure and copyin the data.
18774 */
18775 module_symbols_size = DTRACE_MODULE_SYMBOLS_SIZE(dtmodsyms_count);
18776 if (module_symbols_size > (size_t)dtrace_copy_maxsize()) {
18777 size_t dtmodsyms_max = DTRACE_MODULE_SYMBOLS_COUNT(dtrace_copy_maxsize());
18778	 cmn_err(CE_WARN, "dtmodsyms_count %llu is too high, maximum is %lu", (unsigned long long)dtmodsyms_count, (unsigned long)dtmodsyms_max);
18779 return (ENOBUFS);
18780 }
18781
18782 if ((module_symbols = kmem_alloc(module_symbols_size, KM_SLEEP)) == NULL)
18783 return (ENOMEM);
18784
18785 rval = 0;
18786
18787 /* NOTE! We can no longer exit this method via return */
18788 if (copyin(arg, module_symbols, module_symbols_size) != 0) {
39037602 18789 cmn_err(CE_WARN, "failed copyin of dtrace_module_symbols_t");
18790 rval = EFAULT;
18791 goto module_symbols_cleanup;
18792 }
18793
18794 /*
18795 * Check that the count didn't change between the first copyin and the second.
18796 */
18797 if (module_symbols->dtmodsyms_count != dtmodsyms_count) {
18798 rval = EINVAL;
18799 goto module_symbols_cleanup;
18800 }
18801
18802 /*
18803 * Find the modctl to add symbols to.
18804 */
18805 lck_mtx_lock(&dtrace_provider_lock);
18806 lck_mtx_lock(&mod_lock);
18807
18808 struct modctl* ctl = dtrace_modctl_list;
18809 while (ctl) {
18810 ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
18811 if (MOD_HAS_UUID(ctl) && !MOD_SYMBOLS_DONE(ctl) && memcmp(module_symbols->dtmodsyms_uuid, ctl->mod_uuid, sizeof(UUID)) == 0) {
18812 dtrace_provider_t *prv;
18813 ctl->mod_user_symbols = module_symbols;
18814
18815 /*
18816	 * We're going to call each provider's per-module provide operation
18817 * specifying only this module.
18818 */
18819 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
18820 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
18821 /*
18822	 * We gave every provider a chance to provide with the user syms; go ahead and clear them
18823 */
18824 ctl->mod_user_symbols = NULL; /* MUST reset this to clear HAS_USERSPACE_SYMBOLS */
18825 }
18826 ctl = ctl->mod_next;
18827 }
18828
18829 lck_mtx_unlock(&mod_lock);
18830 lck_mtx_unlock(&dtrace_provider_lock);
18831
18832 module_symbols_cleanup:
18833 /*
18834 * If we had to allocate struct memory, free it.
18835 */
18836 if (module_symbols != NULL) {
18837 kmem_free(module_symbols, module_symbols_size);
18838 }
18839
18840 return rval;
18841 }
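	/*
	 * Illustrative sketch (editorial, not in the original source): the
	 * push side of the symbol handshake. After DTRACEIOC_MODUUIDSLIST
	 * reports which kexts still need symbols, userspace fills in a
	 * dtrace_module_symbols_t for one UUID at a time and pushes it down.
	 * Only dtmodsyms_count and dtmodsyms_uuid are visible above; nsyms,
	 * uuid, dtfd and the per-symbol layout are assumptions.
	 */
#if 0	/* userspace sketch, not compiled into the kernel */
	dtrace_module_symbols_t *syms;

	syms = calloc(1, DTRACE_MODULE_SYMBOLS_SIZE(nsyms));
	syms->dtmodsyms_count = nsyms;
	memcpy(syms->dtmodsyms_uuid, uuid, sizeof (UUID));
	/* ...fill in the per-symbol entries... */
	(void) ioctl(dtfd, DTRACEIOC_PROVMODSYMS, syms);
#endif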
18842
18843 case DTRACEIOC_PROCWAITFOR: {
18844 dtrace_procdesc_t pdesc = {
3e170ce0 18845 .p_name = {0},
18846 .p_pid = -1
18847 };
18848
18849 if ((rval = copyin(arg, &pdesc, sizeof(pdesc))) != 0)
18850 goto proc_waitfor_error;
18851
18852 if ((rval = dtrace_proc_waitfor(&pdesc)) != 0)
18853 goto proc_waitfor_error;
18854
18855 if ((rval = copyout(&pdesc, arg, sizeof(pdesc))) != 0)
18856 goto proc_waitfor_error;
18857
18858 return 0;
18859
18860 proc_waitfor_error:
18861	 /* The process was suspended; revert this, since the client will not do it. */
18862 if (pdesc.p_pid != -1) {
18863 proc_t *proc = proc_find(pdesc.p_pid);
18864 if (proc != PROC_NULL) {
18865 task_pidresume(proc->task);
18866 proc_rele(proc);
18867 }
18868 }
18869
18870 return rval;
18871 }
18872
18873 default:
18874 break;
18875 }
18876
18877 return (ENOTTY);
18878}
b0d623f7 18879
18880/*
18881 * APPLE NOTE: dtrace_detach not implemented
18882 */
18883#if !defined(__APPLE__)
18884/*ARGSUSED*/
18885static int
18886dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
18887{
18888 dtrace_state_t *state;
18889
18890 switch (cmd) {
18891 case DDI_DETACH:
18892 break;
18893
18894 case DDI_SUSPEND:
18895 return (DDI_SUCCESS);
18896
18897 default:
18898 return (DDI_FAILURE);
18899 }
18900
18901 lck_mtx_lock(&cpu_lock);
18902 lck_mtx_lock(&dtrace_provider_lock);
18903 lck_mtx_lock(&dtrace_lock);
18904
18905 ASSERT(dtrace_opens == 0);
18906
18907 if (dtrace_helpers > 0) {
2d21ac55 18908 lck_mtx_unlock(&dtrace_lock);
fe8ab488 18909 lck_mtx_unlock(&dtrace_provider_lock);
18910 lck_mtx_unlock(&cpu_lock);
18911 return (DDI_FAILURE);
18912 }
18913
18914 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
2d21ac55 18915 lck_mtx_unlock(&dtrace_lock);
fe8ab488 18916 lck_mtx_unlock(&dtrace_provider_lock);
18917 lck_mtx_unlock(&cpu_lock);
18918 return (DDI_FAILURE);
18919 }
18920
18921 dtrace_provider = NULL;
18922
18923 if ((state = dtrace_anon_grab()) != NULL) {
18924 /*
18925 * If there were ECBs on this state, the provider should
18926 * have not been allowed to detach; assert that there is
18927 * none.
18928 */
18929 ASSERT(state->dts_necbs == 0);
18930 dtrace_state_destroy(state);
18931
18932 /*
18933 * If we're being detached with anonymous state, we need to
18934 * indicate to the kernel debugger that DTrace is now inactive.
18935 */
18936 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
18937 }
18938
18939 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
18940 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
18941 dtrace_cpu_init = NULL;
18942 dtrace_helpers_cleanup = NULL;
18943 dtrace_helpers_fork = NULL;
18944 dtrace_cpustart_init = NULL;
18945 dtrace_cpustart_fini = NULL;
18946 dtrace_debugger_init = NULL;
18947 dtrace_debugger_fini = NULL;
18948 dtrace_kreloc_init = NULL;
18949 dtrace_kreloc_fini = NULL;
18950 dtrace_modload = NULL;
18951 dtrace_modunload = NULL;
18952
18953 lck_mtx_unlock(&cpu_lock);
18954
18955 if (dtrace_helptrace_enabled) {
18956 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
18957 dtrace_helptrace_buffer = NULL;
18958 }
18959
18960 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
18961 dtrace_probes = NULL;
18962 dtrace_nprobes = 0;
18963
18964 dtrace_hash_destroy(dtrace_strings);
18965 dtrace_hash_destroy(dtrace_byprov);
18966 dtrace_hash_destroy(dtrace_bymod);
18967 dtrace_hash_destroy(dtrace_byfunc);
18968 dtrace_hash_destroy(dtrace_byname);
18969 dtrace_strings = NULL;
18970 dtrace_byprov = NULL;
18971 dtrace_bymod = NULL;
18972 dtrace_byfunc = NULL;
18973 dtrace_byname = NULL;
18974
18975 kmem_cache_destroy(dtrace_state_cache);
18976 vmem_destroy(dtrace_arena);
18977
18978 if (dtrace_toxrange != NULL) {
18979 kmem_free(dtrace_toxrange,
18980 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
18981 dtrace_toxrange = NULL;
18982 dtrace_toxranges = 0;
18983 dtrace_toxranges_max = 0;
18984 }
18985
18986 ddi_remove_minor_node(dtrace_devi, NULL);
18987 dtrace_devi = NULL;
18988
18989 ddi_soft_state_fini(&dtrace_softstate);
18990
18991 ASSERT(dtrace_vtime_references == 0);
18992 ASSERT(dtrace_opens == 0);
18993 ASSERT(dtrace_retained == NULL);
18994
18995 lck_mtx_unlock(&dtrace_lock);
18996 lck_mtx_unlock(&dtrace_provider_lock);
18997
d9a64523 18998#ifdef illumos
18999 /*
19000 * We don't destroy the task queue until after we have dropped our
19001 * locks (taskq_destroy() may block on running tasks). To prevent
19002 * attempting to do work after we have effectively detached but before
19003 * the task queue has been destroyed, all tasks dispatched via the
19004 * task queue must check that DTrace is still attached before
19005 * performing any operation.
19006 */
19007 taskq_destroy(dtrace_taskq);
19008 dtrace_taskq = NULL;
d9a64523 19009#endif
19010
19011 return (DDI_SUCCESS);
19012}
fe8ab488 19013#endif /* !defined(__APPLE__) */
19014
19015d_open_t _dtrace_open, helper_open;
19016d_close_t _dtrace_close, helper_close;
19017d_ioctl_t _dtrace_ioctl, helper_ioctl;
19018
19019int
19020_dtrace_open(dev_t dev, int flags, int devtype, struct proc *p)
19021{
19022#pragma unused(p)
19023 dev_t locdev = dev;
19024
19025 return dtrace_open( &locdev, flags, devtype, CRED());
19026}
19027
19028int
19029helper_open(dev_t dev, int flags, int devtype, struct proc *p)
19030{
19031#pragma unused(dev,flags,devtype,p)
19032 return 0;
19033}
19034
19035int
19036_dtrace_close(dev_t dev, int flags, int devtype, struct proc *p)
19037{
19038#pragma unused(p)
19039 return dtrace_close( dev, flags, devtype, CRED());
19040}
19041
19042int
19043helper_close(dev_t dev, int flags, int devtype, struct proc *p)
19044{
19045#pragma unused(dev,flags,devtype,p)
19046 return 0;
19047}
19048
19049int
19050_dtrace_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
19051{
19052#pragma unused(p)
19053 int err, rv = 0;
19054 user_addr_t uaddrp;
19055
19056 if (proc_is64bit(p))
19057 uaddrp = *(user_addr_t *)data;
19058 else
19059 uaddrp = (user_addr_t) *(uint32_t *)data;
2d21ac55 19060
b0d623f7 19061 err = dtrace_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2d21ac55 19062
b0d623f7 19063 /* Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
19064 if (err != 0) {
19065 ASSERT( (err & 0xfffff000) == 0 );
b0d623f7 19066 return (err & 0xfff); /* ioctl will return -1 and will set errno to an error code < 4096 */
19067 } else if (rv != 0) {
19068 ASSERT( (rv & 0xfff00000) == 0 );
b0d623f7 19069 return (((rv & 0xfffff) << 12)); /* ioctl will return -1 and will set errno to a value >= 4096 */
19070 } else
19071 return 0;
19072}
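/*
 * Illustrative sketch (editorial, not in the original source): undoing the
 * errno overloading above from userspace. A failed DTrace ioctl leaves the
 * encoded value in errno: values below 4096 are genuine error codes, while
 * values at or above 4096 carry the Solaris-style rv shifted left by 12
 * bits. The helper name is hypothetical.
 */
#if 0	/* userspace sketch, not compiled into the kernel */
static int
dtrace_decode_errno(int e, int *rvp)
{
	if (e < 4096) {
		*rvp = 0;
		return (e);		/* a real error code */
	}
	*rvp = e >> 12;			/* the overloaded rv */
	return (0);
}
#endif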
19073
19074int
19075helper_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
19076{
19077#pragma unused(dev,fflag,p)
19078 int err, rv = 0;
19079
19080 err = dtrace_ioctl_helper(cmd, data, &rv);
19081 /* Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
19082 if (err != 0) {
19083 ASSERT( (err & 0xfffff000) == 0 );
b0d623f7 19084 return (err & 0xfff); /* ioctl will return -1 and will set errno to an error code < 4096 */
19085 } else if (rv != 0) {
19086 ASSERT( (rv & 0xfff00000) == 0 );
b0d623f7 19087 return (((rv & 0xfffff) << 12)); /* ioctl will return -1 and will set errno to a value >= 4096 */
19088 } else
19089 return 0;
19090}
19091
19092#define HELPER_MAJOR -24 /* let the kernel pick the device number */
19093
19094const static struct cdevsw helper_cdevsw =
19095{
19096 .d_open = helper_open,
19097 .d_close = helper_close,
19098 .d_read = eno_rdwrt,
19099 .d_write = eno_rdwrt,
19100 .d_ioctl = helper_ioctl,
19101 .d_stop = (stop_fcn_t *)nulldev,
19102 .d_reset = (reset_fcn_t *)nulldev,
19103 .d_select = eno_select,
19104 .d_mmap = eno_mmap,
19105 .d_strategy = eno_strat,
19106 .d_reserved_1 = eno_getc,
19107 .d_reserved_2 = eno_putc,
19108};
19109
19110static int helper_majdevno = 0;
19111
19112static int gDTraceInited = 0;
19113
19114void
19115helper_init( void )
19116{
19117 /*
19118 * Once the "helper" is initialized, it can take ioctl calls that use locks
19119 * and zones initialized in dtrace_init. Make certain dtrace_init was called
19120 * before us.
19121 */
19122
19123 if (!gDTraceInited) {
19124 panic("helper_init before dtrace_init\n");
19125 }
19126
19127 if (0 >= helper_majdevno)
19128 {
19129 helper_majdevno = cdevsw_add(HELPER_MAJOR, &helper_cdevsw);
19130
19131 if (helper_majdevno < 0) {
19132 printf("helper_init: failed to allocate a major number!\n");
19133 return;
19134 }
19135
19136 if (NULL == devfs_make_node( makedev(helper_majdevno, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
19137 DTRACEMNR_HELPER, 0 )) {
19138	 printf("helper_init: failed to devfs_make_node for helper!\n");
19139 return;
19140 }
19141 } else
19142 panic("helper_init: called twice!\n");
19143}
19144
19145#undef HELPER_MAJOR
19146
19147static int
19148dtrace_clone_func(dev_t dev, int action)
19149{
19150#pragma unused(dev)
19151
19152 if (action == DEVFS_CLONE_ALLOC) {
39037602 19153 return dtrace_state_reserve();
2d21ac55
A
19154 }
19155 else if (action == DEVFS_CLONE_FREE) {
19156 return 0;
19157 }
19158 else return -1;
19159}
19160
19161void dtrace_ast(void);
19162
19163void
19164dtrace_ast(void)
19165{
19166 int i;
cb323159 19167 uint32_t clients = os_atomic_xchg(&dtrace_wake_clients, 0, relaxed);
19168 if (clients == 0)
19169 return;
19170	 /*
19171	 * Disable preemption to be sure that we won't be interrupted
19172	 * by a wakeup of a higher-priority thread before we have
19173	 * issued all of the wakeups.
19174	 */
19175 disable_preemption();
19176 for (i = 0; i < DTRACE_NCLIENTS; i++) {
19177 if (clients & (1 << i)) {
19178 dtrace_state_t *state = dtrace_state_get(i);
19179 if (state) {
19180 wakeup(state);
19181 }
19182
19183 }
19184 }
19185 enable_preemption();
19186}
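/*
 * Illustrative sketch (editorial, not in the original source): the producer
 * side that dtrace_ast() pairs with. The real call sites live elsewhere in
 * the DTrace code; this only assumes that waking client i means atomically
 * setting bit i of dtrace_wake_clients and arranging for an AST, to match
 * the os_atomic_xchg() consumer above. The function name is hypothetical.
 */
#if 0
static void
dtrace_wake_client(int minor)
{
	os_atomic_or(&dtrace_wake_clients, 1 << minor, relaxed);
	/* ...then post the AST so dtrace_ast() runs and issues the wakeup */
}
#endif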
19187
19188
19189#define DTRACE_MAJOR -24 /* let the kernel pick the device number */
19190
19191static const struct cdevsw dtrace_cdevsw =
19192{
19193 .d_open = _dtrace_open,
19194 .d_close = _dtrace_close,
19195 .d_read = eno_rdwrt,
19196 .d_write = eno_rdwrt,
19197 .d_ioctl = _dtrace_ioctl,
19198 .d_stop = (stop_fcn_t *)nulldev,
19199 .d_reset = (reset_fcn_t *)nulldev,
19200 .d_select = eno_select,
19201 .d_mmap = eno_mmap,
19202 .d_strategy = eno_strat,
19203 .d_reserved_1 = eno_getc,
19204 .d_reserved_2 = eno_putc,
19205};
19206
19207LCK_ATTR_DECLARE(dtrace_lck_attr, 0, 0);
19208LCK_GRP_DECLARE(dtrace_lck_grp, "dtrace");
19209
19210static int gMajDevNo;
19211
19212void dtrace_early_init (void)
19213{
19214 dtrace_restriction_policy_load();
19215
19216 /*
19217 * See dtrace_impl.h for a description of kernel symbol modes.
19218 * The default is to wait for symbols from userspace (lazy symbols).
19219 */
19220 if (!PE_parse_boot_argn("dtrace_kernel_symbol_mode", &dtrace_kernel_symbol_mode, sizeof (dtrace_kernel_symbol_mode))) {
19221 dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
19222 }
19223}
19224
19225void
19226dtrace_init( void )
19227{
19228 if (0 == gDTraceInited) {
f427ee49 19229 unsigned int i, ncpu;
fe8ab488 19230 size_t size = sizeof(dtrace_buffer_memory_maxsize);
2d21ac55 19231
19232 /*
19233 * Disable destructive actions when dtrace is running
19234 * in a restricted environment
19235 */
19236 dtrace_destructive_disallow = dtrace_is_restricted() &&
19237 !dtrace_are_restrictions_relaxed();
19238
19239 /*
19240 * DTrace allocates buffers based on the maximum number
19241 * of enabled cpus. This call avoids any race when finding
19242 * that count.
19243 */
19244 ASSERT(dtrace_max_cpus == 0);
f427ee49 19245 ncpu = dtrace_max_cpus = ml_wait_max_cpus();
19246
19247 /*
19247	 * Retrieve the size of physical memory in order to define the
19248	 * maximum size of the state buffer memory. If we cannot retrieve
19249	 * this value, assume 1 GB of memory per CPU; that's still better
19250	 * than raising a kernel panic.
19252 */
19253 if (0 != kernel_sysctlbyname("hw.memsize", &dtrace_buffer_memory_maxsize,
19254 &size, NULL, 0))
19255 {
19256	 dtrace_buffer_memory_maxsize = (uint64_t)ncpu * 1024 * 1024 * 1024;
19257 printf("dtrace_init: failed to retrieve the hw.memsize, defaulted to %lld bytes\n",
19258 dtrace_buffer_memory_maxsize);
19259 }
19260
19261 /*
19262 * Finally, divide by three to prevent DTrace from eating too
19263 * much memory.
19264 */
19265 dtrace_buffer_memory_maxsize /= 3;
19266 ASSERT(dtrace_buffer_memory_maxsize > 0);
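	/*
	 * Worked example (editorial, not in the original source): on a
	 * machine reporting hw.memsize of 16 GB, the cap works out to
	 * 16 GB / 3, roughly 5.3 GB; in the fallback case with, say,
	 * 4 CPUs, the 4 GB estimate divides down to roughly 1.3 GB.
	 */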
19267
19268 gMajDevNo = cdevsw_add(DTRACE_MAJOR, &dtrace_cdevsw);
19269
19270 if (gMajDevNo < 0) {
19271 printf("dtrace_init: failed to allocate a major number!\n");
19272 gDTraceInited = 0;
19273 return;
19274 }
19275
19276 if (NULL == devfs_make_node_clone( makedev(gMajDevNo, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
19277 dtrace_clone_func, DTRACEMNR_DTRACE, 0 )) {
19278 printf("dtrace_init: failed to devfs_make_node_clone for dtrace!\n");
19279 gDTraceInited = 0;
19280 return;
19281 }
19282
19283 /*
19284 * The cpu_core structure consists of per-CPU state available in any context.
19285 * On some architectures, this may mean that the page(s) containing the
19286 * NCPU-sized array of cpu_core structures must be locked in the TLB -- it
19287 * is up to the platform to assure that this is performed properly. Note that
19288 * the structure is sized to avoid false sharing.
19289 */
2d21ac55 19290
19291 /*
19292 * Initialize the CPU offline/online hooks.
19293 */
19294 dtrace_install_cpu_hooks();
19295
19296 dtrace_modctl_list = NULL;
19297
19298 cpu_core = (cpu_core_t *)kmem_zalloc( ncpu * sizeof(cpu_core_t), KM_SLEEP );
19299 for (i = 0; i < ncpu; ++i) {
c3c9b80d 19300 lck_mtx_init(&cpu_core[i].cpuc_pid_lock, &dtrace_lck_grp, &dtrace_lck_attr);
19301 }
19302
6d2010ae 19303 cpu_list = (dtrace_cpu_t *)kmem_zalloc( ncpu * sizeof(dtrace_cpu_t), KM_SLEEP );
19304 for (i = 0; i < ncpu; ++i) {
19305 cpu_list[i].cpu_id = (processorid_t)i;
19306 cpu_list[i].cpu_next = &(cpu_list[(i+1) % ncpu]);
fe8ab488 19307 LIST_INIT(&cpu_list[i].cpu_cyc_list);
c3c9b80d 19308 lck_rw_init(&cpu_list[i].cpu_ft_lock, &dtrace_lck_grp, &dtrace_lck_attr);
19309 }
19310
19311 lck_mtx_lock(&cpu_lock);
19312 for (i = 0; i < ncpu; ++i)
39037602 19313 /* FIXME: track CPU configuration */
19314 dtrace_cpu_setup_initial( (processorid_t)i ); /* In lieu of register_cpu_setup_func() callback */
19315 lck_mtx_unlock(&cpu_lock);
19316
19317	 (void)dtrace_abs_to_nano(0LL); /* Force a once-only call to clock_timebase_info (which can take a lock) */
19318
19319 dtrace_strings = dtrace_hash_create(dtrace_strkey_offset,
19320 offsetof(dtrace_string_t, dtst_str),
19321 offsetof(dtrace_string_t, dtst_next),
19322 offsetof(dtrace_string_t, dtst_prev));
19323
19324 /*
19325 * See dtrace_impl.h for a description of dof modes.
19326 * The default is lazy dof.
19327 *
b0d623f7 19328 * FIXME: Warn if state is LAZY_OFF? It won't break anything, but
2d21ac55
A
19329 * makes no sense...
19330 */
593a1d5f 19331 if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) {
f427ee49 19332#if defined(XNU_TARGET_OS_OSX)
2d21ac55 19333 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
19334#else
19335 dtrace_dof_mode = DTRACE_DOF_MODE_NEVER;
5ba3f43e 19336#endif
19337 }
19338
19339 /*
19340 * Sanity check of dof mode value.
19341 */
19342 switch (dtrace_dof_mode) {
19343 case DTRACE_DOF_MODE_NEVER:
19344 case DTRACE_DOF_MODE_LAZY_ON:
19345 /* valid modes, but nothing else we need to do */
19346 break;
19347
19348 case DTRACE_DOF_MODE_LAZY_OFF:
19349 case DTRACE_DOF_MODE_NON_LAZY:
19350 /* Cannot wait for a dtrace_open to init fasttrap */
19351 fasttrap_init();
19352 break;
19353
19354 default:
19355	 /* Invalid; clamp to non-lazy */
19356 dtrace_dof_mode = DTRACE_DOF_MODE_NON_LAZY;
19357 fasttrap_init();
19358 break;
19359 }
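		/*
		 * Example (editorial, not in the original source): the mode can
		 * be forced from the boot command line, e.g.
		 * boot-args="dtrace_dof_mode=<n>", where <n> is one of the
		 * DTRACE_DOF_MODE_* encodings defined in dtrace_impl.h.
		 */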
19360
19361#if CONFIG_DTRACE
19362 if (dtrace_dof_mode != DTRACE_DOF_MODE_NEVER)
19363 commpage_update_dof(true);
19364#endif
19365
19366 gDTraceInited = 1;
19367
19368 } else
19369 panic("dtrace_init: called twice!\n");
19370}
19371
19372void
19373dtrace_postinit(void)
19374{
19375 /*
19376	 * Called from bsd_init after all providers' *_init() routines have been
19377 * run. That way, anonymous DOF enabled under dtrace_attach() is safe
19378 * to go.
19379 */
d9a64523 19380 dtrace_attach( (dev_info_t *)(uintptr_t)makedev(gMajDevNo, 0)); /* Punning a dev_t to a dev_info_t* */
19381
19382 /*
19383 * Add the mach_kernel to the module list for lazy processing
19384 */
19385 struct kmod_info fake_kernel_kmod;
19386 memset(&fake_kernel_kmod, 0, sizeof(fake_kernel_kmod));
19387
19388 strlcpy(fake_kernel_kmod.name, "mach_kernel", sizeof(fake_kernel_kmod.name));
19389 fake_kernel_kmod.id = 1;
19390 fake_kernel_kmod.address = g_kernel_kmod_info.address;
19391 fake_kernel_kmod.size = g_kernel_kmod_info.size;
19392
316670eb 19393 if (dtrace_module_loaded(&fake_kernel_kmod, 0) != 0) {
19394 printf("dtrace_postinit: Could not register mach_kernel modctl\n");
19395 }
19396
19397 (void)OSKextRegisterKextsWithDTrace();
19398}
19399#undef DTRACE_MAJOR
19400
19401/*
19402 * Routines used to register interest in cpu's being added to or removed
19403 * from the system.
19404 */
19405void
19406register_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
19407{
19408#pragma unused(ignore1,ignore2)
19409}
19410
19411void
19412unregister_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
19413{
19414#pragma unused(ignore1,ignore2)
19415}