apple/xnu.git (xnu-1228): bsd/dev/dtrace/dtrace.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /* #pragma ident "@(#)dtrace.c 1.49 06/08/11 SMI" */
28
29 /*
30 * DTrace - Dynamic Tracing for Solaris
31 *
32 * This is the implementation of the Solaris Dynamic Tracing framework
33 * (DTrace). The user-visible interface to DTrace is described at length in
34 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
35 * library, the in-kernel DTrace framework, and the DTrace providers are
36 * described in the block comments in the <sys/dtrace.h> header file. The
37 * internal architecture of DTrace is described in the block comments in the
38 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
39 * implementation very much assume mastery of all of these sources; if one has
40 * an unanswered question about the implementation, one should consult them
41 * first.
42 *
43 * The functions here are ordered roughly as follows:
44 *
45 * - Probe context functions
46 * - Probe hashing functions
47 * - Non-probe context utility functions
48 * - Matching functions
49 * - Provider-to-Framework API functions
50 * - Probe management functions
51 * - DIF object functions
52 * - Format functions
53 * - Predicate functions
54 * - ECB functions
55 * - Buffer functions
56 * - Enabling functions
57 * - DOF functions
58 * - Anonymous enabling functions
59 * - Consumer state functions
60 * - Helper functions
61 * - Hook functions
62 * - Driver cookbook functions
63 *
64 * Each group of functions begins with a block comment labelled the "DTrace
65 * [Group] Functions", allowing one to find each block by searching forward
66 * on capital-f functions.
67 */
68
69 #define _DTRACE_WANT_PROC_GLUE_ 1
70
71 #include <sys/errno.h>
72 #include <sys/types.h>
73 #include <sys/stat.h>
74 #include <sys/conf.h>
75 #include <sys/systm.h>
76 #include <sys/dtrace_impl.h>
77 #include <sys/param.h>
78 #include <sys/ioctl.h>
79 #include <sys/fcntl.h>
80 #include <miscfs/devfs/devfs.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel_types.h>
83 #include <sys/proc_internal.h>
84 #include <sys/uio_internal.h>
85 #include <sys/kauth.h>
86 #include <vm/pmap.h>
87 #include <sys/user.h>
88 #include <mach/exception_types.h>
89 #include <sys/signalvar.h>
90 #include <kern/zalloc.h>
91
92 #define t_predcache t_dtrace_predcache /* Cosmetic. Helps readability of thread.h */
93
94 extern void dtrace_suspend(void);
95 extern void dtrace_resume(void);
96 extern void dtrace_init(void);
97 extern void helper_init(void);
98
99 #if defined(__APPLE__)
100
101 #include "../../../osfmk/chud/chud_dtrace.h"
102
103 extern kern_return_t chudxnu_dtrace_callback
104 (uint64_t selector, uint64_t *args, uint32_t count);
105 #endif
106
107 /*
108 * DTrace Tunable Variables
109 *
110 * The following variables may be tuned by adding a line to /etc/system that
111 * includes both the name of the DTrace module ("dtrace") and the name of the
112 * variable. For example:
113 *
114 * set dtrace:dtrace_destructive_disallow = 1
115 *
116 * In general, the only variables that one should be tuning this way are those
117 * that affect system-wide DTrace behavior, and for which the default behavior
118 * is undesirable. Most of these variables are tunable on a per-consumer
119 * basis using DTrace options, and need not be tuned on a system-wide basis.
120 * When tuning these variables, avoid pathological values; while some attempt
121 * is made to verify the integrity of these variables, they are not considered
122 * part of the supported interface to DTrace, and they are therefore not
123 * checked comprehensively. Further, these variables should not be tuned
124 * dynamically via "mdb -kw" or other means; they should only be tuned via
125 * /etc/system.
126 */
127 int dtrace_destructive_disallow = 0;
128 #if defined(__APPLE__)
129 #define proc_t struct proc
130 #endif /* __APPLE__ */
131 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
132 size_t dtrace_difo_maxsize = (256 * 1024);
133 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
134 size_t dtrace_global_maxsize = (16 * 1024);
135 size_t dtrace_actions_max = (16 * 1024);
136 size_t dtrace_retain_max = 1024;
137 dtrace_optval_t dtrace_helper_actions_max = 32;
138 dtrace_optval_t dtrace_helper_providers_max = 32;
139 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
140 size_t dtrace_strsize_default = 256;
141 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
142 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
143 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
144 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
145 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
146 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
147 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
148 dtrace_optval_t dtrace_nspec_default = 1;
149 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
150 dtrace_optval_t dtrace_stackframes_default = 20;
151 dtrace_optval_t dtrace_ustackframes_default = 20;
152 dtrace_optval_t dtrace_jstackframes_default = 50;
153 dtrace_optval_t dtrace_jstackstrsize_default = 512;
154 int dtrace_msgdsize_max = 128;
155 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
156 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
157 int dtrace_devdepth_max = 32;
158 int dtrace_err_verbose;
159 hrtime_t dtrace_deadman_interval = NANOSEC;
160 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
161 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
162
163 /*
164 * DTrace External Variables
165 *
166 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
167 * available to DTrace consumers via the backtick (`) syntax. One of these,
168 * dtrace_zero, is made deliberately so: it is provided as a source of
169 * well-known, zero-filled memory. While this variable is not documented,
170 * it is used by some translators as an implementation detail.
171 */
172 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
173
174 /*
175 * DTrace Internal Variables
176 */
177 static dev_info_t *dtrace_devi; /* device info */
178 static vmem_t *dtrace_arena; /* probe ID arena */
179 static vmem_t *dtrace_minor; /* minor number arena */
180 static taskq_t *dtrace_taskq; /* task queue */
181 static dtrace_probe_t **dtrace_probes; /* array of all probes */
182 static int dtrace_nprobes; /* number of probes */
183 static dtrace_provider_t *dtrace_provider; /* provider list */
184 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
185 static int dtrace_opens; /* number of opens */
186 static int dtrace_helpers; /* number of helpers */
187 static void *dtrace_softstate; /* softstate pointer */
188 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
189 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
190 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
191 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
192 static int dtrace_toxranges; /* number of toxic ranges */
193 static int dtrace_toxranges_max; /* size of toxic range array */
194 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
195 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
196 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
197 static kthread_t *dtrace_panicked; /* panicking thread */
198 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
199 static dtrace_genid_t dtrace_probegen; /* current probe generation */
200 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
201 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
202 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
203 #if defined(__APPLE__)
204 static int dtrace_dof_mode; /* dof mode */
205 #endif
206
207 #if defined(__APPLE__)
208
209 /*
210 * To save memory, some common memory allocations are given a
211 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
212 * which means it would fall into the kalloc.128 bucket. With
213 * 20k elements allocated, the space saved is substantial.
214 */
215
216 struct zone *dtrace_probe_t_zone;
217
218 #endif
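
/*
 * Illustrative sketch only: the zone itself is created elsewhere,
 * during framework initialization, and the sizing below is an
 * assumption chosen for the example rather than the values actually
 * used. The point is simply that probes come from a dedicated zone:
 *
 *	dtrace_probe_t_zone = zinit(sizeof (dtrace_probe_t),
 *	    1024 * sizeof (dtrace_probe_t), sizeof (dtrace_probe_t),
 *	    "dtrace.dtrace_probe_t");
 *
 * Probes are then obtained with zalloc(dtrace_probe_t_zone) and
 * released with zfree(dtrace_probe_t_zone, probe), instead of landing
 * in the general-purpose kalloc.128 bucket described above.
 */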
219
220 /*
221 * DTrace Locking
222 * DTrace is protected by three (relatively coarse-grained) locks:
223 *
224 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
225 * including enabling state, probes, ECBs, consumer state, helper state,
226 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
227 * probe context is lock-free -- synchronization is handled via the
228 * dtrace_sync() cross call mechanism.
229 *
230 * (2) dtrace_provider_lock is required when manipulating provider state, or
231 * when provider state must be held constant.
232 *
233 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
234 * when meta provider state must be held constant.
235 *
236 * The lock ordering between these three locks is dtrace_meta_lock before
237 * dtrace_provider_lock before dtrace_lock. (In particular, there are
238 * several places where dtrace_provider_lock is held by the framework as it
239 * calls into the providers -- which then call back into the framework,
240 * grabbing dtrace_lock.)
241 *
242 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
243 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
244 * role as a coarse-grained lock; it is acquired before both of these locks.
245 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
246 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
247 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
248 * acquired _between_ dtrace_provider_lock and dtrace_lock.
249 */
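
/*
 * A minimal sketch of the resulting nesting (using the lck_mtx_t
 * analogues described in the APPLE NOTE below); this is illustrative
 * of the ordering only, not a code path taken verbatim by the
 * framework:
 *
 *	lck_mtx_lock(&cpu_lock);
 *	lck_mtx_lock(&dtrace_provider_lock);
 *	lck_mtx_lock(&mod_lock);
 *	lck_mtx_lock(&dtrace_lock);
 *	...
 *	lck_mtx_unlock(&dtrace_lock);
 *	lck_mtx_unlock(&mod_lock);
 *	lck_mtx_unlock(&dtrace_provider_lock);
 *	lck_mtx_unlock(&cpu_lock);
 */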
250
251 /*
252 * APPLE NOTE:
253 *
254 * All kmutex_t vars have been changed to lck_mtx_t.
255 * Note that lck_mtx_t's require explicit initialization.
256 *
257 * mutex_enter() becomes lck_mtx_lock()
258 * mutex_exit() becomes lck_mtx_unlock()
259 *
260 * Lock asserts are changed like this:
261 *
262 * ASSERT(MUTEX_HELD(&cpu_lock));
263 * becomes:
264 * lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
265 *
266 * Due to the number of these changes, they are not called out explicitly.
267 */
268 static lck_mtx_t dtrace_lock; /* probe state lock */
269 static lck_mtx_t dtrace_provider_lock; /* provider state lock */
270 static lck_mtx_t dtrace_meta_lock; /* meta-provider state lock */
271 #if defined(__APPLE__)
272 static lck_rw_t dtrace_dof_mode_lock; /* dof mode lock */
273 #endif
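
/*
 * A hedged sketch of the explicit initialization mentioned in the
 * APPLE NOTE above; the lock group and attribute arguments shown here
 * are assumptions for illustration (the real setup happens during
 * framework attach):
 *
 *	dtrace_lck_grp = lck_grp_alloc_init("dtrace", LCK_GRP_ATTR_NULL);
 *	lck_mtx_init(&dtrace_lock, dtrace_lck_grp, LCK_ATTR_NULL);
 *	lck_mtx_init(&dtrace_provider_lock, dtrace_lck_grp, LCK_ATTR_NULL);
 *	lck_mtx_init(&dtrace_meta_lock, dtrace_lck_grp, LCK_ATTR_NULL);
 *	lck_rw_init(&dtrace_dof_mode_lock, dtrace_lck_grp, LCK_ATTR_NULL);
 */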
274
275 /*
276 * DTrace Provider Variables
277 *
278 * These are the variables relating to DTrace as a provider (that is, the
279 * provider of the BEGIN, END, and ERROR probes).
280 */
281 static dtrace_pattr_t dtrace_provider_attr = {
282 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
283 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
284 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
285 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
286 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
287 };
288
289 static void
290 dtrace_nullop(void)
291 {}
292
293 static dtrace_pops_t dtrace_provider_ops = {
294 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
295 (void (*)(void *, struct modctl *))dtrace_nullop,
296 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
297 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
298 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
299 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
300 NULL,
301 NULL,
302 NULL,
303 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
304 };
305
306 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
307 static dtrace_id_t dtrace_probeid_end; /* special END probe */
308 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
309
310 /*
311 * DTrace Helper Tracing Variables
312 */
313 uint32_t dtrace_helptrace_next = 0;
314 uint32_t dtrace_helptrace_nlocals;
315 char *dtrace_helptrace_buffer;
316 int dtrace_helptrace_bufsize = 512 * 1024;
317
318 #ifdef DEBUG
319 int dtrace_helptrace_enabled = 1;
320 #else
321 int dtrace_helptrace_enabled = 0;
322 #endif
323
324 /*
325 * DTrace Error Hashing
326 *
327 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
328 * table. This is very useful for checking coverage of tests that are
329 * expected to induce DIF or DOF processing errors, and may be useful for
330 * debugging problems in the DIF code generator or in DOF generation. The
331 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
332 */
333 #ifdef DEBUG
334 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
335 static const char *dtrace_errlast;
336 static kthread_t *dtrace_errthread;
337 static lck_mtx_t dtrace_errlock;
338 #endif
339
340 /*
341 * DTrace Macros and Constants
342 *
343 * These are various macros that are useful in various spots in the
344 * implementation, along with a few random constants that have no meaning
345 * outside of the implementation. There is no real structure to this cpp
346 * mishmash -- but is there ever?
347 */
348 #define DTRACE_HASHSTR(hash, probe) \
349 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
350
351 #define DTRACE_HASHNEXT(hash, probe) \
352 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
353
354 #define DTRACE_HASHPREV(hash, probe) \
355 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
356
357 #define DTRACE_HASHEQ(hash, lhs, rhs) \
358 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
359 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
360
361 #define DTRACE_AGGHASHSIZE_SLEW 17
362
363 /*
364 * The key for a thread-local variable consists of the lower 61 bits of the
365 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
366 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
367 * equal to a variable identifier. This is necessary (but not sufficient) to
368 * assure that global associative arrays never collide with thread-local
369 * variables. To guarantee that they cannot collide, we must also define the
370 * order for keying dynamic variables. That order is:
371 *
372 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
373 *
374 * Because the variable-key and the tls-key are in orthogonal spaces, there is
375 * no way for a global variable key signature to match a thread-local key
376 * signature.
377 */
378 #if !defined(__APPLE__)
379 #define DTRACE_TLS_THRKEY(where) { \
380 uint_t intr = 0; \
381 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
382 for (; actv; actv >>= 1) \
383 intr++; \
384 ASSERT(intr < (1 << 3)); \
385 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
386 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
387 }
388 #else
389 #define DTRACE_TLS_THRKEY(where) { \
390 uint_t intr = ml_at_interrupt_context(); /* XXX just one measly bit */ \
391 uint_t thr = (uint_t)current_thread(); \
392 uint_t pid = (uint_t)proc_selfpid(); \
393 ASSERT(intr < (1 << 3)); \
394 (where) = ((((uint64_t)thr << 32 | pid) + DIF_VARIABLE_MAX) & \
395 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
396 }
397 #endif /* __APPLE__ */
398
399 #define DTRACE_STORE(type, tomax, offset, what) \
400 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
401
402 #if !defined(__APPLE__)
403 #if !(defined(__i386__) || defined (__x86_64__))
404 #define DTRACE_ALIGNCHECK(addr, size, flags) \
405 if (addr & (size - 1)) { \
406 *flags |= CPU_DTRACE_BADALIGN; \
407 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
408 return (0); \
409 }
410 #else
411 #define DTRACE_ALIGNCHECK(addr, size, flags)
412 #endif
413
414 #define DTRACE_LOADFUNC(bits) \
415 /*CSTYLED*/ \
416 uint##bits##_t \
417 dtrace_load##bits(uintptr_t addr) \
418 { \
419 size_t size = bits / NBBY; \
420 /*CSTYLED*/ \
421 uint##bits##_t rval; \
422 int i; \
423 volatile uint16_t *flags = (volatile uint16_t *) \
424 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
425 \
426 DTRACE_ALIGNCHECK(addr, size, flags); \
427 \
428 for (i = 0; i < dtrace_toxranges; i++) { \
429 if (addr >= dtrace_toxrange[i].dtt_limit) \
430 continue; \
431 \
432 if (addr + size <= dtrace_toxrange[i].dtt_base) \
433 continue; \
434 \
435 /* \
436 * This address falls within a toxic region; return 0. \
437 */ \
438 *flags |= CPU_DTRACE_BADADDR; \
439 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
440 return (0); \
441 } \
442 \
443 *flags |= CPU_DTRACE_NOFAULT; \
444 /*CSTYLED*/ \
445 rval = *((volatile uint##bits##_t *)addr); \
446 *flags &= ~CPU_DTRACE_NOFAULT; \
447 \
448 return (rval); \
449 }
450 #else
451 #define DTRACE_ALIGNCHECK(addr, size, flags) \
452 if (addr & (MIN(size,4) - 1)) { \
453 *flags |= CPU_DTRACE_BADALIGN; \
454 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
455 return (0); \
456 }
457
458 #define RECOVER_LABEL(bits) __asm__ volatile("_dtraceLoadRecover" #bits ":" );
459
460 #define DTRACE_LOADFUNC(bits) \
461 /*CSTYLED*/ \
462 extern vm_offset_t dtraceLoadRecover##bits; \
463 uint##bits##_t dtrace_load##bits(uintptr_t addr); \
464 \
465 uint##bits##_t \
466 dtrace_load##bits(uintptr_t addr) \
467 { \
468 size_t size = bits / NBBY; \
469 /*CSTYLED*/ \
470 uint##bits##_t rval = 0; \
471 int i; \
472 ppnum_t pp; \
473 volatile uint16_t *flags = (volatile uint16_t *) \
474 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
475 \
476 DTRACE_ALIGNCHECK(addr, size, flags); \
477 \
478 for (i = 0; i < dtrace_toxranges; i++) { \
479 if (addr >= dtrace_toxrange[i].dtt_limit) \
480 continue; \
481 \
482 if (addr + size <= dtrace_toxrange[i].dtt_base) \
483 continue; \
484 \
485 /* \
486 * This address falls within a toxic region; return 0. \
487 */ \
488 *flags |= CPU_DTRACE_BADADDR; \
489 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
490 return (0); \
491 } \
492 \
493 pp = pmap_find_phys(kernel_pmap, addr); \
494 \
495 if (0 == pp || /* pmap_find_phys failed ? */ \
496 !dtxnu_is_RAM_page(pp) /* Backed by RAM? */ ) { \
497 *flags |= CPU_DTRACE_BADADDR; \
498 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
499 return (0); \
500 } \
501 \
502 { \
503 volatile vm_offset_t recover = (vm_offset_t)&dtraceLoadRecover##bits; \
504 *flags |= CPU_DTRACE_NOFAULT; \
505 recover = dtrace_set_thread_recover(current_thread(), recover); \
506 /*CSTYLED*/ \
507 rval = *((volatile uint##bits##_t *)addr); \
508 RECOVER_LABEL(bits); \
509 (void)dtrace_set_thread_recover(current_thread(), recover); \
510 *flags &= ~CPU_DTRACE_NOFAULT; \
511 } \
512 \
513 return (rval); \
514 }
515 #endif /* __APPLE__ */
516
517
518 #ifdef __LP64__
519 #define dtrace_loadptr dtrace_load64
520 #else
521 #define dtrace_loadptr dtrace_load32
522 #endif
523
524 #define DTRACE_DYNHASH_FREE 0
525 #define DTRACE_DYNHASH_SINK 1
526 #define DTRACE_DYNHASH_VALID 2
527
528 #define DTRACE_MATCH_NEXT 0
529 #define DTRACE_MATCH_DONE 1
530 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
531 #define DTRACE_STATE_ALIGN 64
532
533 #define DTRACE_FLAGS2FLT(flags) \
534 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
535 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
536 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
537 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
538 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
539 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
540 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
541 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
542 DTRACEFLT_UNKNOWN)
543
544 #define DTRACEACT_ISSTRING(act) \
545 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
546 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
547
548 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
549 static void dtrace_enabling_provide(dtrace_provider_t *);
550 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
551 static void dtrace_enabling_matchall(void);
552 static dtrace_state_t *dtrace_anon_grab(void);
553 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
554 dtrace_state_t *, uint64_t, uint64_t);
555 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
556 static void dtrace_buffer_drop(dtrace_buffer_t *);
557 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
558 dtrace_state_t *, dtrace_mstate_t *);
559 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
560 dtrace_optval_t);
561 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
562 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
563
564 /*
565 * DTrace Probe Context Functions
566 *
567 * These functions are called from probe context. Because probe context is
568 * any context in which C may be called, arbitrary locks may be held,
569 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
570 * As a result, functions called from probe context may only call other DTrace
571 * support functions -- they may not interact at all with the system at large.
572 * (Note that the ASSERT macro is made probe-context safe by redefining it in
573 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
574 * loads are to be performed from probe context, they _must_ be in terms of
575 * the safe dtrace_load*() variants.
576 *
577 * Some functions in this block are not actually called from probe context;
578 * for these functions, there will be a comment above the function reading
579 * "Note: not called from probe context."
580 */
581 void
582 dtrace_panic(const char *format, ...)
583 {
584 va_list alist;
585
586 va_start(alist, format);
587 dtrace_vpanic(format, alist);
588 va_end(alist);
589 }
590
591 int
592 dtrace_assfail(const char *a, const char *f, int l)
593 {
594 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
595
596 /*
597 * We just need something here that even the most clever compiler
598 * cannot optimize away.
599 */
600 return (a[(uintptr_t)f]);
601 }
602
603 /*
604 * Atomically increment a specified error counter from probe context.
605 */
606 static void
607 dtrace_error(uint32_t *counter)
608 {
609 /*
610 * Most counters stored to in probe context are per-CPU counters.
611 * However, there are some error conditions that are sufficiently
612 * arcane that they don't merit per-CPU storage. If these counters
613 * are incremented concurrently on different CPUs, scalability will be
614 * adversely affected -- but we don't expect them to be white-hot in a
615 * correctly constructed enabling...
616 */
617 uint32_t oval, nval;
618
619 do {
620 oval = *counter;
621
622 if ((nval = oval + 1) == 0) {
623 /*
624 * If the counter would wrap, set it to 1 -- assuring
625 * that the counter is never zero when we have seen
626 * errors. (The counter must be 32-bits because we
627 * aren't guaranteed a 64-bit compare&swap operation.)
628 * To save this code both the infamy of being fingered
629 * by a priggish news story and the indignity of being
630 * the target of a neo-puritan witch trial, we're
631 * carefully avoiding any colorful description of the
632 * likelihood of this condition -- but suffice it to
633 * say that it is only slightly more likely than the
634 * overflow of predicate cache IDs, as discussed in
635 * dtrace_predicate_create().
636 */
637 nval = 1;
638 }
639 } while (dtrace_cas32(counter, oval, nval) != oval);
640 }
641
642 /*
643 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
644 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
645 */
646 DTRACE_LOADFUNC(8)
647 DTRACE_LOADFUNC(16)
648 DTRACE_LOADFUNC(32)
649 DTRACE_LOADFUNC(64)
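
/*
 * Hedged usage sketch: from probe context, a DIF-specified kernel
 * address must be read through these safe variants rather than being
 * dereferenced directly, so that toxic ranges and faults are handled.
 * Here "addr" stands for an untrusted, DIF-supplied address:
 *
 *	uint64_t val = dtrace_load64(addr);
 *	uintptr_t ptr = dtrace_loadptr(addr);
 *
 * On a bad address the load returns 0, the per-CPU CPU_DTRACE_BADADDR
 * (or CPU_DTRACE_BADALIGN) flag is set, and the faulting address is
 * recorded in cpuc_dtrace_illval, as in the macro bodies above.
 */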
650
651 static int
652 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
653 {
654 if (dest < mstate->dtms_scratch_base)
655 return (0);
656
657 if (dest + size < dest)
658 return (0);
659
660 if (dest + size > mstate->dtms_scratch_ptr)
661 return (0);
662
663 return (1);
664 }
665
666 static int
667 dtrace_canstore_statvar(uint64_t addr, size_t sz,
668 dtrace_statvar_t **svars, int nsvars)
669 {
670 int i;
671
672 for (i = 0; i < nsvars; i++) {
673 dtrace_statvar_t *svar = svars[i];
674
675 if (svar == NULL || svar->dtsv_size == 0)
676 continue;
677
678 if (addr - svar->dtsv_data < svar->dtsv_size &&
679 addr + sz <= svar->dtsv_data + svar->dtsv_size)
680 return (1);
681 }
682
683 return (0);
684 }
685
686 /*
687 * Check to see if the address is within a memory region to which a store may
688 * be issued. This includes the DTrace scratch areas, and any DTrace variable
689 * region. The caller of dtrace_canstore() is responsible for performing any
690 * alignment checks that are needed before stores are actually executed.
691 */
692 static int
693 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
694 dtrace_vstate_t *vstate)
695 {
696 uintptr_t a;
697 size_t s;
698
699 /*
700 * First, check to see if the address is in scratch space...
701 */
702 a = mstate->dtms_scratch_base;
703 s = mstate->dtms_scratch_size;
704
705 if (addr - a < s && addr + sz <= a + s)
706 return (1);
707
708 /*
709 * Now check to see if it's a dynamic variable. This check will pick
710 * up both thread-local variables and any global dynamically-allocated
711 * variables.
712 */
713 a = (uintptr_t)vstate->dtvs_dynvars.dtds_base;
714 s = vstate->dtvs_dynvars.dtds_size;
715 if (addr - a < s && addr + sz <= a + s)
716 return (1);
717
718 /*
719 * Finally, check the static local and global variables. These checks
720 * take the longest, so we perform them last.
721 */
722 if (dtrace_canstore_statvar(addr, sz,
723 vstate->dtvs_locals, vstate->dtvs_nlocals))
724 return (1);
725
726 if (dtrace_canstore_statvar(addr, sz,
727 vstate->dtvs_globals, vstate->dtvs_nglobals))
728 return (1);
729
730 return (0);
731 }
732
733 /*
734 * Compare two strings using safe loads.
735 */
736 static int
737 dtrace_strncmp(char *s1, char *s2, size_t limit)
738 {
739 uint8_t c1, c2;
740 volatile uint16_t *flags;
741
742 if (s1 == s2 || limit == 0)
743 return (0);
744
745 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
746
747 do {
748 if (s1 == NULL)
749 c1 = '\0';
750 else
751 c1 = dtrace_load8((uintptr_t)s1++);
752
753 if (s2 == NULL)
754 c2 = '\0';
755 else
756 c2 = dtrace_load8((uintptr_t)s2++);
757
758 if (c1 != c2)
759 return (c1 - c2);
760 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
761
762 return (0);
763 }
764
765 /*
766 * Compute strlen(s) for a string using safe memory accesses. The additional
767 * lim parameter is used to specify a maximum length to ensure completion.
768 */
769 static size_t
770 dtrace_strlen(const char *s, size_t lim)
771 {
772 uint_t len;
773
774 for (len = 0; len != lim; len++)
775 if (dtrace_load8((uintptr_t)s++) == '\0')
776 break;
777
778 return (len);
779 }
780
781 /*
782 * Check if an address falls within a toxic region.
783 */
784 static int
785 dtrace_istoxic(uintptr_t kaddr, size_t size)
786 {
787 uintptr_t taddr, tsize;
788 int i;
789
790 for (i = 0; i < dtrace_toxranges; i++) {
791 taddr = dtrace_toxrange[i].dtt_base;
792 tsize = dtrace_toxrange[i].dtt_limit - taddr;
793
794 if (kaddr - taddr < tsize) {
795 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
796 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
797 return (1);
798 }
799
800 if (taddr - kaddr < size) {
801 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
802 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
803 return (1);
804 }
805 }
806
807 return (0);
808 }
809
810 /*
811 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
812 * memory specified by the DIF program. The dst is assumed to be safe memory
813 * that we can store to directly because it is managed by DTrace. As with
814 * standard bcopy, overlapping copies are handled properly.
815 */
816 static void
817 dtrace_bcopy(const void *src, void *dst, size_t len)
818 {
819 if (len != 0) {
820 uint8_t *s1 = dst;
821 const uint8_t *s2 = src;
822
823 if (s1 <= s2) {
824 do {
825 *s1++ = dtrace_load8((uintptr_t)s2++);
826 } while (--len != 0);
827 } else {
828 s2 += len;
829 s1 += len;
830
831 do {
832 *--s1 = dtrace_load8((uintptr_t)--s2);
833 } while (--len != 0);
834 }
835 }
836 }
837
838 /*
839 * Copy src to dst using safe memory accesses, up to either the specified
840 * length, or the point that a nul byte is encountered. The src is assumed to
841 * be unsafe memory specified by the DIF program. The dst is assumed to be
842 * safe memory that we can store to directly because it is managed by DTrace.
843 * Unlike dtrace_bcopy(), overlapping regions are not handled.
844 */
845 static void
846 dtrace_strcpy(const void *src, void *dst, size_t len)
847 {
848 if (len != 0) {
849 uint8_t *s1 = dst, c;
850 const uint8_t *s2 = src;
851
852 do {
853 *s1++ = c = dtrace_load8((uintptr_t)s2++);
854 } while (--len != 0 && c != '\0');
855 }
856 }
857
858 /*
859 * Copy src to dst, deriving the size and type from the specified (BYREF)
860 * variable type. The src is assumed to be unsafe memory specified by the DIF
861 * program. The dst is assumed to be DTrace variable memory that is of the
862 * specified type; we assume that we can store to it directly.
863 */
864 static void
865 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
866 {
867 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
868
869 if (type->dtdt_kind == DIF_TYPE_STRING)
870 dtrace_strcpy(src, dst, type->dtdt_size);
871 else
872 dtrace_bcopy(src, dst, type->dtdt_size);
873 }
874
875 /*
876 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
877 * unsafe memory specified by the DIF program. The s2 data is assumed to be
878 * safe memory that we can access directly because it is managed by DTrace.
879 */
880 static int
881 dtrace_bcmp(const void *s1, const void *s2, size_t len)
882 {
883 volatile uint16_t *flags;
884
885 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
886
887 if (s1 == s2)
888 return (0);
889
890 if (s1 == NULL || s2 == NULL)
891 return (1);
892
893 if (s1 != s2 && len != 0) {
894 const uint8_t *ps1 = s1;
895 const uint8_t *ps2 = s2;
896
897 do {
898 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
899 return (1);
900 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
901 }
902 return (0);
903 }
904
905 /*
906 * Zero the specified region using a simple byte-by-byte loop. Note that this
907 * is for safe DTrace-managed memory only.
908 */
909 static void
910 dtrace_bzero(void *dst, size_t len)
911 {
912 uchar_t *cp;
913
914 for (cp = dst; len != 0; len--)
915 *cp++ = 0;
916 }
917
918 /*
919 * This privilege check should be used by actions and subroutines to
920 * verify that the user credentials of the process that enabled the
921 * invoking ECB match the target credentials
922 */
923 static int
924 dtrace_priv_proc_common_user(dtrace_state_t *state)
925 {
926 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
927
928 /*
929 * We should always have a non-NULL state cred here, since if cred
930 * is null (anonymous tracing), we fast-path bypass this routine.
931 */
932 ASSERT(s_cr != NULL);
933
934 #if !defined(__APPLE__)
935 if ((cr = CRED()) != NULL &&
936 #else
937 if ((cr = dtrace_CRED()) != NULL &&
938 #endif /* __APPLE__ */
939 s_cr->cr_uid == cr->cr_uid &&
940 s_cr->cr_uid == cr->cr_ruid &&
941 s_cr->cr_uid == cr->cr_suid &&
942 s_cr->cr_gid == cr->cr_gid &&
943 s_cr->cr_gid == cr->cr_rgid &&
944 s_cr->cr_gid == cr->cr_sgid)
945 return (1);
946
947 return (0);
948 }
949
950 /*
951 * This privilege check should be used by actions and subroutines to
952 * verify that the zone of the process that enabled the invoking ECB
953 * matches the target credentials
954 */
955 static int
956 dtrace_priv_proc_common_zone(dtrace_state_t *state)
957 {
958 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
959
960 /*
961 * We should always have a non-NULL state cred here, since if cred
962 * is null (anonymous tracing), we fast-path bypass this routine.
963 */
964 ASSERT(s_cr != NULL);
965
966 #if !defined(__APPLE__)
967 if ((cr = CRED()) != NULL &&
968 s_cr->cr_zone == cr->cr_zone)
969 return (1);
970
971 return (0);
972 #else
973 return 1; /* Darwin doesn't do zones. */
974 #endif /* __APPLE__ */
975 }
976
977 /*
978 * This privilege check should be used by actions and subroutines to
979 * verify that the process has not setuid or changed credentials.
980 */
981 #if !defined(__APPLE__)
982 static int
983 dtrace_priv_proc_common_nocd()
984 {
985 proc_t *proc;
986
987 if ((proc = ttoproc(curthread)) != NULL &&
988 !(proc->p_flag & SNOCD))
989 return (1);
990
991 return (0);
992 }
993 #else
994 static int
995 dtrace_priv_proc_common_nocd(void)
996 {
997 return 1; /* Darwin omits "No Core Dump" flag. */
998 }
999 #endif /* __APPLE__ */
1000
1001 static int
1002 dtrace_priv_proc_destructive(dtrace_state_t *state)
1003 {
1004 int action = state->dts_cred.dcr_action;
1005
1006 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1007 dtrace_priv_proc_common_zone(state) == 0)
1008 goto bad;
1009
1010 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1011 dtrace_priv_proc_common_user(state) == 0)
1012 goto bad;
1013
1014 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1015 dtrace_priv_proc_common_nocd() == 0)
1016 goto bad;
1017
1018 return (1);
1019
1020 bad:
1021 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1022
1023 return (0);
1024 }
1025
1026 static int
1027 dtrace_priv_proc_control(dtrace_state_t *state)
1028 {
1029 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1030 return (1);
1031
1032 if (dtrace_priv_proc_common_zone(state) &&
1033 dtrace_priv_proc_common_user(state) &&
1034 dtrace_priv_proc_common_nocd())
1035 return (1);
1036
1037 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1038
1039 return (0);
1040 }
1041
1042 static int
1043 dtrace_priv_proc(dtrace_state_t *state)
1044 {
1045 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1046 return (1);
1047
1048 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1049
1050 return (0);
1051 }
1052
1053 static int
1054 dtrace_priv_kernel(dtrace_state_t *state)
1055 {
1056 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1057 return (1);
1058
1059 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1060
1061 return (0);
1062 }
1063
1064 static int
1065 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1066 {
1067 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1068 return (1);
1069
1070 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1071
1072 return (0);
1073 }
1074
1075 /*
1076 * Note: not called from probe context. This function is called
1077 * asynchronously (and at a regular interval) from outside of probe context to
1078 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1079 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1080 */
1081 #if defined(__APPLE__)
1082 static
1083 #endif /* __APPLE__ */
1084 void
1085 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1086 {
1087 dtrace_dynvar_t *dirty;
1088 dtrace_dstate_percpu_t *dcpu;
1089 int i, work = 0;
1090
1091 for (i = 0; i < NCPU; i++) {
1092 dcpu = &dstate->dtds_percpu[i];
1093
1094 ASSERT(dcpu->dtdsc_rinsing == NULL);
1095
1096 /*
1097 * If the dirty list is NULL, there is no dirty work to do.
1098 */
1099 if (dcpu->dtdsc_dirty == NULL)
1100 continue;
1101
1102 /*
1103 * If the clean list is non-NULL, then we're not going to do
1104 * any work for this CPU -- it means that there has not been
1105 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1106 * since the last time we cleaned house.
1107 */
1108 if (dcpu->dtdsc_clean != NULL)
1109 continue;
1110
1111 work = 1;
1112
1113 /*
1114 * Atomically move the dirty list aside.
1115 */
1116 do {
1117 dirty = dcpu->dtdsc_dirty;
1118
1119 /*
1120 * Before we zap the dirty list, set the rinsing list.
1121 * (This allows for a potential assertion in
1122 * dtrace_dynvar(): if a free dynamic variable appears
1123 * on a hash chain, either the dirty list or the
1124 * rinsing list for some CPU must be non-NULL.)
1125 */
1126 dcpu->dtdsc_rinsing = dirty;
1127 dtrace_membar_producer();
1128 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1129 dirty, NULL) != dirty);
1130 }
1131
1132 if (!work) {
1133 /*
1134 * We have no work to do; we can simply return.
1135 */
1136 return;
1137 }
1138
1139 dtrace_sync();
1140
1141 for (i = 0; i < NCPU; i++) {
1142 dcpu = &dstate->dtds_percpu[i];
1143
1144 if (dcpu->dtdsc_rinsing == NULL)
1145 continue;
1146
1147 /*
1148 * We are now guaranteed that no hash chain contains a pointer
1149 * into this dirty list; we can make it clean.
1150 */
1151 ASSERT(dcpu->dtdsc_clean == NULL);
1152 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1153 dcpu->dtdsc_rinsing = NULL;
1154 }
1155
1156 /*
1157 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1158 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1159 * This prevents a race whereby a CPU incorrectly decides that
1160 * the state should be something other than DTRACE_DSTATE_CLEAN
1161 * after dtrace_dynvar_clean() has completed.
1162 */
1163 dtrace_sync();
1164
1165 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1166 }
1167
1168 /*
1169 * Depending on the value of the op parameter, this function looks up,
1170 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1171 * allocation is requested, this function will return a pointer to a
1172 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1173 * variable can be allocated. If NULL is returned, the appropriate counter
1174 * will be incremented.
1175 */
1176 #if defined(__APPLE__)
1177 static
1178 #endif /* __APPLE__ */
1179 dtrace_dynvar_t *
1180 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1181 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op)
1182 {
1183 uint64_t hashval = DTRACE_DYNHASH_VALID;
1184 dtrace_dynhash_t *hash = dstate->dtds_hash;
1185 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1186 processorid_t me = CPU->cpu_id, cpu = me;
1187 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1188 size_t bucket, ksize;
1189 size_t chunksize = dstate->dtds_chunksize;
1190 uintptr_t kdata, lock, nstate;
1191 uint_t i;
1192
1193 ASSERT(nkeys != 0);
1194
1195 /*
1196 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1197 * algorithm. For the by-value portions, we perform the algorithm in
1198 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1199 * bit, and seems to have only a minute effect on distribution. For
1200 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1201 * over each referenced byte. It's painful to do this, but it's much
1202 * better than pathological hash distribution. The efficacy of the
1203 * hashing algorithm (and a comparison with other algorithms) may be
1204 * found by running the ::dtrace_dynstat MDB dcmd.
1205 */
1206 for (i = 0; i < nkeys; i++) {
1207 if (key[i].dttk_size == 0) {
1208 uint64_t val = key[i].dttk_value;
1209
1210 hashval += (val >> 48) & 0xffff;
1211 hashval += (hashval << 10);
1212 hashval ^= (hashval >> 6);
1213
1214 hashval += (val >> 32) & 0xffff;
1215 hashval += (hashval << 10);
1216 hashval ^= (hashval >> 6);
1217
1218 hashval += (val >> 16) & 0xffff;
1219 hashval += (hashval << 10);
1220 hashval ^= (hashval >> 6);
1221
1222 hashval += val & 0xffff;
1223 hashval += (hashval << 10);
1224 hashval ^= (hashval >> 6);
1225 } else {
1226 /*
1227 * This is incredibly painful, but it beats the hell
1228 * out of the alternative.
1229 */
1230 uint64_t j, size = key[i].dttk_size;
1231 uintptr_t base = (uintptr_t)key[i].dttk_value;
1232
1233 for (j = 0; j < size; j++) {
1234 hashval += dtrace_load8(base + j);
1235 hashval += (hashval << 10);
1236 hashval ^= (hashval >> 6);
1237 }
1238 }
1239 }
1240
1241 hashval += (hashval << 3);
1242 hashval ^= (hashval >> 11);
1243 hashval += (hashval << 15);
1244
1245 /*
1246 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1247 * comes out to be one of our two sentinel hash values. If this
1248 * actually happens, we set the hashval to be a value known to be a
1249 * non-sentinel value.
1250 */
1251 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1252 hashval = DTRACE_DYNHASH_VALID;
1253
1254 /*
1255 * Yes, it's painful to do a divide here. If the cycle count becomes
1256 * important here, tricks can be pulled to reduce it. (However, it's
1257 * critical that hash collisions be kept to an absolute minimum;
1258 * they're much more painful than a divide.) It's better to have a
1259 * solution that generates few collisions and still keeps things
1260 * relatively simple.
1261 */
1262 bucket = hashval % dstate->dtds_hashsize;
1263
1264 if (op == DTRACE_DYNVAR_DEALLOC) {
1265 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1266
1267 for (;;) {
1268 while ((lock = *lockp) & 1)
1269 continue;
1270
1271 if (dtrace_casptr((void *)lockp,
1272 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1273 break;
1274 }
1275
1276 dtrace_membar_producer();
1277 }
1278
1279 top:
1280 prev = NULL;
1281 lock = hash[bucket].dtdh_lock;
1282
1283 dtrace_membar_consumer();
1284
1285 start = hash[bucket].dtdh_chain;
1286 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1287 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1288 op != DTRACE_DYNVAR_DEALLOC));
1289
1290 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1291 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1292 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1293
1294 if (dvar->dtdv_hashval != hashval) {
1295 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1296 /*
1297 * We've reached the sink, and therefore the
1298 * end of the hash chain; we can kick out of
1299 * the loop knowing that we have seen a valid
1300 * snapshot of state.
1301 */
1302 ASSERT(dvar->dtdv_next == NULL);
1303 ASSERT(dvar == &dtrace_dynhash_sink);
1304 break;
1305 }
1306
1307 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1308 /*
1309 * We've gone off the rails: somewhere along
1310 * the line, one of the members of this hash
1311 * chain was deleted. Note that we could also
1312 * detect this by simply letting this loop run
1313 * to completion, as we would eventually hit
1314 * the end of the dirty list. However, we
1315 * want to avoid running the length of the
1316 * dirty list unnecessarily (it might be quite
1317 * long), so we catch this as early as
1318 * possible by detecting the hash marker. In
1319 * this case, we simply set dvar to NULL and
1320 * break; the conditional after the loop will
1321 * send us back to top.
1322 */
1323 dvar = NULL;
1324 break;
1325 }
1326
1327 goto next;
1328 }
1329
1330 if (dtuple->dtt_nkeys != nkeys)
1331 goto next;
1332
1333 for (i = 0; i < nkeys; i++, dkey++) {
1334 if (dkey->dttk_size != key[i].dttk_size)
1335 goto next; /* size or type mismatch */
1336
1337 if (dkey->dttk_size != 0) {
1338 if (dtrace_bcmp(
1339 (void *)(uintptr_t)key[i].dttk_value,
1340 (void *)(uintptr_t)dkey->dttk_value,
1341 dkey->dttk_size))
1342 goto next;
1343 } else {
1344 if (dkey->dttk_value != key[i].dttk_value)
1345 goto next;
1346 }
1347 }
1348
1349 if (op != DTRACE_DYNVAR_DEALLOC)
1350 return (dvar);
1351
1352 ASSERT(dvar->dtdv_next == NULL ||
1353 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1354
1355 if (prev != NULL) {
1356 ASSERT(hash[bucket].dtdh_chain != dvar);
1357 ASSERT(start != dvar);
1358 ASSERT(prev->dtdv_next == dvar);
1359 prev->dtdv_next = dvar->dtdv_next;
1360 } else {
1361 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1362 start, dvar->dtdv_next) != start) {
1363 /*
1364 * We have failed to atomically swing the
1365 * hash table head pointer, presumably because
1366 * of a conflicting allocation on another CPU.
1367 * We need to reread the hash chain and try
1368 * again.
1369 */
1370 goto top;
1371 }
1372 }
1373
1374 dtrace_membar_producer();
1375
1376 /*
1377 * Now set the hash value to indicate that it's free.
1378 */
1379 ASSERT(hash[bucket].dtdh_chain != dvar);
1380 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1381
1382 dtrace_membar_producer();
1383
1384 /*
1385 * Set the next pointer to point at the dirty list, and
1386 * atomically swing the dirty pointer to the newly freed dvar.
1387 */
1388 do {
1389 next = dcpu->dtdsc_dirty;
1390 dvar->dtdv_next = next;
1391 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1392
1393 /*
1394 * Finally, unlock this hash bucket.
1395 */
1396 ASSERT(hash[bucket].dtdh_lock == lock);
1397 ASSERT(lock & 1);
1398 hash[bucket].dtdh_lock++;
1399
1400 return (NULL);
1401 next:
1402 prev = dvar;
1403 continue;
1404 }
1405
1406 if (dvar == NULL) {
1407 /*
1408 * If dvar is NULL, it is because we went off the rails:
1409 * one of the elements that we traversed in the hash chain
1410 * was deleted while we were traversing it. In this case,
1411 * we assert that we aren't doing a dealloc (deallocs lock
1412 * the hash bucket to prevent themselves from racing with
1413 * one another), and retry the hash chain traversal.
1414 */
1415 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1416 goto top;
1417 }
1418
1419 if (op != DTRACE_DYNVAR_ALLOC) {
1420 /*
1421 * If we are not to allocate a new variable, we want to
1422 * return NULL now. Before we return, check that the value
1423 * of the lock word hasn't changed. If it has, we may have
1424 * seen an inconsistent snapshot.
1425 */
1426 if (op == DTRACE_DYNVAR_NOALLOC) {
1427 if (hash[bucket].dtdh_lock != lock)
1428 goto top;
1429 } else {
1430 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1431 ASSERT(hash[bucket].dtdh_lock == lock);
1432 ASSERT(lock & 1);
1433 hash[bucket].dtdh_lock++;
1434 }
1435
1436 return (NULL);
1437 }
1438
1439 /*
1440 * We need to allocate a new dynamic variable. The size we need is the
1441 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1442 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1443 * the size of any referred-to data (dsize). We then round the final
1444 * size up to the chunksize for allocation.
1445 */
1446 for (ksize = 0, i = 0; i < nkeys; i++)
1447 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1448
1449 /*
1450 * This should be pretty much impossible, but could happen if, say,
1451 * strange DIF specified the tuple. Ideally, this should be an
1452 * assertion and not an error condition -- but that requires that the
1453 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1454 * bullet-proof. (That is, it must not be able to be fooled by
1455 * malicious DIF.) Given the lack of backwards branches in DIF,
1456 * solving this would presumably not amount to solving the Halting
1457 * Problem -- but it still seems awfully hard.
1458 */
1459 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1460 ksize + dsize > chunksize) {
1461 dcpu->dtdsc_drops++;
1462 return (NULL);
1463 }
1464
1465 nstate = DTRACE_DSTATE_EMPTY;
1466
1467 do {
1468 retry:
1469 free = dcpu->dtdsc_free;
1470
1471 if (free == NULL) {
1472 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1473 void *rval;
1474
1475 if (clean == NULL) {
1476 /*
1477 * We're out of dynamic variable space on
1478 * this CPU. Unless we have tried all CPUs,
1479 * we'll try to allocate from a different
1480 * CPU.
1481 */
1482 switch (dstate->dtds_state) {
1483 case DTRACE_DSTATE_CLEAN: {
1484 void *sp = &dstate->dtds_state;
1485
1486 if (++cpu >= NCPU)
1487 cpu = 0;
1488
1489 if (dcpu->dtdsc_dirty != NULL &&
1490 nstate == DTRACE_DSTATE_EMPTY)
1491 nstate = DTRACE_DSTATE_DIRTY;
1492
1493 if (dcpu->dtdsc_rinsing != NULL)
1494 nstate = DTRACE_DSTATE_RINSING;
1495
1496 dcpu = &dstate->dtds_percpu[cpu];
1497
1498 if (cpu != me)
1499 goto retry;
1500
1501 (void) dtrace_cas32(sp,
1502 DTRACE_DSTATE_CLEAN, nstate);
1503
1504 /*
1505 * To increment the correct bean
1506 * counter, take another lap.
1507 */
1508 goto retry;
1509 }
1510
1511 case DTRACE_DSTATE_DIRTY:
1512 dcpu->dtdsc_dirty_drops++;
1513 break;
1514
1515 case DTRACE_DSTATE_RINSING:
1516 dcpu->dtdsc_rinsing_drops++;
1517 break;
1518
1519 case DTRACE_DSTATE_EMPTY:
1520 dcpu->dtdsc_drops++;
1521 break;
1522 }
1523
1524 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1525 return (NULL);
1526 }
1527
1528 /*
1529 * The clean list appears to be non-empty. We want to
1530 * move the clean list to the free list; we start by
1531 * moving the clean pointer aside.
1532 */
1533 if (dtrace_casptr(&dcpu->dtdsc_clean,
1534 clean, NULL) != clean) {
1535 /*
1536 * We are in one of two situations:
1537 *
1538 * (a) The clean list was switched to the
1539 * free list by another CPU.
1540 *
1541 * (b) The clean list was added to by the
1542 * cleansing cyclic.
1543 *
1544 * In either of these situations, we can
1545 * just reattempt the free list allocation.
1546 */
1547 goto retry;
1548 }
1549
1550 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1551
1552 /*
1553 * Now we'll move the clean list to the free list.
1554 * It's impossible for this to fail: the only way
1555 * the free list can be updated is through this
1556 * code path, and only one CPU can own the clean list.
1557 * Thus, it would only be possible for this to fail if
1558 * this code were racing with dtrace_dynvar_clean().
1559 * (That is, if dtrace_dynvar_clean() updated the clean
1560 * list, and we ended up racing to update the free
1561 * list.) This race is prevented by the dtrace_sync()
1562 * in dtrace_dynvar_clean() -- which flushes the
1563 * owners of the clean lists out before resetting
1564 * the clean lists.
1565 */
1566 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1567 ASSERT(rval == NULL);
1568 goto retry;
1569 }
1570
1571 dvar = free;
1572 new_free = dvar->dtdv_next;
1573 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1574
1575 /*
1576 * We have now allocated a new chunk. We copy the tuple keys into the
1577 * tuple array and copy any referenced key data into the data space
1578 * following the tuple array. As we do this, we relocate dttk_value
1579 * in the final tuple to point to the key data address in the chunk.
1580 */
1581 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1582 dvar->dtdv_data = (void *)(kdata + ksize);
1583 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1584
1585 for (i = 0; i < nkeys; i++) {
1586 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1587 size_t kesize = key[i].dttk_size;
1588
1589 if (kesize != 0) {
1590 dtrace_bcopy(
1591 (const void *)(uintptr_t)key[i].dttk_value,
1592 (void *)kdata, kesize);
1593 dkey->dttk_value = kdata;
1594 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1595 } else {
1596 dkey->dttk_value = key[i].dttk_value;
1597 }
1598
1599 dkey->dttk_size = kesize;
1600 }
1601
1602 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1603 dvar->dtdv_hashval = hashval;
1604 dvar->dtdv_next = start;
1605
1606 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1607 return (dvar);
1608
1609 /*
1610 * The cas has failed. Either another CPU is adding an element to
1611 * this hash chain, or another CPU is deleting an element from this
1612 * hash chain. The simplest way to deal with both of these cases
1613 * (though not necessarily the most efficient) is to free our
1614 * allocated block and tail-call ourselves. Note that the free is
1615 * to the dirty list and _not_ to the free list. This is to prevent
1616 * races with allocators, above.
1617 */
1618 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1619
1620 dtrace_membar_producer();
1621
1622 do {
1623 free = dcpu->dtdsc_dirty;
1624 dvar->dtdv_next = free;
1625 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1626
1627 return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
1628 }
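
/*
 * Hedged usage sketch (the values here are illustrative assumptions):
 * a thread-local variable lookup builds a two-element key -- the
 * variable identifier followed by the thread key, per the key-ordering
 * comment near DTRACE_TLS_THRKEY -- and asks for an existing chunk
 * without allocating:
 *
 *	dtrace_key_t key[2];
 *	dtrace_dynvar_t *dvar;
 *
 *	key[0].dttk_value = id;
 *	key[0].dttk_size = 0;
 *	DTRACE_TLS_THRKEY(key[1].dttk_value);
 *	key[1].dttk_size = 0;
 *
 *	dvar = dtrace_dynvar(dstate, 2, key, dsize, DTRACE_DYNVAR_NOALLOC);
 *
 * A NULL return with DTRACE_DYNVAR_NOALLOC simply means the variable
 * has not been allocated; with DTRACE_DYNVAR_ALLOC, NULL indicates a
 * drop and the appropriate counter has already been incremented.
 */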
1629
1630 /*ARGSUSED*/
1631 static void
1632 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1633 {
1634 if (nval < *oval)
1635 *oval = nval;
1636 }
1637
1638 /*ARGSUSED*/
1639 static void
1640 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1641 {
1642 if (nval > *oval)
1643 *oval = nval;
1644 }
1645
1646 static void
1647 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1648 {
1649 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1650 int64_t val = (int64_t)nval;
1651
1652 if (val < 0) {
1653 for (i = 0; i < zero; i++) {
1654 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1655 quanta[i] += incr;
1656 return;
1657 }
1658 }
1659 } else {
1660 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1661 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1662 quanta[i - 1] += incr;
1663 return;
1664 }
1665 }
1666
1667 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1668 return;
1669 }
1670
1671 ASSERT(0);
1672 }
1673
1674 static void
1675 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1676 {
1677 uint64_t arg = *lquanta++;
1678 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1679 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1680 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1681 int32_t val = (int32_t)nval, level;
1682
1683 ASSERT(step != 0);
1684 ASSERT(levels != 0);
1685
1686 if (val < base) {
1687 /*
1688 * This is an underflow.
1689 */
1690 lquanta[0] += incr;
1691 return;
1692 }
1693
1694 level = (val - base) / step;
1695
1696 if (level < levels) {
1697 lquanta[level + 1] += incr;
1698 return;
1699 }
1700
1701 /*
1702 * This is an overflow.
1703 */
1704 lquanta[levels + 1] += incr;
1705 }
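
/*
 * Worked example of the layout used above: with base = 0, step = 10
 * and levels = 10, lquanta[] holds levels + 2 counters -- lquanta[0]
 * counts underflow (val < 0), lquanta[1] through lquanta[10] count the
 * ranges [0, 10), [10, 20), ..., [90, 100), and lquanta[11] counts
 * overflow (val >= 100). A value of 37 computes level = (37 - 0) / 10
 * = 3 and therefore increments lquanta[4].
 */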
1706
1707 /*ARGSUSED*/
1708 static void
1709 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1710 {
1711 data[0]++;
1712 data[1] += nval;
1713 }
1714
1715 /*ARGSUSED*/
1716 static void
1717 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1718 {
1719 *oval = *oval + 1;
1720 }
1721
1722 /*ARGSUSED*/
1723 static void
1724 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1725 {
1726 *oval += nval;
1727 }
1728
1729 /*
1730 * Aggregate given the tuple in the principal data buffer, and the aggregating
1731 * action denoted by the specified dtrace_aggregation_t. The aggregation
1732 * buffer is specified as the buf parameter. This routine does not return
1733 * failure; if there is no space in the aggregation buffer, the data will be
1734 * dropped, and a corresponding counter incremented.
1735 */
1736 static void
1737 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1738 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1739 {
1740 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1741 uint32_t i, ndx, size, fsize;
1742 uint32_t align = sizeof (uint64_t) - 1;
1743 dtrace_aggbuffer_t *agb;
1744 dtrace_aggkey_t *key;
1745 uint32_t hashval = 0, limit, isstr;
1746 caddr_t tomax, data, kdata;
1747 dtrace_actkind_t action;
1748 dtrace_action_t *act;
1749 uintptr_t offs;
1750
1751 if (buf == NULL)
1752 return;
1753
1754 if (!agg->dtag_hasarg) {
1755 /*
1756 * Currently, only quantize() and lquantize() take additional
1757 * arguments, and they have the same semantics: an increment
1758 * value that defaults to 1 when not present. If additional
1759 * aggregating actions take arguments, the setting of the
1760 * default argument value will presumably have to become more
1761 * sophisticated...
1762 */
1763 arg = 1;
1764 }
1765
1766 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
1767 size = rec->dtrd_offset - agg->dtag_base;
1768 fsize = size + rec->dtrd_size;
1769
1770 ASSERT(dbuf->dtb_tomax != NULL);
1771 data = dbuf->dtb_tomax + offset + agg->dtag_base;
1772
1773 if ((tomax = buf->dtb_tomax) == NULL) {
1774 dtrace_buffer_drop(buf);
1775 return;
1776 }
1777
1778 /*
1779 * The metastructure is always at the bottom of the buffer.
1780 */
1781 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
1782 sizeof (dtrace_aggbuffer_t));
1783
1784 if (buf->dtb_offset == 0) {
1785 /*
1786 * We just kludge up approximately 1/8th of the size to be
1787 * buckets. If this guess ends up being routinely
1788 * off-the-mark, we may need to dynamically readjust this
1789 * based on past performance.
1790 */
1791 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
1792
1793 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
1794 (uintptr_t)tomax || hashsize == 0) {
1795 /*
1796 * We've been given a ludicrously small buffer;
1797 * increment our drop count and leave.
1798 */
1799 dtrace_buffer_drop(buf);
1800 return;
1801 }
1802
1803 /*
1804 * And now, a pathetic attempt to get an odd (or
1805 * perchance, a prime) hash size for better hash distribution.
1806 */
1807 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
1808 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
1809
1810 agb->dtagb_hashsize = hashsize;
1811 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
1812 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
1813 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
1814
1815 for (i = 0; i < agb->dtagb_hashsize; i++)
1816 agb->dtagb_hash[i] = NULL;
1817 }
1818
1819 ASSERT(agg->dtag_first != NULL);
1820 ASSERT(agg->dtag_first->dta_intuple);
1821
1822 /*
1823 * Calculate the hash value based on the key. Note that we _don't_
1824 * include the aggid in the hashing (but we will store it as part of
1825 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
1826 * algorithm: a simple, quick algorithm that has no known funnels, and
1827 * gets good distribution in practice. The efficacy of the hashing
1828 * algorithm (and a comparison with other algorithms) may be found by
1829 * running the ::dtrace_aggstat MDB dcmd.
1830 */
1831 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
1832 i = act->dta_rec.dtrd_offset - agg->dtag_base;
1833 limit = i + act->dta_rec.dtrd_size;
1834 ASSERT(limit <= size);
1835 isstr = DTRACEACT_ISSTRING(act);
1836
1837 for (; i < limit; i++) {
1838 hashval += data[i];
1839 hashval += (hashval << 10);
1840 hashval ^= (hashval >> 6);
1841
1842 if (isstr && data[i] == '\0')
1843 break;
1844 }
1845 }
1846
1847 hashval += (hashval << 3);
1848 hashval ^= (hashval >> 11);
1849 hashval += (hashval << 15);
1850
1851 /*
1852 * Yes, the divide here is expensive -- but it's generally the least
1853 * of the performance issues given the amount of data that we iterate
1854 * over to compute hash values, compare data, etc.
1855 */
1856 ndx = hashval % agb->dtagb_hashsize;
1857
1858 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
1859 ASSERT((caddr_t)key >= tomax);
1860 ASSERT((caddr_t)key < tomax + buf->dtb_size);
1861
1862 if (hashval != key->dtak_hashval || key->dtak_size != size)
1863 continue;
1864
1865 kdata = key->dtak_data;
1866 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
1867
1868 for (act = agg->dtag_first; act->dta_intuple;
1869 act = act->dta_next) {
1870 i = act->dta_rec.dtrd_offset - agg->dtag_base;
1871 limit = i + act->dta_rec.dtrd_size;
1872 ASSERT(limit <= size);
1873 isstr = DTRACEACT_ISSTRING(act);
1874
1875 for (; i < limit; i++) {
1876 if (kdata[i] != data[i])
1877 goto next;
1878
1879 if (isstr && data[i] == '\0')
1880 break;
1881 }
1882 }
1883
1884 if (action != key->dtak_action) {
1885 /*
1886 * We are aggregating on the same value in the same
1887 * aggregation with two different aggregating actions.
1888 * (This should have been picked up in the compiler,
1889 * so we may be dealing with errant or devious DIF.)
1890 * This is an error condition; we indicate as much,
1891 * and return.
1892 */
1893 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
1894 return;
1895 }
1896
1897 /*
1898 * This is a hit: we need to apply the aggregator to
1899 * the value at this key.
1900 */
1901 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
1902 return;
1903 next:
1904 continue;
1905 }
1906
1907 /*
1908 * We didn't find it. We need to allocate some zero-filled space,
1909 * link it into the hash table appropriately, and apply the aggregator
1910 * to the (zero-filled) value.
1911 */
1912 offs = buf->dtb_offset;
1913 while (offs & (align - 1))
1914 offs += sizeof (uint32_t);
1915
1916 /*
1917 * If we don't have enough room to both allocate a new key _and_
1918 * its associated data, increment the drop count and return.
1919 */
1920 if ((uintptr_t)tomax + offs + fsize >
1921 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
1922 dtrace_buffer_drop(buf);
1923 return;
1924 }
1925
1926 /*CONSTCOND*/
1927 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
1928 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
1929 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
1930
1931 key->dtak_data = kdata = tomax + offs;
1932 buf->dtb_offset = offs + fsize;
1933
1934 /*
1935 * Now copy the data across.
1936 */
1937 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
1938
1939 for (i = sizeof (dtrace_aggid_t); i < size; i++)
1940 kdata[i] = data[i];
1941
1942 /*
1943 * Because strings are not zeroed out by default, we need to iterate
1944 * looking for actions that store strings, and we need to explicitly
1945 * pad these strings out with zeroes.
1946 */
1947 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
1948 int nul;
1949
1950 if (!DTRACEACT_ISSTRING(act))
1951 continue;
1952
1953 i = act->dta_rec.dtrd_offset - agg->dtag_base;
1954 limit = i + act->dta_rec.dtrd_size;
1955 ASSERT(limit <= size);
1956
1957 for (nul = 0; i < limit; i++) {
1958 if (nul) {
1959 kdata[i] = '\0';
1960 continue;
1961 }
1962
1963 if (data[i] != '\0')
1964 continue;
1965
1966 nul = 1;
1967 }
1968 }
1969
1970 for (i = size; i < fsize; i++)
1971 kdata[i] = 0;
1972
1973 key->dtak_hashval = hashval;
1974 key->dtak_size = size;
1975 key->dtak_action = action;
1976 key->dtak_next = agb->dtagb_hash[ndx];
1977 agb->dtagb_hash[ndx] = key;
1978
1979 /*
1980 * Finally, apply the aggregator.
1981 */
1982 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
1983 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
1984 }
1985
1986 /*
1987 * Given consumer state, this routine finds a speculation in the INACTIVE
1988 * state and transitions it into the ACTIVE state. If there is no speculation
1989 * in the INACTIVE state, 0 is returned. In this case, no error counter is
1990 * incremented -- it is up to the caller to take appropriate action.
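 * On success, the value returned is i + 1 -- a one-based index into
 * dts_speculations -- which is the identifier ultimately handed back by
 * the speculation() subroutine (see DIF_SUBR_SPECULATION below).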
1991 */
1992 static int
1993 dtrace_speculation(dtrace_state_t *state)
1994 {
1995 int i = 0;
1996 dtrace_speculation_state_t current;
1997 uint32_t *stat = &state->dts_speculations_unavail, count;
1998
1999 while (i < state->dts_nspeculations) {
2000 dtrace_speculation_t *spec = &state->dts_speculations[i];
2001
2002 current = spec->dtsp_state;
2003
2004 if (current != DTRACESPEC_INACTIVE) {
2005 if (current == DTRACESPEC_COMMITTINGMANY ||
2006 current == DTRACESPEC_COMMITTING ||
2007 current == DTRACESPEC_DISCARDING)
2008 stat = &state->dts_speculations_busy;
2009 i++;
2010 continue;
2011 }
2012
2013 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2014 current, DTRACESPEC_ACTIVE) == current)
2015 return (i + 1);
2016 }
2017
2018 /*
2019 * We couldn't find a speculation. If we found as much as a single
2020 * busy speculation buffer, we'll attribute this failure as "busy"
2021 * instead of "unavail".
2022 */
2023 do {
2024 count = *stat;
2025 } while (dtrace_cas32(stat, count, count + 1) != count);
2026
2027 return (0);
2028 }
2029
2030 /*
2031 * This routine commits an active speculation. If the specified speculation
2032 * is not in a valid state to perform a commit(), this routine will silently do
2033 * nothing. The state of the specified speculation is transitioned according
2034 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
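 * In rough outline, as implemented below: ACTIVE becomes COMMITTING;
 * ACTIVEONE becomes COMMITTING when the committing CPU is the one that
 * speculated (non-zero buffer offset) and COMMITTINGMANY otherwise;
 * ACTIVEMANY becomes COMMITTINGMANY.  Once the copy completes, COMMITTING
 * is moved back to INACTIVE here, while COMMITTINGMANY is cleaned back to
 * INACTIVE asynchronously by dtrace_speculation_clean().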
2035 */
2036 static void
2037 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2038 dtrace_specid_t which)
2039 {
2040 dtrace_speculation_t *spec;
2041 dtrace_buffer_t *src, *dest;
2042 uintptr_t daddr, saddr, dlimit;
2043 dtrace_speculation_state_t current, new;
2044 intptr_t offs;
2045
2046 if (which == 0)
2047 return;
2048
2049 if (which > state->dts_nspeculations) {
2050 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2051 return;
2052 }
2053
2054 spec = &state->dts_speculations[which - 1];
2055 src = &spec->dtsp_buffer[cpu];
2056 dest = &state->dts_buffer[cpu];
2057
2058 do {
2059 current = spec->dtsp_state;
2060
2061 if (current == DTRACESPEC_COMMITTINGMANY)
2062 break;
2063
2064 switch (current) {
2065 case DTRACESPEC_INACTIVE:
2066 case DTRACESPEC_DISCARDING:
2067 return;
2068
2069 case DTRACESPEC_COMMITTING:
2070 /*
2071 * This is only possible if we are (a) commit()'ing
2072 * without having done a prior speculate() on this CPU
2073 * and (b) racing with another commit() on a different
2074 * CPU. There's nothing to do -- we just assert that
2075 * our offset is 0.
2076 */
2077 ASSERT(src->dtb_offset == 0);
2078 return;
2079
2080 case DTRACESPEC_ACTIVE:
2081 new = DTRACESPEC_COMMITTING;
2082 break;
2083
2084 case DTRACESPEC_ACTIVEONE:
2085 /*
2086 * This speculation is active on one CPU. If our
2087 * buffer offset is non-zero, we know that the one CPU
2088 * must be us. Otherwise, we are committing on a
2089 * different CPU from the speculate(), and we must
2090 * rely on being asynchronously cleaned.
2091 */
2092 if (src->dtb_offset != 0) {
2093 new = DTRACESPEC_COMMITTING;
2094 break;
2095 }
2096 /*FALLTHROUGH*/
2097
2098 case DTRACESPEC_ACTIVEMANY:
2099 new = DTRACESPEC_COMMITTINGMANY;
2100 break;
2101
2102 default:
2103 ASSERT(0);
2104 }
2105 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2106 current, new) != current);
2107
2108 /*
2109 * We have set the state to indicate that we are committing this
2110 * speculation. Now reserve the necessary space in the destination
2111 * buffer.
2112 */
2113 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2114 sizeof (uint64_t), state, NULL)) < 0) {
2115 dtrace_buffer_drop(dest);
2116 goto out;
2117 }
2118
2119 /*
2120 * We have the space; copy the buffer across. (Note that this is a
2121 * highly suboptimal bcopy(); in the unlikely event that this becomes
2122 * a serious performance issue, a high-performance DTrace-specific
2123 * bcopy() should obviously be invented.)
2124 */
2125 daddr = (uintptr_t)dest->dtb_tomax + offs;
2126 dlimit = daddr + src->dtb_offset;
2127 saddr = (uintptr_t)src->dtb_tomax;
2128
2129 /*
2130 * First, the aligned portion.
2131 */
2132 while (dlimit - daddr >= sizeof (uint64_t)) {
2133 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2134
2135 daddr += sizeof (uint64_t);
2136 saddr += sizeof (uint64_t);
2137 }
2138
2139 /*
2140 * Now any left-over bit...
2141 */
2142 while (dlimit - daddr)
2143 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2144
2145 /*
2146 * Finally, commit the reserved space in the destination buffer.
2147 */
2148 dest->dtb_offset = offs + src->dtb_offset;
2149
2150 out:
2151 /*
2152 * If we're lucky enough to be the only active CPU on this speculation
2153 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2154 */
2155 if (current == DTRACESPEC_ACTIVE ||
2156 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2157 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2158 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2159
2160 ASSERT(rval == DTRACESPEC_COMMITTING);
2161 }
2162
2163 src->dtb_offset = 0;
2164 src->dtb_xamot_drops += src->dtb_drops;
2165 src->dtb_drops = 0;
2166 }
2167
2168 /*
2169 * This routine discards an active speculation. If the specified speculation
2170 * is not in a valid state to perform a discard(), this routine will silently
2171 * do nothing. The state of the specified speculation is transitioned
2172 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2173 */
2174 static void
2175 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2176 dtrace_specid_t which)
2177 {
2178 dtrace_speculation_t *spec;
2179 dtrace_speculation_state_t current, new;
2180 dtrace_buffer_t *buf;
2181
2182 if (which == 0)
2183 return;
2184
2185 if (which > state->dts_nspeculations) {
2186 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2187 return;
2188 }
2189
2190 spec = &state->dts_speculations[which - 1];
2191 buf = &spec->dtsp_buffer[cpu];
2192
2193 do {
2194 current = spec->dtsp_state;
2195
2196 switch (current) {
2197 case DTRACESPEC_INACTIVE:
2198 case DTRACESPEC_COMMITTINGMANY:
2199 case DTRACESPEC_COMMITTING:
2200 case DTRACESPEC_DISCARDING:
2201 return;
2202
2203 case DTRACESPEC_ACTIVE:
2204 case DTRACESPEC_ACTIVEMANY:
2205 new = DTRACESPEC_DISCARDING;
2206 break;
2207
2208 case DTRACESPEC_ACTIVEONE:
2209 if (buf->dtb_offset != 0) {
2210 new = DTRACESPEC_INACTIVE;
2211 } else {
2212 new = DTRACESPEC_DISCARDING;
2213 }
2214 break;
2215
2216 default:
2217 ASSERT(0);
2218 }
2219 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2220 current, new) != current);
2221
2222 buf->dtb_offset = 0;
2223 buf->dtb_drops = 0;
2224 }
2225
2226 /*
2227 * Note: not called from probe context. This function is called
2228 * asynchronously from cross call context to clean any speculations that are
2229 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2230 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2231 * speculation.
2232 */
2233 static void
2234 dtrace_speculation_clean_here(dtrace_state_t *state)
2235 {
2236 dtrace_icookie_t cookie;
2237 processorid_t cpu = CPU->cpu_id;
2238 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2239 dtrace_specid_t i;
2240
2241 cookie = dtrace_interrupt_disable();
2242
2243 if (dest->dtb_tomax == NULL) {
2244 dtrace_interrupt_enable(cookie);
2245 return;
2246 }
2247
2248 for (i = 0; i < state->dts_nspeculations; i++) {
2249 dtrace_speculation_t *spec = &state->dts_speculations[i];
2250 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2251
2252 if (src->dtb_tomax == NULL)
2253 continue;
2254
2255 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2256 src->dtb_offset = 0;
2257 continue;
2258 }
2259
2260 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2261 continue;
2262
2263 if (src->dtb_offset == 0)
2264 continue;
2265
2266 dtrace_speculation_commit(state, cpu, i + 1);
2267 }
2268
2269 dtrace_interrupt_enable(cookie);
2270 }
2271
2272 /*
2273 * Note: not called from probe context. This function is called
2274 * asynchronously (and at a regular interval) to clean any speculations that
2275 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2276 * is work to be done, it cross calls all CPUs to perform that work;
2277 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2278 * INACTIVE state until they have been cleaned by all CPUs.
2279 */
2280 static void
2281 dtrace_speculation_clean(dtrace_state_t *state)
2282 {
2283 int work = 0, rv;
2284 dtrace_specid_t i;
2285
2286 for (i = 0; i < state->dts_nspeculations; i++) {
2287 dtrace_speculation_t *spec = &state->dts_speculations[i];
2288
2289 ASSERT(!spec->dtsp_cleaning);
2290
2291 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2292 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2293 continue;
2294
2295 work++;
2296 spec->dtsp_cleaning = 1;
2297 }
2298
2299 if (!work)
2300 return;
2301
2302 dtrace_xcall(DTRACE_CPUALL,
2303 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2304
2305 /*
2306 * We now know that all CPUs have committed or discarded their
2307 * speculation buffers, as appropriate. We can now set the state
2308 * to inactive.
2309 */
2310 for (i = 0; i < state->dts_nspeculations; i++) {
2311 dtrace_speculation_t *spec = &state->dts_speculations[i];
2312 dtrace_speculation_state_t current, new;
2313
2314 if (!spec->dtsp_cleaning)
2315 continue;
2316
2317 current = spec->dtsp_state;
2318 ASSERT(current == DTRACESPEC_DISCARDING ||
2319 current == DTRACESPEC_COMMITTINGMANY);
2320
2321 new = DTRACESPEC_INACTIVE;
2322
2323 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2324 ASSERT(rv == current);
2325 spec->dtsp_cleaning = 0;
2326 }
2327 }
2328
2329 /*
2330 * Called as part of a speculate() to get the speculative buffer associated
2331 * with a given speculation. Returns NULL if the specified speculation is not
2332 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2333 * the active CPU is not the specified CPU -- the speculation will be
2334 * atomically transitioned into the ACTIVEMANY state.
2335 */
2336 static dtrace_buffer_t *
2337 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2338 dtrace_specid_t which)
2339 {
2340 dtrace_speculation_t *spec;
2341 dtrace_speculation_state_t current, new;
2342 dtrace_buffer_t *buf;
2343
2344 if (which == 0)
2345 return (NULL);
2346
2347 if (which > state->dts_nspeculations) {
2348 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2349 return (NULL);
2350 }
2351
2352 spec = &state->dts_speculations[which - 1];
2353 buf = &spec->dtsp_buffer[cpuid];
2354
2355 do {
2356 current = spec->dtsp_state;
2357
2358 switch (current) {
2359 case DTRACESPEC_INACTIVE:
2360 case DTRACESPEC_COMMITTINGMANY:
2361 case DTRACESPEC_DISCARDING:
2362 return (NULL);
2363
2364 case DTRACESPEC_COMMITTING:
2365 ASSERT(buf->dtb_offset == 0);
2366 return (NULL);
2367
2368 case DTRACESPEC_ACTIVEONE:
2369 /*
2370 * This speculation is currently active on one CPU.
2371 * Check the offset in the buffer; if it's non-zero,
2372 * that CPU must be us (and we leave the state alone).
2373 * If it's zero, assume that we're starting on a new
2374 * CPU -- and change the state to indicate that the
2375 * speculation is active on more than one CPU.
2376 */
2377 if (buf->dtb_offset != 0)
2378 return (buf);
2379
2380 new = DTRACESPEC_ACTIVEMANY;
2381 break;
2382
2383 case DTRACESPEC_ACTIVEMANY:
2384 return (buf);
2385
2386 case DTRACESPEC_ACTIVE:
2387 new = DTRACESPEC_ACTIVEONE;
2388 break;
2389
2390 default:
2391 ASSERT(0);
2392 }
2393 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2394 current, new) != current);
2395
2396 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2397 return (buf);
2398 }
2399
2400 /*
2401 * This function implements the DIF emulator's variable lookups. The emulator
2402 * passes a reserved variable identifier and optional built-in array index.
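 * For illustration: a D reference to arg3 arrives as v == DIF_VAR_ARG3 and
 * is normalized below into an args[] access (v == DIF_VAR_ARGS, ndx == 3);
 * arguments cached in mstate->dtms_arg[] are returned directly, and higher
 * indices are fetched from the provider (dtps_getargval) or via
 * dtrace_getarg().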
2403 */
2404 static uint64_t
2405 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2406 uint64_t ndx)
2407 {
2408 /*
2409 * If we're accessing one of the uncached arguments, we'll turn this
2410 * into a reference in the args array.
2411 */
2412 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2413 ndx = v - DIF_VAR_ARG0;
2414 v = DIF_VAR_ARGS;
2415 }
2416
2417 switch (v) {
2418 case DIF_VAR_ARGS:
2419 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2420 if (ndx >= sizeof (mstate->dtms_arg) /
2421 sizeof (mstate->dtms_arg[0])) {
2422 #if !defined(__APPLE__)
2423 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2424 #else
2425 /* Account for introduction of __dtrace_probe() on xnu. */
2426 int aframes = mstate->dtms_probe->dtpr_aframes + 3;
2427 #endif /* __APPLE__ */
2428 dtrace_provider_t *pv;
2429 uint64_t val;
2430
2431 pv = mstate->dtms_probe->dtpr_provider;
2432 if (pv->dtpv_pops.dtps_getargval != NULL)
2433 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2434 mstate->dtms_probe->dtpr_id,
2435 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2436 #if defined(__APPLE__)
2437 /* Special case access of arg5 as passed to dtrace_probeid_error (which see.) */
2438 else if (mstate->dtms_probe->dtpr_id == dtrace_probeid_error && ndx == 5) {
2439 return ((dtrace_state_t *)(mstate->dtms_arg[0]))->dts_arg_error_illval;
2440 }
2441 #endif /* __APPLE__ */
2442 else
2443 val = dtrace_getarg(ndx, aframes);
2444
2445 /*
2446 * This is regrettably required to keep the compiler
2447 * from tail-optimizing the call to dtrace_getarg().
2448 * The condition always evaluates to true, but the
2449 * compiler has no way of figuring that out a priori.
2450 * (None of this would be necessary if the compiler
2451 * could be relied upon to _always_ tail-optimize
2452 * the call to dtrace_getarg() -- but it can't.)
2453 */
2454 if (mstate->dtms_probe != NULL)
2455 return (val);
2456
2457 ASSERT(0);
2458 }
2459
2460 return (mstate->dtms_arg[ndx]);
2461
2462 #if !defined(__APPLE__)
2463 case DIF_VAR_UREGS: {
2464 klwp_t *lwp;
2465
2466 if (!dtrace_priv_proc(state))
2467 return (0);
2468
2469 if ((lwp = curthread->t_lwp) == NULL) {
2470 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2471 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
2472 return (0);
2473 }
2474
2475 return (dtrace_getreg(lwp->lwp_regs, ndx));
2476 }
2477 #else
2478 case DIF_VAR_UREGS: {
2479 thread_t thread;
2480
2481 if (!dtrace_priv_proc(state))
2482 return (0);
2483
2484 if ((thread = current_thread()) == NULL) {
2485 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2486 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = 0;
2487 return (0);
2488 }
2489
2490 return (dtrace_getreg(find_user_regs(thread), ndx));
2491 }
2492 #endif /* __APPLE__ */
2493
2494 #if !defined(__APPLE__)
2495 case DIF_VAR_CURTHREAD:
2496 if (!dtrace_priv_kernel(state))
2497 return (0);
2498 return ((uint64_t)(uintptr_t)curthread);
2499 #else
2500 case DIF_VAR_CURTHREAD:
2501 if (!dtrace_priv_kernel(state))
2502 return (0);
2503
2504 return ((uint64_t)(uintptr_t)current_thread());
2505 #endif /* __APPLE__ */
2506
2507 case DIF_VAR_TIMESTAMP:
2508 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2509 mstate->dtms_timestamp = dtrace_gethrtime();
2510 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2511 }
2512 return (mstate->dtms_timestamp);
2513
2514 #if !defined(__APPLE__)
2515 case DIF_VAR_VTIMESTAMP:
2516 ASSERT(dtrace_vtime_references != 0);
2517 return (curthread->t_dtrace_vtime);
2518 #else
2519 case DIF_VAR_VTIMESTAMP:
2520 ASSERT(dtrace_vtime_references != 0);
2521 return (dtrace_get_thread_vtime(current_thread()));
2522 #endif /* __APPLE__ */
2523
2524 case DIF_VAR_WALLTIMESTAMP:
2525 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2526 mstate->dtms_walltimestamp = dtrace_gethrestime();
2527 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2528 }
2529 return (mstate->dtms_walltimestamp);
2530
2531 case DIF_VAR_IPL:
2532 if (!dtrace_priv_kernel(state))
2533 return (0);
2534 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2535 mstate->dtms_ipl = dtrace_getipl();
2536 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2537 }
2538 return (mstate->dtms_ipl);
2539
2540 case DIF_VAR_EPID:
2541 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2542 return (mstate->dtms_epid);
2543
2544 case DIF_VAR_ID:
2545 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2546 return (mstate->dtms_probe->dtpr_id);
2547
2548 case DIF_VAR_STACKDEPTH:
2549 if (!dtrace_priv_kernel(state))
2550 return (0);
2551 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2552 #if !defined(__APPLE__)
2553 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2554 #else
2555 /* Account for introduction of __dtrace_probe() on xnu. */
2556 int aframes = mstate->dtms_probe->dtpr_aframes + 3;
2557 #endif /* __APPLE__ */
2558
2559 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2560 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2561 }
2562 return (mstate->dtms_stackdepth);
2563
2564 case DIF_VAR_USTACKDEPTH:
2565 if (!dtrace_priv_proc(state))
2566 return (0);
2567 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2568 /*
2569 * See comment in DIF_VAR_PID.
2570 */
2571 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2572 CPU_ON_INTR(CPU)) {
2573 mstate->dtms_ustackdepth = 0;
2574 } else {
2575 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2576 mstate->dtms_ustackdepth =
2577 dtrace_getustackdepth();
2578 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2579 }
2580 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2581 }
2582 return (mstate->dtms_ustackdepth);
2583
2584 case DIF_VAR_CALLER:
2585 if (!dtrace_priv_kernel(state))
2586 return (0);
2587 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2588 #if !defined(__APPLE__)
2589 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2590 #else
2591 /* Account for introduction of __dtrace_probe() on xnu. */
2592 int aframes = mstate->dtms_probe->dtpr_aframes + 3;
2593 #endif /* __APPLE__ */
2594
2595 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2596 /*
2597 * If this is an unanchored probe, we are
2598 * required to go through the slow path:
2599 * dtrace_caller() only guarantees correct
2600 * results for anchored probes.
2601 */
2602 pc_t caller[2];
2603
2604 dtrace_getpcstack(caller, 2, aframes,
2605 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2606 mstate->dtms_caller = caller[1];
2607 } else if ((mstate->dtms_caller =
2608 dtrace_caller(aframes)) == -1) {
2609 /*
2610 * We have failed to do this the quick way;
2611 * we must resort to the slower approach of
2612 * calling dtrace_getpcstack().
2613 */
2614 pc_t caller;
2615
2616 dtrace_getpcstack(&caller, 1, aframes, NULL);
2617 mstate->dtms_caller = caller;
2618 }
2619
2620 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2621 }
2622 return (mstate->dtms_caller);
2623
2624 case DIF_VAR_UCALLER:
2625 if (!dtrace_priv_proc(state))
2626 return (0);
2627
2628 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2629 uint64_t ustack[3];
2630
2631 /*
2632 * dtrace_getupcstack() fills in the first uint64_t
2633 * with the current PID. The second uint64_t will
2634 * be the program counter at user-level. The third
2635 * uint64_t will contain the caller, which is what
2636 * we're after.
2637 */
2638 ustack[2] = NULL;
2639 dtrace_getupcstack(ustack, 3);
2640 mstate->dtms_ucaller = ustack[2];
2641 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2642 }
2643
2644 return (mstate->dtms_ucaller);
2645
2646 case DIF_VAR_PROBEPROV:
2647 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2648 return ((uint64_t)(uintptr_t)
2649 mstate->dtms_probe->dtpr_provider->dtpv_name);
2650
2651 case DIF_VAR_PROBEMOD:
2652 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2653 return ((uint64_t)(uintptr_t)
2654 mstate->dtms_probe->dtpr_mod);
2655
2656 case DIF_VAR_PROBEFUNC:
2657 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2658 return ((uint64_t)(uintptr_t)
2659 mstate->dtms_probe->dtpr_func);
2660
2661 case DIF_VAR_PROBENAME:
2662 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2663 return ((uint64_t)(uintptr_t)
2664 mstate->dtms_probe->dtpr_name);
2665
2666 #if !defined(__APPLE__)
2667 case DIF_VAR_PID:
2668 if (!dtrace_priv_proc(state))
2669 return (0);
2670
2671 /*
2672 * Note that we are assuming that an unanchored probe is
2673 * always due to a high-level interrupt. (And we're assuming
2674 * that there is only a single high level interrupt.)
2675 */
2676 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2677 return (pid0.pid_id);
2678
2679 /*
2680 * It is always safe to dereference one's own t_procp pointer:
2681 * it always points to a valid, allocated proc structure.
2682 * Further, it is always safe to dereference the p_pidp member
2683 * of one's own proc structure. (These are truisms because
2684 * threads and processes don't clean up their own state --
2685 * they leave that task to whomever reaps them.)
2686 */
2687 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2688
2689 #else
2690 case DIF_VAR_PID:
2691 if (!dtrace_priv_proc(state))
2692 return (0);
2693
2694 /*
2695 * Note that we are assuming that an unanchored probe is
2696 * always due to a high-level interrupt. (And we're assuming
2697 * that there is only a single high level interrupt.)
2698 */
2699 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2700 /* Anchored probe that fires while on an interrupt accrues to process 0 */
2701 return 0;
2702
2703 return ((uint64_t)proc_selfpid());
2704 #endif /* __APPLE__ */
2705
2706 #if !defined(__APPLE__)
2707 case DIF_VAR_PPID:
2708 if (!dtrace_priv_proc(state))
2709 return (0);
2710
2711 /*
2712 * See comment in DIF_VAR_PID.
2713 */
2714 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2715 return (pid0.pid_id);
2716
2717 return ((uint64_t)curthread->t_procp->p_ppid);
2718 #else
2719 case DIF_VAR_PPID:
2720 if (!dtrace_priv_proc(state))
2721 return (0);
2722
2723 /*
2724 * See comment in DIF_VAR_PID.
2725 */
2726 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2727 return (0);
2728
2729 return ((uint64_t)(uintptr_t)(current_proc()->p_ppid));
2730 #endif /* __APPLE__ */
2731
2732 #if !defined(__APPLE__)
2733 case DIF_VAR_TID:
2734 /*
2735 * See comment in DIF_VAR_PID.
2736 */
2737 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2738 return (0);
2739
2740 return ((uint64_t)curthread->t_tid);
2741 #else
2742 case DIF_VAR_TID:
2743 /*
2744 * See comment in DIF_VAR_PID.
2745 */
2746 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2747 return (0);
2748
2749 return ((uint64_t)(uintptr_t)current_thread()); /* Is user's (pthread_t)t->kernel_thread */
2750 #endif /* __APPLE__ */
2751
2752 #if !defined(__APPLE__)
2753 case DIF_VAR_EXECNAME:
2754 if (!dtrace_priv_proc(state))
2755 return (0);
2756
2757 /*
2758 * See comment in DIF_VAR_PID.
2759 */
2760 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2761 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
2762
2763 /*
2764 * It is always safe to dereference one's own t_procp pointer:
2765 * it always points to a valid, allocated proc structure.
2766 * (This is true because threads don't clean up their own
2767 * state -- they leave that task to whomever reaps them.)
2768 */
2769 return ((uint64_t)(uintptr_t)
2770 curthread->t_procp->p_user.u_comm);
2771 #else
2772 case DIF_VAR_EXECNAME:
2773 {
2774 char *xname = (char *)mstate->dtms_scratch_ptr;
2775 size_t scratch_size = MAXCOMLEN+1;
2776
2777 /* The scratch allocation's lifetime is that of the clause. */
2778 if (mstate->dtms_scratch_ptr + scratch_size >
2779 mstate->dtms_scratch_base + mstate->dtms_scratch_size)
2780 return 0;
2781
2782 if (!dtrace_priv_proc(state))
2783 return (0);
2784
2785 mstate->dtms_scratch_ptr += scratch_size;
2786 proc_selfname( xname, MAXCOMLEN );
2787
2788 return ((uint64_t)(uintptr_t)xname);
2789 }
2790 #endif /* __APPLE__ */
2791 #if !defined(__APPLE__)
2792 case DIF_VAR_ZONENAME:
2793 if (!dtrace_priv_proc(state))
2794 return (0);
2795
2796 /*
2797 * See comment in DIF_VAR_PID.
2798 */
2799 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2800 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
2801
2802 /*
2803 * It is always safe to dereference one's own t_procp pointer:
2804 * it always points to a valid, allocated proc structure.
2805 * (This is true because threads don't clean up their own
2806 * state -- they leave that task to whomever reaps them.)
2807 */
2808 return ((uint64_t)(uintptr_t)
2809 curthread->t_procp->p_zone->zone_name);
2810
2811 #else
2812 case DIF_VAR_ZONENAME:
2813 if (!dtrace_priv_proc(state))
2814 return (0);
2815
2816 return ((uint64_t)(uintptr_t)NULL); /* Darwin doesn't do "zones" */
2817 #endif /* __APPLE__ */
2818
2819 #if !defined(__APPLE__)
2820 case DIF_VAR_UID:
2821 if (!dtrace_priv_proc(state))
2822 return (0);
2823
2824 /*
2825 * See comment in DIF_VAR_PID.
2826 */
2827 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2828 return ((uint64_t)p0.p_cred->cr_uid);
2829
2830 return ((uint64_t)curthread->t_cred->cr_uid);
2831 #else
2832 case DIF_VAR_UID:
2833 if (!dtrace_priv_proc(state))
2834 return (0);
2835
2836 /*
2837 * See comment in DIF_VAR_PID.
2838 */
2839 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2840 return (0);
2841
2842 if (dtrace_CRED() != NULL)
2843 return ((uint64_t)kauth_getuid());
2844 else
2845 return -1LL;
2846 #endif /* __APPLE__ */
2847
2848 #if !defined(__APPLE__)
2849 case DIF_VAR_GID:
2850 if (!dtrace_priv_proc(state))
2851 return (0);
2852
2853 /*
2854 * See comment in DIF_VAR_PID.
2855 */
2856 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2857 return ((uint64_t)p0.p_cred->cr_gid);
2858
2859 return ((uint64_t)curthread->t_cred->cr_gid);
2860 #else
2861 case DIF_VAR_GID:
2862 if (!dtrace_priv_proc(state))
2863 return (0);
2864
2865 /*
2866 * See comment in DIF_VAR_PID.
2867 */
2868 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2869 return (0);
2870
2871 if (dtrace_CRED() != NULL)
2872 return ((uint64_t)kauth_getgid());
2873 else
2874 return -1LL;
2875 #endif /* __APPLE__ */
2876
2877 #if !defined(__APPLE__)
2878 case DIF_VAR_ERRNO: {
2879 klwp_t *lwp;
2880 if (!dtrace_priv_proc(state))
2881 return (0);
2882
2883 /*
2884 * See comment in DIF_VAR_PID.
2885 */
2886 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2887 return (0);
2888
2889 if ((lwp = curthread->t_lwp) == NULL)
2890 return (0);
2891
2892 return ((uint64_t)lwp->lwp_errno);
2893 }
2894 #else
2895 case DIF_VAR_ERRNO: {
2896 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
2897 if (!dtrace_priv_proc(state))
2898 return (0);
2899
2900 /*
2901 * See comment in DIF_VAR_PID.
2902 */
2903 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2904 return (0);
2905
2906 return (uthread ? uthread->t_dtrace_errno : -1);
2907 }
2908 #endif /* __APPLE__ */
2909
2910 default:
2911 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2912 return (0);
2913 }
2914 }
2915
2916 /*
2917 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
2918 * Notice that we don't bother validating the proper number of arguments or
2919 * their types in the tuple stack. This isn't needed: all argument
2920 * interpretation is safe because of our load safety -- the worst that can
2921 * happen is that a bogus program can obtain bogus results.
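 * Here subr identifies the DIF_SUBR_* subroutine being invoked, rd names
 * the register that receives the result, and tupregs[0 .. nargs - 1] hold
 * the already-evaluated arguments in left-to-right order.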
2922 */
2923 static void
2924 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
2925 dtrace_key_t *tupregs, int nargs,
2926 dtrace_mstate_t *mstate, dtrace_state_t *state)
2927 {
2928 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
2929 #if !defined(__APPLE__)
2930 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
2931 #else
2932 volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
2933 #endif /* __APPLE__ */
2934
2935 #if !defined(__APPLE__)
2936 union {
2937 mutex_impl_t mi;
2938 uint64_t mx;
2939 } m;
2940
2941 union {
2942 krwlock_t ri;
2943 uintptr_t rw;
2944 } r;
2945 #else
2946 /* XXX awaits lock/mutex work */
2947 #endif /* __APPLE__ */
2948
2949 switch (subr) {
2950 case DIF_SUBR_RAND:
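/*
 * rand() is a quick linear-congruential-style scramble of the
 * high-resolution timestamp: adequate for probabilistic sampling in D,
 * not for anything cryptographic.
 */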
2951 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
2952 break;
2953
2954 #if !defined(__APPLE__)
2955 case DIF_SUBR_MUTEX_OWNED:
2956 m.mx = dtrace_load64(tupregs[0].dttk_value);
2957 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
2958 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
2959 else
2960 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
2961 break;
2962
2963 case DIF_SUBR_MUTEX_OWNER:
2964 m.mx = dtrace_load64(tupregs[0].dttk_value);
2965 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
2966 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
2967 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
2968 else
2969 regs[rd] = 0;
2970 break;
2971
2972 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
2973 m.mx = dtrace_load64(tupregs[0].dttk_value);
2974 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
2975 break;
2976
2977 case DIF_SUBR_MUTEX_TYPE_SPIN:
2978 m.mx = dtrace_load64(tupregs[0].dttk_value);
2979 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
2980 break;
2981
2982 case DIF_SUBR_RW_READ_HELD: {
2983 uintptr_t tmp;
2984
2985 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
2986 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
2987 break;
2988 }
2989
2990 case DIF_SUBR_RW_WRITE_HELD:
2991 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
2992 regs[rd] = _RW_WRITE_HELD(&r.ri);
2993 break;
2994
2995 case DIF_SUBR_RW_ISWRITER:
2996 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
2997 regs[rd] = _RW_ISWRITER(&r.ri);
2998 break;
2999 #else
3000 /* XXX awaits lock/mutex work */
3001 #endif /* __APPLE__ */
3002
3003 case DIF_SUBR_BCOPY: {
3004 /*
3005 * We need to be sure that the destination is in the scratch
3006 * region -- no other region is allowed.
3007 */
3008 uintptr_t src = tupregs[0].dttk_value;
3009 uintptr_t dest = tupregs[1].dttk_value;
3010 size_t size = tupregs[2].dttk_value;
3011
3012 if (!dtrace_inscratch(dest, size, mstate)) {
3013 *flags |= CPU_DTRACE_BADADDR;
3014 *illval = regs[rd];
3015 break;
3016 }
3017
3018 dtrace_bcopy((void *)src, (void *)dest, size);
3019 break;
3020 }
3021
3022 case DIF_SUBR_ALLOCA:
3023 case DIF_SUBR_COPYIN: {
3024 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3025 uint64_t size =
3026 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3027 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3028
3029 /*
3030 * This action doesn't require any credential checks since
3031 * probes will not activate in user contexts to which the
3032 * enabling user does not have permissions.
3033 */
3034 if (mstate->dtms_scratch_ptr + scratch_size >
3035 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3036 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3037 regs[rd] = NULL;
3038 break;
3039 }
3040
3041 if (subr == DIF_SUBR_COPYIN) {
3042 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3043 dtrace_copyin(tupregs[0].dttk_value, dest, size);
3044 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3045 }
3046
3047 mstate->dtms_scratch_ptr += scratch_size;
3048 regs[rd] = dest;
3049 break;
3050 }
3051
3052 case DIF_SUBR_COPYINTO: {
3053 uint64_t size = tupregs[1].dttk_value;
3054 uintptr_t dest = tupregs[2].dttk_value;
3055
3056 /*
3057 * This action doesn't require any credential checks since
3058 * probes will not activate in user contexts to which the
3059 * enabling user does not have permissions.
3060 */
3061 if (!dtrace_inscratch(dest, size, mstate)) {
3062 *flags |= CPU_DTRACE_BADADDR;
3063 *illval = regs[rd];
3064 break;
3065 }
3066
3067 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3068 dtrace_copyin(tupregs[0].dttk_value, dest, size);
3069 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3070 break;
3071 }
3072
3073 case DIF_SUBR_COPYINSTR: {
3074 uintptr_t dest = mstate->dtms_scratch_ptr;
3075 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3076
3077 if (nargs > 1 && tupregs[1].dttk_value < size)
3078 size = tupregs[1].dttk_value + 1;
3079
3080 /*
3081 * This action doesn't require any credential checks since
3082 * probes will not activate in user contexts to which the
3083 * enabling user does not have permissions.
3084 */
3085 if (mstate->dtms_scratch_ptr + size >
3086 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3087 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3088 regs[rd] = NULL;
3089 break;
3090 }
3091
3092 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3093 dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
3094 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3095
3096 ((char *)dest)[size - 1] = '\0';
3097 mstate->dtms_scratch_ptr += size;
3098 regs[rd] = dest;
3099 break;
3100 }
3101
3102 #if !defined(__APPLE__)
3103 case DIF_SUBR_MSGSIZE:
3104 case DIF_SUBR_MSGDSIZE: {
3105 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3106 uintptr_t wptr, rptr;
3107 size_t count = 0;
3108 int cont = 0;
3109
3110 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3111 wptr = dtrace_loadptr(baddr +
3112 offsetof(mblk_t, b_wptr));
3113
3114 rptr = dtrace_loadptr(baddr +
3115 offsetof(mblk_t, b_rptr));
3116
3117 if (wptr < rptr) {
3118 *flags |= CPU_DTRACE_BADADDR;
3119 *illval = tupregs[0].dttk_value;
3120 break;
3121 }
3122
3123 daddr = dtrace_loadptr(baddr +
3124 offsetof(mblk_t, b_datap));
3125
3126 baddr = dtrace_loadptr(baddr +
3127 offsetof(mblk_t, b_cont));
3128
3129 /*
3130 * We want to guard against denial-of-service here,
3131 * so we're only going to search the list for
3132 * dtrace_msgdsize_max mblks.
3133 */
3134 if (cont++ > dtrace_msgdsize_max) {
3135 *flags |= CPU_DTRACE_ILLOP;
3136 break;
3137 }
3138
3139 if (subr == DIF_SUBR_MSGDSIZE) {
3140 if (dtrace_load8(daddr +
3141 offsetof(dblk_t, db_type)) != M_DATA)
3142 continue;
3143 }
3144
3145 count += wptr - rptr;
3146 }
3147
3148 if (!(*flags & CPU_DTRACE_FAULT))
3149 regs[rd] = count;
3150
3151 break;
3152 }
3153 #else
3154 case DIF_SUBR_MSGSIZE:
3155 case DIF_SUBR_MSGDSIZE: {
3156 /* Darwin does not implement SysV streams messages */
3157 regs[rd] = 0;
3158 break;
3159 }
3160 #endif /* __APPLE__ */
3161
3162 #if !defined(__APPLE__)
3163 case DIF_SUBR_PROGENYOF: {
3164 pid_t pid = tupregs[0].dttk_value;
3165 proc_t *p;
3166 int rval = 0;
3167
3168 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3169
3170 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3171 if (p->p_pidp->pid_id == pid) {
3172 rval = 1;
3173 break;
3174 }
3175 }
3176
3177 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3178
3179 regs[rd] = rval;
3180 break;
3181 }
3182 #else
3183 case DIF_SUBR_PROGENYOF: {
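/*
 * Walk from the current process up through its ancestors using load-safe
 * reads, returning 1 if any of them has the given pid.  The walk is
 * bounded by nprocs so that a stale or cyclic parent chain cannot wedge
 * probe context.
 */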
3184 pid_t pid = tupregs[0].dttk_value;
3185 struct proc *p = current_proc();
3186 int rval = 0, lim = nprocs;
3187
3188 while(p && (lim-- > 0)) {
3189 pid_t ppid;
3190
3191 ppid = (pid_t)dtrace_load32((uintptr_t)&(p->p_pid));
3192 if (*flags & CPU_DTRACE_FAULT)
3193 break;
3194
3195 if (ppid == pid) {
3196 rval = 1;
3197 break;
3198 }
3199
3200 if (ppid == 0)
3201 break; /* Can't climb process tree any further. */
3202
3203 p = (struct proc *)dtrace_loadptr((uintptr_t)&(p->p_pptr));
3204 if (*flags & CPU_DTRACE_FAULT)
3205 break;
3206 }
3207
3208 regs[rd] = rval;
3209 break;
3210 }
3211 #endif /* __APPLE__ */
3212
3213 case DIF_SUBR_SPECULATION:
3214 regs[rd] = dtrace_speculation(state);
3215 break;
3216
3217 #if !defined(__APPLE__)
3218 case DIF_SUBR_COPYOUT: {
3219 uintptr_t kaddr = tupregs[0].dttk_value;
3220 uintptr_t uaddr = tupregs[1].dttk_value;
3221 uint64_t size = tupregs[2].dttk_value;
3222
3223 if (!dtrace_destructive_disallow &&
3224 dtrace_priv_proc_control(state) &&
3225 !dtrace_istoxic(kaddr, size)) {
3226 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3227 dtrace_copyout(kaddr, uaddr, size);
3228 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3229 }
3230 break;
3231 }
3232
3233 case DIF_SUBR_COPYOUTSTR: {
3234 uintptr_t kaddr = tupregs[0].dttk_value;
3235 uintptr_t uaddr = tupregs[1].dttk_value;
3236 uint64_t size = tupregs[2].dttk_value;
3237
3238 if (!dtrace_destructive_disallow &&
3239 dtrace_priv_proc_control(state) &&
3240 !dtrace_istoxic(kaddr, size)) {
3241 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3242 dtrace_copyoutstr(kaddr, uaddr, size);
3243 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3244 }
3245 break;
3246 }
3247 #else
3248 case DIF_SUBR_COPYOUT: {
3249 uintptr_t kaddr = tupregs[0].dttk_value;
3250 user_addr_t uaddr = tupregs[1].dttk_value;
3251 uint64_t size = tupregs[2].dttk_value;
3252
3253 if (!dtrace_destructive_disallow &&
3254 dtrace_priv_proc_control(state) &&
3255 !dtrace_istoxic(kaddr, size)) {
3256 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3257 dtrace_copyout(kaddr, uaddr, size);
3258 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3259 }
3260 break;
3261 }
3262
3263 case DIF_SUBR_COPYOUTSTR: {
3264 uintptr_t kaddr = tupregs[0].dttk_value;
3265 user_addr_t uaddr = tupregs[1].dttk_value;
3266 uint64_t size = tupregs[2].dttk_value;
3267
3268 if (!dtrace_destructive_disallow &&
3269 dtrace_priv_proc_control(state) &&
3270 !dtrace_istoxic(kaddr, size)) {
3271 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3272 dtrace_copyoutstr(kaddr, uaddr, size);
3273 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3274 }
3275 break;
3276 }
3277 #endif /* __APPLE__ */
3278
3279 case DIF_SUBR_STRLEN:
3280 regs[rd] = dtrace_strlen((char *)(uintptr_t)
3281 tupregs[0].dttk_value,
3282 state->dts_options[DTRACEOPT_STRSIZE]);
3283 break;
3284
3285 case DIF_SUBR_STRCHR:
3286 case DIF_SUBR_STRRCHR: {
3287 /*
3288 * We're going to iterate over the string looking for the
3289 * specified character. We will iterate until we have reached
3290 * the string length or we have found the character. If this
3291 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3292 * of the specified character instead of the first.
3293 */
3294 uintptr_t addr = tupregs[0].dttk_value;
3295 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3296 char c, target = (char)tupregs[1].dttk_value;
3297
3298 for (regs[rd] = NULL; addr < limit; addr++) {
3299 if ((c = dtrace_load8(addr)) == target) {
3300 regs[rd] = addr;
3301
3302 if (subr == DIF_SUBR_STRCHR)
3303 break;
3304 }
3305
3306 if (c == '\0')
3307 break;
3308 }
3309
3310 break;
3311 }
3312
3313 case DIF_SUBR_STRSTR:
3314 case DIF_SUBR_INDEX:
3315 case DIF_SUBR_RINDEX: {
3316 /*
3317 * We're going to iterate over the string looking for the
3318 * specified string. We will iterate until we have reached
3319 * the string length or we have found the string. (Yes, this
3320 * is done in the most naive way possible -- but considering
3321 * that the string we're searching for is likely to be
3322 * relatively short, the complexity of Rabin-Karp or similar
3323 * hardly seems merited.)
3324 */
3325 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3326 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3327 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3328 size_t len = dtrace_strlen(addr, size);
3329 size_t sublen = dtrace_strlen(substr, size);
3330 char *limit = addr + len, *orig = addr;
3331 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3332 int inc = 1;
3333
3334 regs[rd] = notfound;
3335
3336 /*
3337 * strstr() and index()/rindex() have similar semantics if
3338 * both strings are the empty string: strstr() returns a
3339 * pointer to the (empty) string, and index() and rindex()
3340 * both return index 0 (regardless of any position argument).
3341 */
3342 if (sublen == 0 && len == 0) {
3343 if (subr == DIF_SUBR_STRSTR)
3344 regs[rd] = (uintptr_t)addr;
3345 else
3346 regs[rd] = 0;
3347 break;
3348 }
3349
3350 if (subr != DIF_SUBR_STRSTR) {
3351 if (subr == DIF_SUBR_RINDEX) {
3352 limit = orig - 1;
3353 addr += len;
3354 inc = -1;
3355 }
3356
3357 /*
3358 * Both index() and rindex() take an optional position
3359 * argument that denotes the starting position.
3360 */
3361 if (nargs == 3) {
3362 int64_t pos = (int64_t)tupregs[2].dttk_value;
3363
3364 /*
3365 * If the position argument to index() is
3366 * negative, Perl implicitly clamps it at
3367 * zero. This semantic is a little surprising
3368 * given the special meaning of negative
3369 * positions to similar Perl functions like
3370 * substr(), but it appears to reflect a
3371 * notion that index() can start from a
3372 * negative index and increment its way up to
3373 * the string. Given this notion, Perl's
3374 * rindex() is at least self-consistent in
3375 * that it implicitly clamps positions greater
3376 * than the string length to be the string
3377 * length. Where Perl completely loses
3378 * coherence, however, is when the specified
3379 * substring is the empty string (""). In
3380 * this case, even if the position is
3381 * negative, rindex() returns 0 -- and even if
3382 * the position is greater than the length,
3383 * index() returns the string length. These
3384 * semantics violate the notion that index()
3385 * should never return a value less than the
3386 * specified position and that rindex() should
3387 * never return a value greater than the
3388 * specified position. (One assumes that
3389 * these semantics are artifacts of Perl's
3390 * implementation and not the results of
3391 * deliberate design -- it beggars belief that
3392 * even Larry Wall could desire such oddness.)
3393 * While in the abstract one would wish for
3394 * consistent position semantics across
3395 * substr(), index() and rindex() -- or at the
3396 * very least self-consistent position
3397 * semantics for index() and rindex() -- we
3398 * instead opt to keep with the extant Perl
3399 * semantics, in all their broken glory. (Do
3400 * we have more desire to maintain Perl's
3401 * semantics than Perl does? Probably.)
3402 */
3403 if (subr == DIF_SUBR_RINDEX) {
3404 if (pos < 0) {
3405 if (sublen == 0)
3406 regs[rd] = 0;
3407 break;
3408 }
3409
3410 if (pos > len)
3411 pos = len;
3412 } else {
3413 if (pos < 0)
3414 pos = 0;
3415
3416 if (pos >= len) {
3417 if (sublen == 0)
3418 regs[rd] = len;
3419 break;
3420 }
3421 }
3422
3423 addr = orig + pos;
3424 }
3425 }
3426
3427 for (regs[rd] = notfound; addr != limit; addr += inc) {
3428 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3429 if (subr != DIF_SUBR_STRSTR) {
3430 /*
3431 * As D index() and rindex() are
3432 * modeled on Perl (and not on awk),
3433 * we return a zero-based (and not a
3434 * one-based) index. (For you Perl
3435 * weenies: no, we're not going to add
3436 * $[ -- and shouldn't you be at a con
3437 * or something?)
3438 */
3439 regs[rd] = (uintptr_t)(addr - orig);
3440 break;
3441 }
3442
3443 ASSERT(subr == DIF_SUBR_STRSTR);
3444 regs[rd] = (uintptr_t)addr;
3445 break;
3446 }
3447 }
3448
3449 break;
3450 }
3451
3452 case DIF_SUBR_STRTOK: {
3453 uintptr_t addr = tupregs[0].dttk_value;
3454 uintptr_t tokaddr = tupregs[1].dttk_value;
3455 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3456 uintptr_t limit, toklimit = tokaddr + size;
3457 uint8_t c, tokmap[32]; /* 256 / 8 */
3458 char *dest = (char *)mstate->dtms_scratch_ptr;
3459 int i;
3460
3461 if (mstate->dtms_scratch_ptr + size >
3462 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3463 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3464 regs[rd] = NULL;
3465 break;
3466 }
3467
3468 if (addr == NULL) {
3469 /*
3470 * If the address specified is NULL, we use our saved
3471 * strtok pointer from the mstate. Note that this
3472 * means that the saved strtok pointer is _only_
3473 * valid within multiple enablings of the same probe --
3474 * it behaves like an implicit clause-local variable.
3475 */
3476 addr = mstate->dtms_strtok;
3477 }
3478
3479 /*
3480 * First, zero the token map, and then process the token
3481 * string -- setting a bit in the map for every character
3482 * found in the token string.
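 * For illustration: the byte ':' (0x3a) sets bit 2 of tokmap[7].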
3483 */
3484 for (i = 0; i < sizeof (tokmap); i++)
3485 tokmap[i] = 0;
3486
3487 for (; tokaddr < toklimit; tokaddr++) {
3488 if ((c = dtrace_load8(tokaddr)) == '\0')
3489 break;
3490
3491 ASSERT((c >> 3) < sizeof (tokmap));
3492 tokmap[c >> 3] |= (1 << (c & 0x7));
3493 }
3494
3495 for (limit = addr + size; addr < limit; addr++) {
3496 /*
3497 * We're looking for a character that is _not_ contained
3498 * in the token string.
3499 */
3500 if ((c = dtrace_load8(addr)) == '\0')
3501 break;
3502
3503 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3504 break;
3505 }
3506
3507 if (c == '\0') {
3508 /*
3509 * We reached the end of the string without finding
3510 * any character that was not in the token string.
3511 * We return NULL in this case, and we set the saved
3512 * address to NULL as well.
3513 */
3514 regs[rd] = NULL;
3515 mstate->dtms_strtok = NULL;
3516 break;
3517 }
3518
3519 /*
3520 * From here on, we're copying into the destination string.
3521 */
3522 for (i = 0; addr < limit && i < size - 1; addr++) {
3523 if ((c = dtrace_load8(addr)) == '\0')
3524 break;
3525
3526 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3527 break;
3528
3529 ASSERT(i < size);
3530 dest[i++] = c;
3531 }
3532
3533 ASSERT(i < size);
3534 dest[i] = '\0';
3535 regs[rd] = (uintptr_t)dest;
3536 mstate->dtms_scratch_ptr += size;
3537 mstate->dtms_strtok = addr;
3538 break;
3539 }
3540
3541 case DIF_SUBR_SUBSTR: {
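/*
 * substr(s, index[, remaining]): a negative index counts back from the end
 * of the string, and when the third argument is omitted the remainder of
 * the string (up to the string size limit) is taken.
 */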
3542 uintptr_t s = tupregs[0].dttk_value;
3543 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3544 char *d = (char *)mstate->dtms_scratch_ptr;
3545 int64_t index = (int64_t)tupregs[1].dttk_value;
3546 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3547 size_t len = dtrace_strlen((char *)s, size);
3548 int64_t i = 0;
3549
3550 if (nargs <= 2)
3551 remaining = (int64_t)size;
3552
3553 if (mstate->dtms_scratch_ptr + size >
3554 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3555 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3556 regs[rd] = NULL;
3557 break;
3558 }
3559
3560 if (index < 0) {
3561 index += len;
3562
3563 if (index < 0 && index + remaining > 0) {
3564 remaining += index;
3565 index = 0;
3566 }
3567 }
3568
3569 if (index >= len || index < 0)
3570 index = len;
3571
3572 for (d[0] = '\0'; remaining > 0; remaining--) {
3573 if ((d[i++] = dtrace_load8(s++ + index)) == '\0')
3574 break;
3575
3576 if (i == size) {
3577 d[i - 1] = '\0';
3578 break;
3579 }
3580 }
3581
3582 mstate->dtms_scratch_ptr += size;
3583 regs[rd] = (uintptr_t)d;
3584 break;
3585 }
3586
3587 #if !defined(__APPLE__)
3588 case DIF_SUBR_GETMAJOR:
3589 #ifdef __LP64__
3590 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3591 #else
3592 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3593 #endif
3594 break;
3595
3596 #else /* __APPLE__ */
3597 case DIF_SUBR_GETMAJOR:
3598 regs[rd] = (uintptr_t)major( (dev_t)tupregs[0].dttk_value );
3599 break;
3600 #endif /* __APPLE__ */
3601
3602 #if !defined(__APPLE__)
3603 case DIF_SUBR_GETMINOR:
3604 #ifdef __LP64__
3605 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3606 #else
3607 regs[rd] = tupregs[0].dttk_value & MAXMIN;
3608 #endif
3609 break;
3610
3611 #else /* __APPLE__ */
3612 case DIF_SUBR_GETMINOR:
3613 regs[rd] = (uintptr_t)minor( (dev_t)tupregs[0].dttk_value );
3614 break;
3615 #endif /* __APPLE__ */
3616
3617 #if !defined(__APPLE__)
3618 case DIF_SUBR_DDI_PATHNAME: {
3619 /*
3620 * This one is a galactic mess. We are going to roughly
3621 * emulate ddi_pathname(), but it's made more complicated
3622 * by the fact that we (a) want to include the minor name and
3623 * (b) must proceed iteratively instead of recursively.
3624 */
3625 uintptr_t dest = mstate->dtms_scratch_ptr;
3626 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3627 char *start = (char *)dest, *end = start + size - 1;
3628 uintptr_t daddr = tupregs[0].dttk_value;
3629 int64_t minor = (int64_t)tupregs[1].dttk_value;
3630 char *s;
3631 int i, len, depth = 0;
3632
3633 if (size == 0 || mstate->dtms_scratch_ptr + size >
3634 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3635 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3636 regs[rd] = NULL;
3637 break;
3638 }
3639
3640 *end = '\0';
3641
3642 /*
3643 * We want to have a name for the minor. In order to do this,
3644 * we need to walk the minor list from the devinfo. We want
3645 * to be sure that we don't infinitely walk a circular list,
3646 * so we check for circularity by sending a scout pointer
3647 * ahead two elements for every element that we iterate over;
3648 * if the list is circular, these will ultimately point to the
3649 * same element. You may recognize this little trick as the
3650 * answer to a stupid interview question -- one that always
3651 * seems to be asked by those who had to have it laboriously
3652 * explained to them, and who can't even concisely describe
3653 * the conditions under which one would be forced to resort to
3654 * this technique. Needless to say, those conditions are
3655 * found here -- and probably only here. Is this the only
3656 * use of this infamous trick in shipping, production code?
3657 * If it isn't, it probably should be...
3658 */
3659 if (minor != -1) {
3660 uintptr_t maddr = dtrace_loadptr(daddr +
3661 offsetof(struct dev_info, devi_minor));
3662
3663 uintptr_t next = offsetof(struct ddi_minor_data, next);
3664 uintptr_t name = offsetof(struct ddi_minor_data,
3665 d_minor) + offsetof(struct ddi_minor, name);
3666 uintptr_t dev = offsetof(struct ddi_minor_data,
3667 d_minor) + offsetof(struct ddi_minor, dev);
3668 uintptr_t scout;
3669
3670 if (maddr != NULL)
3671 scout = dtrace_loadptr(maddr + next);
3672
3673 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3674 uint64_t m;
3675 #ifdef __LP64__
3676 m = dtrace_load64(maddr + dev) & MAXMIN64;
3677 #else
3678 m = dtrace_load32(maddr + dev) & MAXMIN;
3679 #endif
3680 if (m != minor) {
3681 maddr = dtrace_loadptr(maddr + next);
3682
3683 if (scout == NULL)
3684 continue;
3685
3686 scout = dtrace_loadptr(scout + next);
3687
3688 if (scout == NULL)
3689 continue;
3690
3691 scout = dtrace_loadptr(scout + next);
3692
3693 if (scout == NULL)
3694 continue;
3695
3696 if (scout == maddr) {
3697 *flags |= CPU_DTRACE_ILLOP;
3698 break;
3699 }
3700
3701 continue;
3702 }
3703
3704 /*
3705 * We have the minor data. Now we need to
3706 * copy the minor's name into the end of the
3707 * pathname.
3708 */
3709 s = (char *)dtrace_loadptr(maddr + name);
3710 len = dtrace_strlen(s, size);
3711
3712 if (*flags & CPU_DTRACE_FAULT)
3713 break;
3714
3715 if (len != 0) {
3716 if ((end -= (len + 1)) < start)
3717 break;
3718
3719 *end = ':';
3720 }
3721
3722 for (i = 1; i <= len; i++)
3723 end[i] = dtrace_load8((uintptr_t)s++);
3724 break;
3725 }
3726 }
3727
3728 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3729 ddi_node_state_t devi_state;
3730
3731 devi_state = dtrace_load32(daddr +
3732 offsetof(struct dev_info, devi_node_state));
3733
3734 if (*flags & CPU_DTRACE_FAULT)
3735 break;
3736
3737 if (devi_state >= DS_INITIALIZED) {
3738 s = (char *)dtrace_loadptr(daddr +
3739 offsetof(struct dev_info, devi_addr));
3740 len = dtrace_strlen(s, size);
3741
3742 if (*flags & CPU_DTRACE_FAULT)
3743 break;
3744
3745 if (len != 0) {
3746 if ((end -= (len + 1)) < start)
3747 break;
3748
3749 *end = '@';
3750 }
3751
3752 for (i = 1; i <= len; i++)
3753 end[i] = dtrace_load8((uintptr_t)s++);
3754 }
3755
3756 /*
3757 * Now for the node name...
3758 */
3759 s = (char *)dtrace_loadptr(daddr +
3760 offsetof(struct dev_info, devi_node_name));
3761
3762 daddr = dtrace_loadptr(daddr +
3763 offsetof(struct dev_info, devi_parent));
3764
3765 /*
3766 * If our parent is NULL (that is, if we're the root
3767 * node), we're going to use the special path
3768 * "devices".
3769 */
3770 if (daddr == NULL)
3771 s = "devices";
3772
3773 len = dtrace_strlen(s, size);
3774 if (*flags & CPU_DTRACE_FAULT)
3775 break;
3776
3777 if ((end -= (len + 1)) < start)
3778 break;
3779
3780 for (i = 1; i <= len; i++)
3781 end[i] = dtrace_load8((uintptr_t)s++);
3782 *end = '/';
3783
3784 if (depth++ > dtrace_devdepth_max) {
3785 *flags |= CPU_DTRACE_ILLOP;
3786 break;
3787 }
3788 }
3789
3790 if (end < start)
3791 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3792
3793 if (daddr == NULL) {
3794 regs[rd] = (uintptr_t)end;
3795 mstate->dtms_scratch_ptr += size;
3796 }
3797
3798 break;
3799 }
3800 #else
3801 case DIF_SUBR_DDI_PATHNAME: {
3802 /* XXX awaits galactic disentanglement ;-} */
3803 regs[rd] = NULL;
3804 break;
3805 }
3806 #endif /* __APPLE__ */
3807
3808 case DIF_SUBR_STRJOIN: {
3809 char *d = (char *)mstate->dtms_scratch_ptr;
3810 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3811 uintptr_t s1 = tupregs[0].dttk_value;
3812 uintptr_t s2 = tupregs[1].dttk_value;
3813 int i = 0;
3814
3815 if (mstate->dtms_scratch_ptr + size >
3816 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3817 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3818 regs[rd] = NULL;
3819 break;
3820 }
3821
3822 for (;;) {
3823 if (i >= size) {
3824 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3825 regs[rd] = NULL;
3826 break;
3827 }
3828
3829 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
3830 i--;
3831 break;
3832 }
3833 }
3834
3835 for (;;) {
3836 if (i >= size) {
3837 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3838 regs[rd] = NULL;
3839 break;
3840 }
3841
3842 if ((d[i++] = dtrace_load8(s2++)) == '\0')
3843 break;
3844 }
3845
3846 if (i < size) {
3847 mstate->dtms_scratch_ptr += i;
3848 regs[rd] = (uintptr_t)d;
3849 }
3850
3851 break;
3852 }
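/*
 * Behavioral note (annotation, not in the original source): the two loops
 * above copy s1 up to -- but not including -- its terminating NUL, then
 * append s2 along with its NUL, so strjoin("/dev/", "dtrace") yields the
 * scratch string "/dev/dtrace".  If the joined result (including the NUL)
 * does not fit strictly within the strsize-sized scratch allocation,
 * CPU_DTRACE_NOSCRATCH is set and NULL is returned rather than a truncated
 * string.
 */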
3853
3854 case DIF_SUBR_LLTOSTR: {
3855 int64_t i = (int64_t)tupregs[0].dttk_value;
3856 int64_t val = i < 0 ? i * -1 : i;
3857 uint64_t size = 22; /* enough room for 2^64 in decimal */
3858 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
3859
3860 if (mstate->dtms_scratch_ptr + size >
3861 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3862 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3863 regs[rd] = NULL;
3864 break;
3865 }
3866
3867 for (*end-- = '\0'; val; val /= 10)
3868 *end-- = '0' + (val % 10);
3869
3870 if (i == 0)
3871 *end-- = '0';
3872
3873 if (i < 0)
3874 *end-- = '-';
3875
3876 regs[rd] = (uintptr_t)end + 1;
3877 mstate->dtms_scratch_ptr += size;
3878 break;
3879 }
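/*
 * Worked example (annotation, not in the original source): for an input of
 * -305, the loop above fills the 22-byte scratch region from the back --
 * '\0', then '5', '0', '3' (least-significant digit first), then the '-'
 * sign -- and regs[rd] is left pointing at the '-', so the consumer sees
 * the string "-305".
 */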
3880
3881 case DIF_SUBR_DIRNAME:
3882 case DIF_SUBR_BASENAME: {
3883 char *dest = (char *)mstate->dtms_scratch_ptr;
3884 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3885 uintptr_t src = tupregs[0].dttk_value;
3886 int i, j, len = dtrace_strlen((char *)src, size);
3887 int lastbase = -1, firstbase = -1, lastdir = -1;
3888 int start, end;
3889
3890 if (mstate->dtms_scratch_ptr + size >
3891 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3892 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3893 regs[rd] = NULL;
3894 break;
3895 }
3896
3897 /*
3898 * The basename and dirname for a zero-length string are
3899 * defined to be "."
3900 */
3901 if (len == 0) {
3902 len = 1;
3903 src = (uintptr_t)".";
3904 }
3905
3906 /*
3907 * Start from the back of the string, moving back toward the
3908 * front until we see a character that isn't a slash. That
3909 * character is the last character in the basename.
3910 */
3911 for (i = len - 1; i >= 0; i--) {
3912 if (dtrace_load8(src + i) != '/')
3913 break;
3914 }
3915
3916 if (i >= 0)
3917 lastbase = i;
3918
3919 /*
3920 * Starting from the last character in the basename, move
3921 * towards the front until we find a slash. The character
3922 * that we processed immediately before that is the first
3923 * character in the basename.
3924 */
3925 for (; i >= 0; i--) {
3926 if (dtrace_load8(src + i) == '/')
3927 break;
3928 }
3929
3930 if (i >= 0)
3931 firstbase = i + 1;
3932
3933 /*
3934 * Now keep going until we find a non-slash character. That
3935 * character is the last character in the dirname.
3936 */
3937 for (; i >= 0; i--) {
3938 if (dtrace_load8(src + i) != '/')
3939 break;
3940 }
3941
3942 if (i >= 0)
3943 lastdir = i;
3944
3945 ASSERT(!(lastbase == -1 && firstbase != -1));
3946 ASSERT(!(firstbase == -1 && lastdir != -1));
3947
3948 if (lastbase == -1) {
3949 /*
3950 * We didn't find a non-slash character. We know that
3951 * the length is non-zero, so the whole string must be
3952 * slashes. In either the dirname or the basename
3953 * case, we return '/'.
3954 */
3955 ASSERT(firstbase == -1);
3956 firstbase = lastbase = lastdir = 0;
3957 }
3958
3959 if (firstbase == -1) {
3960 /*
3961 * The entire string consists only of a basename
3962 * component. If we're looking for dirname, we need
3963 * to change our string to be just "."; if we're
3964 * looking for a basename, we'll just set the first
3965 * character of the basename to be 0.
3966 */
3967 if (subr == DIF_SUBR_DIRNAME) {
3968 ASSERT(lastdir == -1);
3969 src = (uintptr_t)".";
3970 lastdir = 0;
3971 } else {
3972 firstbase = 0;
3973 }
3974 }
3975
3976 if (subr == DIF_SUBR_DIRNAME) {
3977 if (lastdir == -1) {
3978 /*
3979 * We know that we have a slash in the name --
3980 * or lastdir would be set to 0, above. And
3981 * because lastdir is -1, we know that this
3982 * slash must be the first character. (That
3983 * is, the full string must be of the form
3984 * "/basename".) In this case, the last
3985 * character of the directory name is 0.
3986 */
3987 lastdir = 0;
3988 }
3989
3990 start = 0;
3991 end = lastdir;
3992 } else {
3993 ASSERT(subr == DIF_SUBR_BASENAME);
3994 ASSERT(firstbase != -1 && lastbase != -1);
3995 start = firstbase;
3996 end = lastbase;
3997 }
3998
3999 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4000 dest[j] = dtrace_load8(src + i);
4001
4002 dest[j] = '\0';
4003 regs[rd] = (uintptr_t)dest;
4004 mstate->dtms_scratch_ptr += size;
4005 break;
4006 }
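/*
 * Worked example (annotation, not in the original source): for
 * src = "/usr/lib/", the three backward scans above find lastbase = 7
 * (the 'b'), firstbase = 5 (the 'l') and lastdir = 3 (the 'r'), so
 * basename() copies "lib" while dirname() copies "/usr".  For a string of
 * nothing but slashes, lastbase is never found, all three indices collapse
 * to zero, and both subroutines return "/".
 */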
4007
4008 case DIF_SUBR_CLEANPATH: {
4009 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4010 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4011 uintptr_t src = tupregs[0].dttk_value;
4012 int i = 0, j = 0;
4013
4014 if (mstate->dtms_scratch_ptr + size >
4015 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
4016 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4017 regs[rd] = NULL;
4018 break;
4019 }
4020
4021 /*
4022 * Move forward, loading each character.
4023 */
4024 do {
4025 c = dtrace_load8(src + i++);
4026 next:
4027 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
4028 break;
4029
4030 if (c != '/') {
4031 dest[j++] = c;
4032 continue;
4033 }
4034
4035 c = dtrace_load8(src + i++);
4036
4037 if (c == '/') {
4038 /*
4039 * We have two slashes -- we can just advance
4040 * to the next character.
4041 */
4042 goto next;
4043 }
4044
4045 if (c != '.') {
4046 /*
4047 * This is not "." and it's not ".." -- we can
4048 * just store the "/" and this character and
4049 * drive on.
4050 */
4051 dest[j++] = '/';
4052 dest[j++] = c;
4053 continue;
4054 }
4055
4056 c = dtrace_load8(src + i++);
4057
4058 if (c == '/') {
4059 /*
4060 * This is a "/./" component. We're not going
4061 * to store anything in the destination buffer;
4062 * we're just going to go to the next component.
4063 */
4064 goto next;
4065 }
4066
4067 if (c != '.') {
4068 /*
4069 * This is not ".." -- we can just store the
4070 * "/." and this character and continue
4071 * processing.
4072 */
4073 dest[j++] = '/';
4074 dest[j++] = '.';
4075 dest[j++] = c;
4076 continue;
4077 }
4078
4079 c = dtrace_load8(src + i++);
4080
4081 if (c != '/' && c != '\0') {
4082 /*
4083 * This is not ".." -- it's "..[mumble]".
4084 * We'll store the "/.." and this character
4085 * and continue processing.
4086 */
4087 dest[j++] = '/';
4088 dest[j++] = '.';
4089 dest[j++] = '.';
4090 dest[j++] = c;
4091 continue;
4092 }
4093
4094 /*
4095 * This is "/../" or "/..\0". We need to back up
4096 * our destination pointer until we find a "/".
4097 */
4098 i--;
4099 while (j != 0 && dest[--j] != '/')
4100 continue;
4101
4102 if (c == '\0')
4103 dest[++j] = '/';
4104 } while (c != '\0');
4105
4106 dest[j] = '\0';
4107 regs[rd] = (uintptr_t)dest;
4108 mstate->dtms_scratch_ptr += size;
4109 break;
4110 }
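/*
 * Example behavior (annotation, not in the original source):
 * cleanpath("/foo//bar/./baz/../qux") produces "/foo/bar/qux" -- doubled
 * slashes and "." components are dropped, and each ".." component backs the
 * destination pointer up over the preceding component.  The transformation
 * is purely lexical; no symlink resolution is attempted.
 */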
4111 #ifdef __APPLE__
4112
4113 /* CHUD callback ('chud(uint64_t, [uint64_t], [uint64_t] ...)') */
4114 case DIF_SUBR_CHUD: {
4115 uint64_t selector = tupregs[0].dttk_value;
4116 uint64_t args[DIF_DTR_NREGS-1] = {0ULL};
4117 uint32_t ii;
4118
4119 /* copy in any variadic argument list */
4120 for(ii = 0; ii < DIF_DTR_NREGS-1; ii++) {
4121 args[ii] = tupregs[ii+1].dttk_value;
4122 }
4123
4124 kern_return_t ret =
4125 chudxnu_dtrace_callback(selector, args, DIF_DTR_NREGS-1);
4126 if(KERN_SUCCESS != ret) {
4127 /* error */
4128 }
4129 break;
4130 }
4131
4132 #endif /* __APPLE__ */
4133
4134 }
4135 }
4136
4137 /*
4138 * Emulate the execution of DTrace IR instructions specified by the given
4139 * DIF object. This function is deliberately void of assertions as all of
4140 * the necessary checks are handled by a call to dtrace_difo_validate().
4141 */
4142 static uint64_t
4143 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4144 dtrace_vstate_t *vstate, dtrace_state_t *state)
4145 {
4146 const dif_instr_t *text = difo->dtdo_buf;
4147 const uint_t textlen = difo->dtdo_len;
4148 const char *strtab = difo->dtdo_strtab;
4149 const uint64_t *inttab = difo->dtdo_inttab;
4150
4151 uint64_t rval = 0;
4152 dtrace_statvar_t *svar;
4153 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4154 dtrace_difv_t *v;
4155 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4156 #if !defined(__APPLE__)
4157 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4158 #else
4159 volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4160 #endif /* __APPLE__ */
4161
4162 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4163 uint64_t regs[DIF_DIR_NREGS];
4164 uint64_t *tmp;
4165
4166 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4167 int64_t cc_r;
4168 uint_t pc = 0, id, opc;
4169 uint8_t ttop = 0;
4170 dif_instr_t instr;
4171 uint_t r1, r2, rd;
4172
4173 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4174
4175 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4176 opc = pc;
4177
4178 instr = text[pc++];
4179 r1 = DIF_INSTR_R1(instr);
4180 r2 = DIF_INSTR_R2(instr);
4181 rd = DIF_INSTR_RD(instr);
4182
4183 switch (DIF_INSTR_OP(instr)) {
4184 case DIF_OP_OR:
4185 regs[rd] = regs[r1] | regs[r2];
4186 break;
4187 case DIF_OP_XOR:
4188 regs[rd] = regs[r1] ^ regs[r2];
4189 break;
4190 case DIF_OP_AND:
4191 regs[rd] = regs[r1] & regs[r2];
4192 break;
4193 case DIF_OP_SLL:
4194 regs[rd] = regs[r1] << regs[r2];
4195 break;
4196 case DIF_OP_SRL:
4197 regs[rd] = regs[r1] >> regs[r2];
4198 break;
4199 case DIF_OP_SUB:
4200 regs[rd] = regs[r1] - regs[r2];
4201 break;
4202 case DIF_OP_ADD:
4203 regs[rd] = regs[r1] + regs[r2];
4204 break;
4205 case DIF_OP_MUL:
4206 regs[rd] = regs[r1] * regs[r2];
4207 break;
4208 case DIF_OP_SDIV:
4209 if (regs[r2] == 0) {
4210 regs[rd] = 0;
4211 *flags |= CPU_DTRACE_DIVZERO;
4212 } else {
4213 regs[rd] = (int64_t)regs[r1] /
4214 (int64_t)regs[r2];
4215 }
4216 break;
4217
4218 case DIF_OP_UDIV:
4219 if (regs[r2] == 0) {
4220 regs[rd] = 0;
4221 *flags |= CPU_DTRACE_DIVZERO;
4222 } else {
4223 regs[rd] = regs[r1] / regs[r2];
4224 }
4225 break;
4226
4227 case DIF_OP_SREM:
4228 if (regs[r2] == 0) {
4229 regs[rd] = 0;
4230 *flags |= CPU_DTRACE_DIVZERO;
4231 } else {
4232 regs[rd] = (int64_t)regs[r1] %
4233 (int64_t)regs[r2];
4234 }
4235 break;
4236
4237 case DIF_OP_UREM:
4238 if (regs[r2] == 0) {
4239 regs[rd] = 0;
4240 *flags |= CPU_DTRACE_DIVZERO;
4241 } else {
4242 regs[rd] = regs[r1] % regs[r2];
4243 }
4244 break;
4245
4246 case DIF_OP_NOT:
4247 regs[rd] = ~regs[r1];
4248 break;
4249 case DIF_OP_MOV:
4250 regs[rd] = regs[r1];
4251 break;
4252 case DIF_OP_CMP:
4253 cc_r = regs[r1] - regs[r2];
4254 cc_n = cc_r < 0;
4255 cc_z = cc_r == 0;
4256 cc_v = 0;
4257 cc_c = regs[r1] < regs[r2];
4258 break;
4259 case DIF_OP_TST:
4260 cc_n = cc_v = cc_c = 0;
4261 cc_z = regs[r1] == 0;
4262 break;
4263 case DIF_OP_BA:
4264 pc = DIF_INSTR_LABEL(instr);
4265 break;
4266 case DIF_OP_BE:
4267 if (cc_z)
4268 pc = DIF_INSTR_LABEL(instr);
4269 break;
4270 case DIF_OP_BNE:
4271 if (cc_z == 0)
4272 pc = DIF_INSTR_LABEL(instr);
4273 break;
4274 case DIF_OP_BG:
4275 if ((cc_z | (cc_n ^ cc_v)) == 0)
4276 pc = DIF_INSTR_LABEL(instr);
4277 break;
4278 case DIF_OP_BGU:
4279 if ((cc_c | cc_z) == 0)
4280 pc = DIF_INSTR_LABEL(instr);
4281 break;
4282 case DIF_OP_BGE:
4283 if ((cc_n ^ cc_v) == 0)
4284 pc = DIF_INSTR_LABEL(instr);
4285 break;
4286 case DIF_OP_BGEU:
4287 if (cc_c == 0)
4288 pc = DIF_INSTR_LABEL(instr);
4289 break;
4290 case DIF_OP_BL:
4291 if (cc_n ^ cc_v)
4292 pc = DIF_INSTR_LABEL(instr);
4293 break;
4294 case DIF_OP_BLU:
4295 if (cc_c)
4296 pc = DIF_INSTR_LABEL(instr);
4297 break;
4298 case DIF_OP_BLE:
4299 if (cc_z | (cc_n ^ cc_v))
4300 pc = DIF_INSTR_LABEL(instr);
4301 break;
4302 case DIF_OP_BLEU:
4303 if (cc_c | cc_z)
4304 pc = DIF_INSTR_LABEL(instr);
4305 break;
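/*
 * Annotation (not in the original source): the conditional branches above
 * consume the condition codes set by DIF_OP_CMP/DIF_OP_TST using the usual
 * two's-complement conventions.  For example, comparing regs[r1] = 3 with
 * regs[r2] = 5 yields cc_r = -2, so cc_n = 1, cc_z = 0, cc_v = 0, cc_c = 1:
 * BL and BLU would both be taken, while BGE and BGEU would fall through.
 */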
4306 case DIF_OP_RLDSB:
4307 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4308 *flags |= CPU_DTRACE_KPRIV;
4309 *illval = regs[r1];
4310 break;
4311 }
4312 /*FALLTHROUGH*/
4313 case DIF_OP_LDSB:
4314 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4315 break;
4316 case DIF_OP_RLDSH:
4317 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4318 *flags |= CPU_DTRACE_KPRIV;
4319 *illval = regs[r1];
4320 break;
4321 }
4322 /*FALLTHROUGH*/
4323 case DIF_OP_LDSH:
4324 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4325 break;
4326 case DIF_OP_RLDSW:
4327 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4328 *flags |= CPU_DTRACE_KPRIV;
4329 *illval = regs[r1];
4330 break;
4331 }
4332 /*FALLTHROUGH*/
4333 case DIF_OP_LDSW:
4334 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4335 break;
4336 case DIF_OP_RLDUB:
4337 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4338 *flags |= CPU_DTRACE_KPRIV;
4339 *illval = regs[r1];
4340 break;
4341 }
4342 /*FALLTHROUGH*/
4343 case DIF_OP_LDUB:
4344 regs[rd] = dtrace_load8(regs[r1]);
4345 break;
4346 case DIF_OP_RLDUH:
4347 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4348 *flags |= CPU_DTRACE_KPRIV;
4349 *illval = regs[r1];
4350 break;
4351 }
4352 /*FALLTHROUGH*/
4353 case DIF_OP_LDUH:
4354 regs[rd] = dtrace_load16(regs[r1]);
4355 break;
4356 case DIF_OP_RLDUW:
4357 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4358 *flags |= CPU_DTRACE_KPRIV;
4359 *illval = regs[r1];
4360 break;
4361 }
4362 /*FALLTHROUGH*/
4363 case DIF_OP_LDUW:
4364 regs[rd] = dtrace_load32(regs[r1]);
4365 break;
4366 case DIF_OP_RLDX:
4367 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4368 *flags |= CPU_DTRACE_KPRIV;
4369 *illval = regs[r1];
4370 break;
4371 }
4372 /*FALLTHROUGH*/
4373 case DIF_OP_LDX:
4374 regs[rd] = dtrace_load64(regs[r1]);
4375 break;
4376 case DIF_OP_ULDSB:
4377 regs[rd] = (int8_t)
4378 dtrace_fuword8(regs[r1]);
4379 break;
4380 case DIF_OP_ULDSH:
4381 regs[rd] = (int16_t)
4382 dtrace_fuword16(regs[r1]);
4383 break;
4384 case DIF_OP_ULDSW:
4385 regs[rd] = (int32_t)
4386 dtrace_fuword32(regs[r1]);
4387 break;
4388 case DIF_OP_ULDUB:
4389 regs[rd] =
4390 dtrace_fuword8(regs[r1]);
4391 break;
4392 case DIF_OP_ULDUH:
4393 regs[rd] =
4394 dtrace_fuword16(regs[r1]);
4395 break;
4396 case DIF_OP_ULDUW:
4397 regs[rd] =
4398 dtrace_fuword32(regs[r1]);
4399 break;
4400 case DIF_OP_ULDX:
4401 regs[rd] =
4402 dtrace_fuword64(regs[r1]);
4403 break;
4404 case DIF_OP_RET:
4405 rval = regs[rd];
4406 break;
4407 case DIF_OP_NOP:
4408 break;
4409 case DIF_OP_SETX:
4410 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
4411 break;
4412 case DIF_OP_SETS:
4413 regs[rd] = (uint64_t)(uintptr_t)
4414 (strtab + DIF_INSTR_STRING(instr));
4415 break;
4416 case DIF_OP_SCMP:
4417 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1],
4418 (char *)(uintptr_t)regs[r2],
4419 state->dts_options[DTRACEOPT_STRSIZE]);
4420
4421 cc_n = cc_r < 0;
4422 cc_z = cc_r == 0;
4423 cc_v = cc_c = 0;
4424 break;
4425 case DIF_OP_LDGA:
4426 regs[rd] = dtrace_dif_variable(mstate, state,
4427 r1, regs[r2]);
4428 break;
4429 case DIF_OP_LDGS:
4430 id = DIF_INSTR_VAR(instr);
4431
4432 if (id >= DIF_VAR_OTHER_UBASE) {
4433 uintptr_t a;
4434
4435 id -= DIF_VAR_OTHER_UBASE;
4436 svar = vstate->dtvs_globals[id];
4437 ASSERT(svar != NULL);
4438 v = &svar->dtsv_var;
4439
4440 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
4441 regs[rd] = svar->dtsv_data;
4442 break;
4443 }
4444
4445 a = (uintptr_t)svar->dtsv_data;
4446
4447 if (*(uint8_t *)a == UINT8_MAX) {
4448 /*
4449 * If the 0th byte is set to UINT8_MAX
4450 * then this is to be treated as a
4451 * reference to a NULL variable.
4452 */
4453 regs[rd] = NULL;
4454 } else {
4455 regs[rd] = a + sizeof (uint64_t);
4456 }
4457
4458 break;
4459 }
4460
4461 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
4462 break;
4463
4464 case DIF_OP_STGS:
4465 id = DIF_INSTR_VAR(instr);
4466
4467 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4468 id -= DIF_VAR_OTHER_UBASE;
4469
4470 svar = vstate->dtvs_globals[id];
4471 ASSERT(svar != NULL);
4472 v = &svar->dtsv_var;
4473
4474 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4475 uintptr_t a = (uintptr_t)svar->dtsv_data;
4476
4477 ASSERT(a != NULL);
4478 ASSERT(svar->dtsv_size != 0);
4479
4480 if (regs[rd] == NULL) {
4481 *(uint8_t *)a = UINT8_MAX;
4482 break;
4483 } else {
4484 *(uint8_t *)a = 0;
4485 a += sizeof (uint64_t);
4486 }
4487
4488 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4489 (void *)a, &v->dtdv_type);
4490 break;
4491 }
4492
4493 svar->dtsv_data = regs[rd];
4494 break;
4495
4496 case DIF_OP_LDTA:
4497 /*
4498 * There are no DTrace built-in thread-local arrays at
4499 * present. This opcode is saved for future work.
4500 */
4501 *flags |= CPU_DTRACE_ILLOP;
4502 regs[rd] = 0;
4503 break;
4504
4505 case DIF_OP_LDLS:
4506 id = DIF_INSTR_VAR(instr);
4507
4508 if (id < DIF_VAR_OTHER_UBASE) {
4509 /*
4510 * For now, this has no meaning.
4511 */
4512 regs[rd] = 0;
4513 break;
4514 }
4515
4516 id -= DIF_VAR_OTHER_UBASE;
4517
4518 ASSERT(id < vstate->dtvs_nlocals);
4519 ASSERT(vstate->dtvs_locals != NULL);
4520
4521 svar = vstate->dtvs_locals[id];
4522 ASSERT(svar != NULL);
4523 v = &svar->dtsv_var;
4524
4525 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4526 uintptr_t a = (uintptr_t)svar->dtsv_data;
4527 size_t sz = v->dtdv_type.dtdt_size;
4528
4529 sz += sizeof (uint64_t);
4530 ASSERT(svar->dtsv_size == NCPU * sz);
4531 a += CPU->cpu_id * sz;
4532
4533 if (*(uint8_t *)a == UINT8_MAX) {
4534 /*
4535 * If the 0th byte is set to UINT8_MAX
4536 * then this is to be treated as a
4537 * reference to a NULL variable.
4538 */
4539 regs[rd] = NULL;
4540 } else {
4541 regs[rd] = a + sizeof (uint64_t);
4542 }
4543
4544 break;
4545 }
4546
4547 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4548 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4549 regs[rd] = tmp[CPU->cpu_id];
4550 break;
4551
4552 case DIF_OP_STLS:
4553 id = DIF_INSTR_VAR(instr);
4554
4555 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4556 id -= DIF_VAR_OTHER_UBASE;
4557 ASSERT(id < vstate->dtvs_nlocals);
4558
4559 ASSERT(vstate->dtvs_locals != NULL);
4560 svar = vstate->dtvs_locals[id];
4561 ASSERT(svar != NULL);
4562 v = &svar->dtsv_var;
4563
4564 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4565 uintptr_t a = (uintptr_t)svar->dtsv_data;
4566 size_t sz = v->dtdv_type.dtdt_size;
4567
4568 sz += sizeof (uint64_t);
4569 ASSERT(svar->dtsv_size == NCPU * sz);
4570 a += CPU->cpu_id * sz;
4571
4572 if (regs[rd] == NULL) {
4573 *(uint8_t *)a = UINT8_MAX;
4574 break;
4575 } else {
4576 *(uint8_t *)a = 0;
4577 a += sizeof (uint64_t);
4578 }
4579
4580 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4581 (void *)a, &v->dtdv_type);
4582 break;
4583 }
4584
4585 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4586 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4587 tmp[CPU->cpu_id] = regs[rd];
4588 break;
4589
4590 case DIF_OP_LDTS: {
4591 dtrace_dynvar_t *dvar;
4592 dtrace_key_t *key;
4593
4594 id = DIF_INSTR_VAR(instr);
4595 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4596 id -= DIF_VAR_OTHER_UBASE;
4597 v = &vstate->dtvs_tlocals[id];
4598
4599 key = &tupregs[DIF_DTR_NREGS];
4600 key[0].dttk_value = (uint64_t)id;
4601 key[0].dttk_size = 0;
4602 DTRACE_TLS_THRKEY(key[1].dttk_value);
4603 key[1].dttk_size = 0;
4604
4605 dvar = dtrace_dynvar(dstate, 2, key,
4606 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC);
4607
4608 if (dvar == NULL) {
4609 regs[rd] = 0;
4610 break;
4611 }
4612
4613 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4614 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
4615 } else {
4616 regs[rd] = *((uint64_t *)dvar->dtdv_data);
4617 }
4618
4619 break;
4620 }
4621
4622 case DIF_OP_STTS: {
4623 dtrace_dynvar_t *dvar;
4624 dtrace_key_t *key;
4625
4626 id = DIF_INSTR_VAR(instr);
4627 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4628 id -= DIF_VAR_OTHER_UBASE;
4629
4630 key = &tupregs[DIF_DTR_NREGS];
4631 key[0].dttk_value = (uint64_t)id;
4632 key[0].dttk_size = 0;
4633 DTRACE_TLS_THRKEY(key[1].dttk_value);
4634 key[1].dttk_size = 0;
4635 v = &vstate->dtvs_tlocals[id];
4636
4637 dvar = dtrace_dynvar(dstate, 2, key,
4638 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
4639 v->dtdv_type.dtdt_size : sizeof (uint64_t),
4640 regs[rd] ? DTRACE_DYNVAR_ALLOC :
4641 DTRACE_DYNVAR_DEALLOC);
4642
4643 /*
4644 * Given that we're storing to thread-local data,
4645 * we need to flush our predicate cache.
4646 */
4647 #if !defined(__APPLE__)
4648 curthread->t_predcache = NULL;
4649 #else
4650 dtrace_set_thread_predcache(current_thread(), 0);
4651 #endif /* __APPLE__ */
4652
4653
4654 if (dvar == NULL)
4655 break;
4656
4657 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4658 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4659 dvar->dtdv_data, &v->dtdv_type);
4660 } else {
4661 *((uint64_t *)dvar->dtdv_data) = regs[rd];
4662 }
4663
4664 break;
4665 }
4666
4667 case DIF_OP_SRA:
4668 regs[rd] = (int64_t)regs[r1] >> regs[r2];
4669 break;
4670
4671 case DIF_OP_CALL:
4672 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
4673 regs, tupregs, ttop, mstate, state);
4674 break;
4675
4676 case DIF_OP_PUSHTR:
4677 if (ttop == DIF_DTR_NREGS) {
4678 *flags |= CPU_DTRACE_TUPOFLOW;
4679 break;
4680 }
4681
4682 if (r1 == DIF_TYPE_STRING) {
4683 /*
4684 * If this is a string type and the size is 0,
4685 * we'll use the system-wide default string
4686 * size. Note that we are _not_ looking at
4687 * the value of the DTRACEOPT_STRSIZE option;
4688 * had this been set, we would expect to have
4689 * a non-zero size value in the "pushtr".
4690 */
4691 tupregs[ttop].dttk_size =
4692 dtrace_strlen((char *)(uintptr_t)regs[rd],
4693 regs[r2] ? regs[r2] :
4694 dtrace_strsize_default) + 1;
4695 } else {
4696 tupregs[ttop].dttk_size = regs[r2];
4697 }
4698
4699 tupregs[ttop++].dttk_value = regs[rd];
4700 break;
4701
4702 case DIF_OP_PUSHTV:
4703 if (ttop == DIF_DTR_NREGS) {
4704 *flags |= CPU_DTRACE_TUPOFLOW;
4705 break;
4706 }
4707
4708 tupregs[ttop].dttk_value = regs[rd];
4709 tupregs[ttop++].dttk_size = 0;
4710 break;
4711
4712 case DIF_OP_POPTS:
4713 if (ttop != 0)
4714 ttop--;
4715 break;
4716
4717 case DIF_OP_FLUSHTS:
4718 ttop = 0;
4719 break;
4720
4721 case DIF_OP_LDGAA:
4722 case DIF_OP_LDTAA: {
4723 dtrace_dynvar_t *dvar;
4724 dtrace_key_t *key = tupregs;
4725 uint_t nkeys = ttop;
4726
4727 id = DIF_INSTR_VAR(instr);
4728 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4729 id -= DIF_VAR_OTHER_UBASE;
4730
4731 key[nkeys].dttk_value = (uint64_t)id;
4732 key[nkeys++].dttk_size = 0;
4733
4734 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
4735 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
4736 key[nkeys++].dttk_size = 0;
4737 v = &vstate->dtvs_tlocals[id];
4738 } else {
4739 v = &vstate->dtvs_globals[id]->dtsv_var;
4740 }
4741
4742 dvar = dtrace_dynvar(dstate, nkeys, key,
4743 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
4744 v->dtdv_type.dtdt_size : sizeof (uint64_t),
4745 DTRACE_DYNVAR_NOALLOC);
4746
4747 if (dvar == NULL) {
4748 regs[rd] = 0;
4749 break;
4750 }
4751
4752 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4753 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
4754 } else {
4755 regs[rd] = *((uint64_t *)dvar->dtdv_data);
4756 }
4757
4758 break;
4759 }
4760
4761 case DIF_OP_STGAA:
4762 case DIF_OP_STTAA: {
4763 dtrace_dynvar_t *dvar;
4764 dtrace_key_t *key = tupregs;
4765 uint_t nkeys = ttop;
4766
4767 id = DIF_INSTR_VAR(instr);
4768 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4769 id -= DIF_VAR_OTHER_UBASE;
4770
4771 key[nkeys].dttk_value = (uint64_t)id;
4772 key[nkeys++].dttk_size = 0;
4773
4774 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
4775 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
4776 key[nkeys++].dttk_size = 0;
4777 v = &vstate->dtvs_tlocals[id];
4778 } else {
4779 v = &vstate->dtvs_globals[id]->dtsv_var;
4780 }
4781
4782 dvar = dtrace_dynvar(dstate, nkeys, key,
4783 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
4784 v->dtdv_type.dtdt_size : sizeof (uint64_t),
4785 regs[rd] ? DTRACE_DYNVAR_ALLOC :
4786 DTRACE_DYNVAR_DEALLOC);
4787
4788 if (dvar == NULL)
4789 break;
4790
4791 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4792 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4793 dvar->dtdv_data, &v->dtdv_type);
4794 } else {
4795 *((uint64_t *)dvar->dtdv_data) = regs[rd];
4796 }
4797
4798 break;
4799 }
4800
4801 case DIF_OP_ALLOCS: {
4802 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4803 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
4804
4805 if (mstate->dtms_scratch_ptr + size >
4806 mstate->dtms_scratch_base +
4807 mstate->dtms_scratch_size) {
4808 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4809 regs[rd] = NULL;
4810 } else {
4811 dtrace_bzero((void *)
4812 mstate->dtms_scratch_ptr, size);
4813 mstate->dtms_scratch_ptr += size;
4814 regs[rd] = ptr;
4815 }
4816 break;
4817 }
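/*
 * Annotation (not in the original source): P2ROUNDUP() above rounds the
 * scratch pointer up to the next 8-byte boundary, and the padding is
 * charged against the allocation -- e.g. with a scratch pointer ending in
 * ...05, an alloca(16) returns a pointer ending in ...08 and consumes 19
 * bytes of scratch (3 bytes of alignment padding plus the 16 requested).
 */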
4818
4819 case DIF_OP_COPYS:
4820 if (!dtrace_canstore(regs[rd], regs[r2],
4821 mstate, vstate)) {
4822 *flags |= CPU_DTRACE_BADADDR;
4823 *illval = regs[rd];
4824 break;
4825 }
4826
4827 dtrace_bcopy((void *)(uintptr_t)regs[r1],
4828 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
4829 break;
4830
4831 case DIF_OP_STB:
4832 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
4833 *flags |= CPU_DTRACE_BADADDR;
4834 *illval = regs[rd];
4835 break;
4836 }
4837 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
4838 break;
4839
4840 case DIF_OP_STH:
4841 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
4842 *flags |= CPU_DTRACE_BADADDR;
4843 *illval = regs[rd];
4844 break;
4845 }
4846 if (regs[rd] & 1) {
4847 *flags |= CPU_DTRACE_BADALIGN;
4848 *illval = regs[rd];
4849 break;
4850 }
4851 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
4852 break;
4853
4854 case DIF_OP_STW:
4855 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
4856 *flags |= CPU_DTRACE_BADADDR;
4857 *illval = regs[rd];
4858 break;
4859 }
4860 if (regs[rd] & 3) {
4861 *flags |= CPU_DTRACE_BADALIGN;
4862 *illval = regs[rd];
4863 break;
4864 }
4865 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
4866 break;
4867
4868 case DIF_OP_STX:
4869 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
4870 *flags |= CPU_DTRACE_BADADDR;
4871 *illval = regs[rd];
4872 break;
4873 }
4874 #if !defined(__APPLE__)
4875 if (regs[rd] & 7) {
4876 #else
4877 if (regs[rd] & 3) { /* Darwin kmem_zalloc() called from dtrace_difo_init() is 4-byte aligned. */
4878 #endif /* __APPLE__ */
4879 *flags |= CPU_DTRACE_BADALIGN;
4880 *illval = regs[rd];
4881 break;
4882 }
4883 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
4884 break;
4885 }
4886 }
4887
4888 if (!(*flags & CPU_DTRACE_FAULT))
4889 return (rval);
4890
4891 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
4892 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
4893
4894 return (0);
4895 }
4896
4897 static void
4898 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
4899 {
4900 dtrace_probe_t *probe = ecb->dte_probe;
4901 dtrace_provider_t *prov = probe->dtpr_provider;
4902 char c[DTRACE_FULLNAMELEN + 80], *str;
4903 char *msg = "dtrace: breakpoint action at probe ";
4904 char *ecbmsg = " (ecb ";
4905 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
4906 uintptr_t val = (uintptr_t)ecb;
4907 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
4908
4909 if (dtrace_destructive_disallow)
4910 return;
4911
4912 /*
4913 * It's impossible to be taking action on the NULL probe.
4914 */
4915 ASSERT(probe != NULL);
4916
4917 /*
4918 * This is a poor man's (destitute man's?) sprintf(): we want to
4919 * print the provider name, module name, function name and name of
4920 * the probe, along with the hex address of the ECB with the breakpoint
4921 * action -- all of which we must place in the character buffer by
4922 * hand.
4923 */
4924 while (*msg != '\0')
4925 c[i++] = *msg++;
4926
4927 for (str = prov->dtpv_name; *str != '\0'; str++)
4928 c[i++] = *str;
4929 c[i++] = ':';
4930
4931 for (str = probe->dtpr_mod; *str != '\0'; str++)
4932 c[i++] = *str;
4933 c[i++] = ':';
4934
4935 for (str = probe->dtpr_func; *str != '\0'; str++)
4936 c[i++] = *str;
4937 c[i++] = ':';
4938
4939 for (str = probe->dtpr_name; *str != '\0'; str++)
4940 c[i++] = *str;
4941
4942 while (*ecbmsg != '\0')
4943 c[i++] = *ecbmsg++;
4944
4945 while (shift >= 0) {
4946 mask = (uintptr_t)0xf << shift;
4947
4948 if (val >= ((uintptr_t)1 << shift))
4949 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
4950 shift -= 4;
4951 }
4952
4953 c[i++] = ')';
4954 c[i] = '\0';
4955
4956 debug_enter(c);
4957 }
4958
4959 static void
4960 dtrace_action_panic(dtrace_ecb_t *ecb)
4961 {
4962 dtrace_probe_t *probe = ecb->dte_probe;
4963
4964 /*
4965 * It's impossible to be taking action on the NULL probe.
4966 */
4967 ASSERT(probe != NULL);
4968
4969 if (dtrace_destructive_disallow)
4970 return;
4971
4972 if (dtrace_panicked != NULL)
4973 return;
4974
4975 #if !defined(__APPLE__)
4976 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
4977 return;
4978 #else
4979 if (dtrace_casptr(&dtrace_panicked, NULL, current_thread()) != NULL)
4980 return;
4981 #endif /* __APPLE__ */
4982
4983 /*
4984 * We won the right to panic. (We want to be sure that only one
4985 * thread calls panic() from dtrace_probe(), and that panic() is
4986 * called exactly once.)
4987 */
4988 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
4989 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
4990 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
4991
4992 #if defined(__APPLE__)
4993 /* Mac OS X debug feature -- can return from panic() */
4994 dtrace_panicked = NULL;
4995 #endif /* __APPLE__ */
4996 }
4997
4998 static void
4999 dtrace_action_raise(uint64_t sig)
5000 {
5001 if (dtrace_destructive_disallow)
5002 return;
5003
5004 if (sig >= NSIG) {
5005 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5006 return;
5007 }
5008
5009 #if !defined(__APPLE__)
5010 /*
5011 * raise() has a queue depth of 1 -- we ignore all subsequent
5012 * invocations of the raise() action.
5013 */
5014 if (curthread->t_dtrace_sig == 0)
5015 curthread->t_dtrace_sig = (uint8_t)sig;
5016
5017 curthread->t_sig_check = 1;
5018 aston(curthread);
5019 #else
5020 uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
5021
5022 if (uthread && uthread->t_dtrace_sig == 0) {
5023 uthread->t_dtrace_sig = sig;
5024 psignal(current_proc(), (int)sig);
5025 }
5026 #endif /* __APPLE__ */
5027 }
5028
5029 static void
5030 dtrace_action_stop(void)
5031 {
5032 if (dtrace_destructive_disallow)
5033 return;
5034
5035 #if !defined(__APPLE__)
5036 if (!curthread->t_dtrace_stop) {
5037 curthread->t_dtrace_stop = 1;
5038 curthread->t_sig_check = 1;
5039 aston(curthread);
5040 }
5041 #else
5042 psignal(current_proc(), SIGSTOP);
5043 #endif /* __APPLE__ */
5044 }
5045
5046 static void
5047 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5048 {
5049 hrtime_t now;
5050 volatile uint16_t *flags;
5051 cpu_t *cpu = CPU;
5052
5053 if (dtrace_destructive_disallow)
5054 return;
5055
5056 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5057
5058 now = dtrace_gethrtime();
5059
5060 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5061 /*
5062 * We need to advance the mark to the current time.
5063 */
5064 cpu->cpu_dtrace_chillmark = now;
5065 cpu->cpu_dtrace_chilled = 0;
5066 }
5067
5068 /*
5069 * Now check to see if the requested chill time would take us over
5070 * the maximum amount of time allowed in the chill interval. (Or
5071 * worse, if the calculation itself induces overflow.)
5072 */
5073 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5074 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5075 *flags |= CPU_DTRACE_ILLOP;
5076 return;
5077 }
5078
5079 while (dtrace_gethrtime() - now < val)
5080 continue;
5081
5082 /*
5083 * Normally, we assure that the value of the variable "timestamp" does
5084 * not change within an ECB. The presence of chill() represents an
5085 * exception to this rule, however.
5086 */
5087 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5088 cpu->cpu_dtrace_chilled += val;
5089 }
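/*
 * Example (annotation, not in the original source): a chill(500000) action
 * spins this CPU for 500 microseconds with interrupts disabled.  The
 * per-CPU bookkeeping above caps the cumulative chill time within a chill
 * interval at dtrace_chill_max; a request that would exceed the cap (or
 * overflow the accumulator) is refused with CPU_DTRACE_ILLOP rather than
 * honored.
 */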
5090
5091 static void
5092 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5093 uint64_t *buf, uint64_t arg)
5094 {
5095 int nframes = DTRACE_USTACK_NFRAMES(arg);
5096 int strsize = DTRACE_USTACK_STRSIZE(arg);
5097 uint64_t *pcs = &buf[1], *fps;
5098 char *str = (char *)&pcs[nframes];
5099 int size, offs = 0, i, j;
5100 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5101 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5102 char *sym;
5103
5104 /*
5105 * Should be taking a faster path if string space has not been
5106 * allocated.
5107 */
5108 ASSERT(strsize != 0);
5109
5110 /*
5111 * We will first allocate some temporary space for the frame pointers.
5112 */
5113 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5114 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5115 (nframes * sizeof (uint64_t));
5116
5117 if (mstate->dtms_scratch_ptr + size >
5118 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
5119 /*
5120 * Not enough room for our frame pointers -- need to indicate
5121 * that we ran out of scratch space.
5122 */
5123 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5124 return;
5125 }
5126
5127 mstate->dtms_scratch_ptr += size;
5128 saved = mstate->dtms_scratch_ptr;
5129
5130 /*
5131 * Now get a stack with both program counters and frame pointers.
5132 */
5133 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5134 dtrace_getufpstack(buf, fps, nframes + 1);
5135 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5136
5137 /*
5138 * If that faulted, we're cooked.
5139 */
5140 if (*flags & CPU_DTRACE_FAULT)
5141 goto out;
5142
5143 /*
5144 * Now we want to walk up the stack, calling the USTACK helper. For
5145 * each iteration, we restore the scratch pointer.
5146 */
5147 for (i = 0; i < nframes; i++) {
5148 mstate->dtms_scratch_ptr = saved;
5149
5150 if (offs >= strsize)
5151 break;
5152
5153 sym = (char *)(uintptr_t)dtrace_helper(
5154 DTRACE_HELPER_ACTION_USTACK,
5155 mstate, state, pcs[i], fps[i]);
5156
5157 /*
5158 * If we faulted while running the helper, we're going to
5159 * clear the fault and null out the corresponding string.
5160 */
5161 if (*flags & CPU_DTRACE_FAULT) {
5162 *flags &= ~CPU_DTRACE_FAULT;
5163 str[offs++] = '\0';
5164 continue;
5165 }
5166
5167 if (sym == NULL) {
5168 str[offs++] = '\0';
5169 continue;
5170 }
5171
5172 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5173
5174 /*
5175 * Now copy in the string that the helper returned to us.
5176 */
5177 for (j = 0; offs + j < strsize; j++) {
5178 if ((str[offs + j] = sym[j]) == '\0')
5179 break;
5180 }
5181
5182 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5183
5184 offs += j + 1;
5185 }
5186
5187 if (offs >= strsize) {
5188 /*
5189 * If we didn't have room for all of the strings, we don't
5190 * abort processing -- this needn't be a fatal error -- but we
5191 * still want to increment a counter (dts_stkstroverflows) to
5192 * allow this condition to be warned about. (If this is from
5193 * a jstack() action, it is easily tuned via jstackstrsize.)
5194 */
5195 dtrace_error(&state->dts_stkstroverflows);
5196 }
5197
5198 while (offs < strsize)
5199 str[offs++] = '\0';
5200
5201 out:
5202 mstate->dtms_scratch_ptr = old;
5203 }
5204
5205 /*
5206 * If you're looking for the epicenter of DTrace, you just found it. This
5207 * is the function called by the provider to fire a probe -- from which all
5208 * subsequent probe-context DTrace activity emanates.
5209 */
5210 #if !defined(__APPLE__)
5211 void
5212 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5213 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5214 #else
5215 static void
5216 __dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
5217 uint64_t arg2, uint64_t arg3, uint64_t arg4)
5218 #endif /* __APPLE__ */
5219 {
5220 processorid_t cpuid;
5221 dtrace_icookie_t cookie;
5222 dtrace_probe_t *probe;
5223 dtrace_mstate_t mstate;
5224 dtrace_ecb_t *ecb;
5225 dtrace_action_t *act;
5226 intptr_t offs;
5227 size_t size;
5228 int vtime, onintr;
5229 volatile uint16_t *flags;
5230 hrtime_t now;
5231
5232 #if !defined(__APPLE__)
5233 /*
5234 * Kick out immediately if this CPU is still being born (in which case
5235 * curthread will be set to -1)
5236 */
5237 if ((uintptr_t)curthread & 1)
5238 return;
5239 #else
5240 #endif /* __APPLE__ */
5241
5242 cookie = dtrace_interrupt_disable();
5243 probe = dtrace_probes[id - 1];
5244 cpuid = CPU->cpu_id;
5245 onintr = CPU_ON_INTR(CPU);
5246
5247 #if !defined(__APPLE__)
5248 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5249 probe->dtpr_predcache == curthread->t_predcache) {
5250 #else
5251 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5252 probe->dtpr_predcache == dtrace_get_thread_predcache(current_thread())) {
5253 #endif /* __APPLE__ */
5254 /*
5255 * We have hit in the predicate cache; we know that
5256 * this predicate would evaluate to be false.
5257 */
5258 dtrace_interrupt_enable(cookie);
5259 return;
5260 }
5261
5262 if (panic_quiesce) {
5263 /*
5264 * We don't trace anything if we're panicking.
5265 */
5266 dtrace_interrupt_enable(cookie);
5267 return;
5268 }
5269
5270 #if !defined(__APPLE__)
5271 now = dtrace_gethrtime();
5272 vtime = dtrace_vtime_references != 0;
5273
5274 if (vtime && curthread->t_dtrace_start)
5275 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5276 #else
5277 vtime = dtrace_vtime_references != 0;
5278
5279 if (vtime)
5280 {
5281 int64_t dtrace_accum_time, recent_vtime;
5282 thread_t thread = current_thread();
5283
5284 dtrace_accum_time = dtrace_get_thread_tracing(thread); /* Time spent inside DTrace so far (nanoseconds) */
5285
5286 if (dtrace_accum_time >= 0) {
5287 recent_vtime = dtrace_abs_to_nano(dtrace_calc_thread_recent_vtime(thread)); /* up to the moment thread vtime */
5288
5289 recent_vtime = recent_vtime - dtrace_accum_time; /* Time without DTrace contribution */
5290
5291 dtrace_set_thread_vtime(thread, recent_vtime);
5292 }
5293 }
5294
5295 now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */
5296 #endif /* __APPLE__ */
5297
5298 mstate.dtms_probe = probe;
5299 mstate.dtms_arg[0] = arg0;
5300 mstate.dtms_arg[1] = arg1;
5301 mstate.dtms_arg[2] = arg2;
5302 mstate.dtms_arg[3] = arg3;
5303 mstate.dtms_arg[4] = arg4;
5304
5305 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5306
5307 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5308 dtrace_predicate_t *pred = ecb->dte_predicate;
5309 dtrace_state_t *state = ecb->dte_state;
5310 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5311 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5312 dtrace_vstate_t *vstate = &state->dts_vstate;
5313 dtrace_provider_t *prov = probe->dtpr_provider;
5314 int committed = 0;
5315 caddr_t tomax;
5316
5317 /*
5318 * A little subtlety with the following (seemingly innocuous)
5319 * declaration of the automatic 'val': by looking at the
5320 * code, you might think that it could be declared in the
5321 * action processing loop, below. (That is, it's only used in
5322 * the action processing loop.) However, it must be declared
5323 * out of that scope because in the case of DIF expression
5324 * arguments to aggregating actions, one iteration of the
5325 * action loop will use the last iteration's value.
5326 */
5327 #ifdef lint
5328 uint64_t val = 0;
5329 #else
5330 uint64_t val;
5331 #endif
5332
5333 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5334 *flags &= ~CPU_DTRACE_ERROR;
5335
5336 if (prov == dtrace_provider) {
5337 /*
5338 * If dtrace itself is the provider of this probe,
5339 * we're only going to continue processing the ECB if
5340 * arg0 (the dtrace_state_t) is equal to the ECB's
5341 * creating state. (This prevents disjoint consumers
5342 * from seeing one another's metaprobes.)
5343 */
5344 if (arg0 != (uint64_t)(uintptr_t)state)
5345 continue;
5346 }
5347
5348 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5349 /*
5350 * We're not currently active. If our provider isn't
5351 * the dtrace pseudo provider, we're not interested.
5352 */
5353 if (prov != dtrace_provider)
5354 continue;
5355
5356 /*
5357 * Now we must further check if we are in the BEGIN
5358 * probe. If we are, we will only continue processing
5359 * if we're still in WARMUP -- if one BEGIN enabling
5360 * has invoked the exit() action, we don't want to
5361 * evaluate subsequent BEGIN enablings.
5362 */
5363 if (probe->dtpr_id == dtrace_probeid_begin &&
5364 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5365 ASSERT(state->dts_activity ==
5366 DTRACE_ACTIVITY_DRAINING);
5367 continue;
5368 }
5369 }
5370
5371 #if defined(__APPLE__)
5372 /*
5373 * If the thread on which this probe has fired belongs to a process marked P_LNOATTACH
5374 * then this enabling is not permitted to observe it. Move along, nothing to see here.
5375 */
5376 if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
5377 continue;
5378 }
5379 #endif /* __APPLE__ */
5380
5381 if (ecb->dte_cond) {
5382 /*
5383 * If the dte_cond bits indicate that this
5384 * consumer is only allowed to see user-mode firings
5385 * of this probe, call the provider's dtps_usermode()
5386 * entry point to check that the probe was fired
5387 * while in a user context. Skip this ECB if that's
5388 * not the case.
5389 */
5390 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
5391 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
5392 probe->dtpr_id, probe->dtpr_arg) == 0)
5393 continue;
5394
5395 /*
5396 * This is more subtle than it looks. We have to be
5397 * absolutely certain that CRED() isn't going to
5398 * change out from under us so it's only legit to
5399 * examine that structure if we're in constrained
5400 * situations. Currently, the only time we'll do this
5401 * check is if a non-super-user has enabled the
5402 * profile or syscall providers -- providers that
5403 * allow visibility of all processes. For the
5404 * profile case, the check above will ensure that
5405 * we're examining a user context.
5406 */
5407 if (ecb->dte_cond & DTRACE_COND_OWNER) {
5408 cred_t *cr;
5409 cred_t *s_cr =
5410 ecb->dte_state->dts_cred.dcr_cred;
5411 proc_t *proc;
5412
5413 ASSERT(s_cr != NULL);
5414
5415 #if !defined(__APPLE__)
5416 if ((cr = CRED()) == NULL ||
5417 #else
5418 if ((cr = dtrace_CRED()) == NULL ||
5419 #endif /* __APPLE__ */
5420 s_cr->cr_uid != cr->cr_uid ||
5421 s_cr->cr_uid != cr->cr_ruid ||
5422 s_cr->cr_uid != cr->cr_suid ||
5423 s_cr->cr_gid != cr->cr_gid ||
5424 s_cr->cr_gid != cr->cr_rgid ||
5425 s_cr->cr_gid != cr->cr_sgid ||
5426 #if !defined(__APPLE__)
5427 (proc = ttoproc(curthread)) == NULL ||
5428 (proc->p_flag & SNOCD))
5429 #else
5430 1) /* Darwin omits "No Core Dump" flag. */
5431 #endif /* __APPLE__ */
5432 continue;
5433 }
5434
5435 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
5436 cred_t *cr;
5437 cred_t *s_cr =
5438 ecb->dte_state->dts_cred.dcr_cred;
5439
5440 ASSERT(s_cr != NULL);
5441
5442 #if !defined(__APPLE__) /* Darwin doesn't do zones. */
5443 if ((cr = CRED()) == NULL ||
5444 s_cr->cr_zone->zone_id !=
5445 cr->cr_zone->zone_id)
5446 continue;
5447 #endif /* __APPLE__ */
5448 }
5449 }
5450
5451 if (now - state->dts_alive > dtrace_deadman_timeout) {
5452 /*
5453 * We seem to be dead. Unless we (a) have kernel
5454 * destructive permissions, (b) have explicitly enabled
5455 * destructive actions, and (c) destructive actions have
5456 * not been disabled, we're going to transition into
5457 * the KILLED state, from which no further processing
5458 * on this state will be performed.
5459 */
5460 if (!dtrace_priv_kernel_destructive(state) ||
5461 !state->dts_cred.dcr_destructive ||
5462 dtrace_destructive_disallow) {
5463 void *activity = &state->dts_activity;
5464 dtrace_activity_t current;
5465
5466 do {
5467 current = state->dts_activity;
5468 } while (dtrace_cas32(activity, current,
5469 DTRACE_ACTIVITY_KILLED) != current);
5470
5471 continue;
5472 }
5473 }
5474
5475 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
5476 ecb->dte_alignment, state, &mstate)) < 0)
5477 continue;
5478
5479 tomax = buf->dtb_tomax;
5480 ASSERT(tomax != NULL);
5481
5482 if (ecb->dte_size != 0)
5483 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
5484
5485 mstate.dtms_epid = ecb->dte_epid;
5486 mstate.dtms_present |= DTRACE_MSTATE_EPID;
5487
5488 if (pred != NULL) {
5489 dtrace_difo_t *dp = pred->dtp_difo;
5490 int rval;
5491
5492 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
5493
5494 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
5495 dtrace_cacheid_t cid = probe->dtpr_predcache;
5496
5497 if (cid != DTRACE_CACHEIDNONE && !onintr) {
5498 /*
5499 * Update the predicate cache...
5500 */
5501 ASSERT(cid == pred->dtp_cacheid);
5502 #if !defined(__APPLE__)
5503 curthread->t_predcache = cid;
5504 #else
5505 dtrace_set_thread_predcache(current_thread(), cid);
5506 #endif /* __APPLE__ */
5507 }
5508
5509 continue;
5510 }
5511 }
5512
5513 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
5514 act != NULL; act = act->dta_next) {
5515 size_t valoffs;
5516 dtrace_difo_t *dp;
5517 dtrace_recdesc_t *rec = &act->dta_rec;
5518
5519 size = rec->dtrd_size;
5520 valoffs = offs + rec->dtrd_offset;
5521
5522 if (DTRACEACT_ISAGG(act->dta_kind)) {
5523 uint64_t v = 0xbad;
5524 dtrace_aggregation_t *agg;
5525
5526 agg = (dtrace_aggregation_t *)act;
5527
5528 if ((dp = act->dta_difo) != NULL)
5529 v = dtrace_dif_emulate(dp,
5530 &mstate, vstate, state);
5531
5532 if (*flags & CPU_DTRACE_ERROR)
5533 continue;
5534
5535 /*
5536 * Note that we always pass the expression
5537 * value from the previous iteration of the
5538 * action loop. This value will only be used
5539 * if there is an expression argument to the
5540 * aggregating action, denoted by the
5541 * dtag_hasarg field.
5542 */
5543 dtrace_aggregate(agg, buf,
5544 offs, aggbuf, v, val);
5545 continue;
5546 }
5547
5548 switch (act->dta_kind) {
5549 case DTRACEACT_STOP:
5550 if (dtrace_priv_proc_destructive(state))
5551 dtrace_action_stop();
5552 continue;
5553
5554 case DTRACEACT_BREAKPOINT:
5555 if (dtrace_priv_kernel_destructive(state))
5556 dtrace_action_breakpoint(ecb);
5557 continue;
5558
5559 case DTRACEACT_PANIC:
5560 if (dtrace_priv_kernel_destructive(state))
5561 dtrace_action_panic(ecb);
5562 continue;
5563
5564 case DTRACEACT_STACK:
5565 if (!dtrace_priv_kernel(state))
5566 continue;
5567
5568 dtrace_getpcstack((pc_t *)(tomax + valoffs),
5569 size / sizeof (pc_t), probe->dtpr_aframes,
5570 DTRACE_ANCHORED(probe) ? NULL :
5571 (uint32_t *)arg0);
5572
5573 continue;
5574
5575 case DTRACEACT_JSTACK:
5576 case DTRACEACT_USTACK:
5577 if (!dtrace_priv_proc(state))
5578 continue;
5579
5580 /*
5581 * See comment in DIF_VAR_PID.
5582 */
5583 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
5584 CPU_ON_INTR(CPU)) {
5585 int depth = DTRACE_USTACK_NFRAMES(
5586 rec->dtrd_arg) + 1;
5587
5588 dtrace_bzero((void *)(tomax + valoffs),
5589 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
5590 + depth * sizeof (uint64_t));
5591
5592 continue;
5593 }
5594
5595 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
5596 curproc->p_dtrace_helpers != NULL) {
5597 /*
5598 * This is the slow path -- we have
5599 * allocated string space, and we're
5600 * getting the stack of a process that
5601 * has helpers. Call into a separate
5602 * routine to perform this processing.
5603 */
5604 dtrace_action_ustack(&mstate, state,
5605 (uint64_t *)(tomax + valoffs),
5606 rec->dtrd_arg);
5607 continue;
5608 }
5609
5610 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5611 dtrace_getupcstack((uint64_t *)
5612 (tomax + valoffs),
5613 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
5614 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5615 continue;
5616
5617 default:
5618 break;
5619 }
5620
5621 dp = act->dta_difo;
5622 ASSERT(dp != NULL);
5623
5624 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
5625
5626 if (*flags & CPU_DTRACE_ERROR)
5627 continue;
5628
5629 switch (act->dta_kind) {
5630 case DTRACEACT_SPECULATE:
5631 ASSERT(buf == &state->dts_buffer[cpuid]);
5632 buf = dtrace_speculation_buffer(state,
5633 cpuid, val);
5634
5635 if (buf == NULL) {
5636 *flags |= CPU_DTRACE_DROP;
5637 continue;
5638 }
5639
5640 offs = dtrace_buffer_reserve(buf,
5641 ecb->dte_needed, ecb->dte_alignment,
5642 state, NULL);
5643
5644 if (offs < 0) {
5645 *flags |= CPU_DTRACE_DROP;
5646 continue;
5647 }
5648
5649 tomax = buf->dtb_tomax;
5650 ASSERT(tomax != NULL);
5651
5652 if (ecb->dte_size != 0)
5653 DTRACE_STORE(uint32_t, tomax, offs,
5654 ecb->dte_epid);
5655 continue;
5656
5657 case DTRACEACT_CHILL:
5658 if (dtrace_priv_kernel_destructive(state))
5659 dtrace_action_chill(&mstate, val);
5660 continue;
5661
5662 case DTRACEACT_RAISE:
5663 if (dtrace_priv_proc_destructive(state))
5664 dtrace_action_raise(val);
5665 continue;
5666
5667 case DTRACEACT_COMMIT:
5668 ASSERT(!committed);
5669
5670 /*
5671 * We need to commit our buffer state.
5672 */
5673 if (ecb->dte_size)
5674 buf->dtb_offset = offs + ecb->dte_size;
5675 buf = &state->dts_buffer[cpuid];
5676 dtrace_speculation_commit(state, cpuid, val);
5677 committed = 1;
5678 continue;
5679
5680 case DTRACEACT_DISCARD:
5681 dtrace_speculation_discard(state, cpuid, val);
5682 continue;
5683
5684 case DTRACEACT_DIFEXPR:
5685 case DTRACEACT_LIBACT:
5686 case DTRACEACT_PRINTF:
5687 case DTRACEACT_PRINTA:
5688 case DTRACEACT_SYSTEM:
5689 case DTRACEACT_FREOPEN:
5690 break;
5691
5692 case DTRACEACT_SYM:
5693 case DTRACEACT_MOD:
5694 if (!dtrace_priv_kernel(state))
5695 continue;
5696 break;
5697
5698 #if !defined(__APPLE__)
5699 case DTRACEACT_USYM:
5700 case DTRACEACT_UMOD:
5701 case DTRACEACT_UADDR: {
5702 struct pid *pid = curthread->t_procp->p_pidp;
5703
5704 if (!dtrace_priv_proc(state))
5705 continue;
5706
5707 DTRACE_STORE(uint64_t, tomax,
5708 valoffs, (uint64_t)pid->pid_id);
5709 DTRACE_STORE(uint64_t, tomax,
5710 valoffs + sizeof (uint64_t), val);
5711
5712 continue;
5713 }
5714 #else
5715 case DTRACEACT_USYM:
5716 case DTRACEACT_UMOD:
5717 case DTRACEACT_UADDR: {
5718 if (!dtrace_priv_proc(state))
5719 continue;
5720
5721 DTRACE_STORE(uint64_t, tomax,
5722 valoffs, (uint64_t)proc_selfpid());
5723 DTRACE_STORE(uint64_t, tomax,
5724 valoffs + sizeof (uint64_t), val);
5725
5726 continue;
5727 }
5728 #endif /* __APPLE__ */
5729
5730 case DTRACEACT_EXIT: {
5731 /*
5732 * For the exit action, we are going to attempt
5733 * to atomically set our activity to be
5734 * draining. If this fails (either because
5735 * another CPU has beat us to the exit action,
5736 * or because our current activity is something
5737 * other than ACTIVE or WARMUP), we will
5738 * continue. This assures that the exit action
5739 * can be successfully recorded at most once
5740 * when we're in the ACTIVE state. If we're
5741 * encountering the exit() action while in
5742 * COOLDOWN, however, we want to honor the new
5743 * status code. (We know that we're the only
5744 * thread in COOLDOWN, so there is no race.)
5745 */
5746 void *activity = &state->dts_activity;
5747 dtrace_activity_t current = state->dts_activity;
5748
5749 if (current == DTRACE_ACTIVITY_COOLDOWN)
5750 break;
5751
5752 if (current != DTRACE_ACTIVITY_WARMUP)
5753 current = DTRACE_ACTIVITY_ACTIVE;
5754
5755 if (dtrace_cas32(activity, current,
5756 DTRACE_ACTIVITY_DRAINING) != current) {
5757 *flags |= CPU_DTRACE_DROP;
5758 continue;
5759 }
5760
5761 break;
5762 }
5763
5764 default:
5765 ASSERT(0);
5766 }
5767
5768 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
5769 uintptr_t end = valoffs + size;
5770
5771 /*
5772 * If this is a string, we're going to only
5773 * load until we find the zero byte -- after
5774 * which we'll store zero bytes.
5775 */
5776 if (dp->dtdo_rtype.dtdt_kind ==
5777 DIF_TYPE_STRING) {
5778 char c = '\0' + 1;
5779 int intuple = act->dta_intuple;
5780 size_t s;
5781
5782 for (s = 0; s < size; s++) {
5783 if (c != '\0')
5784 c = dtrace_load8(val++);
5785
5786 DTRACE_STORE(uint8_t, tomax,
5787 valoffs++, c);
5788
5789 if (c == '\0' && intuple)
5790 break;
5791 }
5792
5793 continue;
5794 }
5795
5796 while (valoffs < end) {
5797 DTRACE_STORE(uint8_t, tomax, valoffs++,
5798 dtrace_load8(val++));
5799 }
5800
5801 continue;
5802 }
5803
5804 switch (size) {
5805 case 0:
5806 break;
5807
5808 case sizeof (uint8_t):
5809 DTRACE_STORE(uint8_t, tomax, valoffs, val);
5810 break;
5811 case sizeof (uint16_t):
5812 DTRACE_STORE(uint16_t, tomax, valoffs, val);
5813 break;
5814 case sizeof (uint32_t):
5815 DTRACE_STORE(uint32_t, tomax, valoffs, val);
5816 break;
5817 case sizeof (uint64_t):
5818 DTRACE_STORE(uint64_t, tomax, valoffs, val);
5819 break;
5820 default:
5821 /*
5822 * Any other size should have been returned by
5823 * reference, not by value.
5824 */
5825 ASSERT(0);
5826 break;
5827 }
5828 }
5829
5830 if (*flags & CPU_DTRACE_DROP)
5831 continue;
5832
5833 if (*flags & CPU_DTRACE_FAULT) {
5834 int ndx;
5835 dtrace_action_t *err;
5836
5837 buf->dtb_errors++;
5838
5839 if (probe->dtpr_id == dtrace_probeid_error) {
5840 /*
5841 * There's nothing we can do -- we had an
5842 * error on the error probe. We bump an
5843 * error counter to at least indicate that
5844 * this condition happened.
5845 */
5846 dtrace_error(&state->dts_dblerrors);
5847 continue;
5848 }
5849
5850 if (vtime) {
5851 /*
5852 * Before recursing on dtrace_probe(), we
5853 * need to explicitly clear out our start
5854 * time to prevent it from being accumulated
5855 * into t_dtrace_vtime.
5856 */
5857 #if !defined(__APPLE__)
5858 curthread->t_dtrace_start = 0;
5859 #else
5860 /* Set the sign bit on t_dtrace_tracing to suspend accumulation to it. */
5861 dtrace_set_thread_tracing(current_thread(),
5862 (1ULL<<63) | dtrace_get_thread_tracing(current_thread()));
5863 #endif /* __APPLE__ */
5864 }
5865
5866 /*
5867 * Iterate over the actions to figure out which action
5868 * we were processing when we experienced the error.
5869 * Note that act points _past_ the faulting action; if
5870 * act is ecb->dte_action, the fault was in the
5871 * predicate, if it's ecb->dte_action->dta_next it's
5872 * in action #1, and so on.
5873 */
5874 for (err = ecb->dte_action, ndx = 0;
5875 err != act; err = err->dta_next, ndx++)
5876 continue;
5877
5878 dtrace_probe_error(state, ecb->dte_epid, ndx,
5879 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
5880 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
5881 cpu_core[cpuid].cpuc_dtrace_illval);
5882
5883 continue;
5884 }
5885
5886 if (!committed)
5887 buf->dtb_offset = offs + ecb->dte_size;
5888 }
5889
5890 #if !defined(__APPLE__)
5891 if (vtime)
5892 curthread->t_dtrace_start = dtrace_gethrtime();
5893 #else
5894 if (vtime) {
5895 thread_t thread = current_thread();
5896 int64_t t = dtrace_get_thread_tracing(thread);
5897
5898 if (t >= 0) {
5899 /* Usual case, accumulate time spent here into t_dtrace_tracing */
5900 dtrace_set_thread_tracing(thread, t + (dtrace_gethrtime() - now));
5901 } else {
5902 /* Return from error recursion. No accumulation, just clear the sign bit on t_dtrace_tracing. */
5903 dtrace_set_thread_tracing(thread, (~(1ULL<<63)) & t);
5904 }
5905 }
5906 #endif /* __APPLE__ */
5907
5908 dtrace_interrupt_enable(cookie);
5909 }
5910
5911 #if defined(__APPLE__)
5912 /* Don't allow a thread to re-enter dtrace_probe() */
5913 void
5914 dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
5915 uint64_t arg2, uint64_t arg3, uint64_t arg4)
5916 {
5917 thread_t thread = current_thread();
5918
5919 if (id == dtrace_probeid_error) {
5920 __dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
5921 dtrace_getfp(); /* Defeat tail-call optimization of __dtrace_probe() */
5922 } else if (!dtrace_get_thread_reentering(thread)) {
5923 dtrace_set_thread_reentering(thread, TRUE);
5924 __dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
5925 dtrace_set_thread_reentering(thread, FALSE);
5926 }
5927 }
5928 #endif /* __APPLE__ */
5929
5930 /*
5931 * DTrace Probe Hashing Functions
5932 *
5933 * The functions in this section (and indeed, the functions in remaining
5934 * sections) are not _called_ from probe context. (Any exceptions to this are
5935 * marked with a "Note:".) Rather, they are called from elsewhere in the
5936 * DTrace framework to look-up probes in, add probes to and remove probes from
5937 * the DTrace probe hashes. (Each probe is hashed by each element of the
5938 * probe tuple -- allowing for fast lookups, regardless of what was
5939 * specified.)
5940 */
5941 static uint_t
5942 dtrace_hash_str(char *p)
5943 {
5944 unsigned int g;
5945 uint_t hval = 0;
5946
5947 while (*p) {
5948 hval = (hval << 4) + *p++;
5949 if ((g = (hval & 0xf0000000)) != 0)
5950 hval ^= g >> 24;
5951 hval &= ~g;
5952 }
5953 return (hval);
5954 }
5955
5956 static dtrace_hash_t *
5957 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
5958 {
5959 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
5960
5961 hash->dth_stroffs = stroffs;
5962 hash->dth_nextoffs = nextoffs;
5963 hash->dth_prevoffs = prevoffs;
5964
5965 hash->dth_size = 1;
5966 hash->dth_mask = hash->dth_size - 1;
5967
5968 hash->dth_tab = kmem_zalloc(hash->dth_size *
5969 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
5970
5971 return (hash);
5972 }
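/*
 * Illustrative sketch (not compiled here; member names assumed from
 * <sys/dtrace_impl.h>): each probe hash is keyed by one string member of
 * dtrace_probe_t and threads its own next/prev links, all expressed as
 * structure offsets.  The attach path constructs the three hashes along
 * these lines:
 */
#if 0
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));
#endif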
5973
5974 #if !defined(__APPLE__) /* Quiet compiler warning */
5975 static void
5976 dtrace_hash_destroy(dtrace_hash_t *hash)
5977 {
5978 #ifdef DEBUG
5979 int i;
5980
5981 for (i = 0; i < hash->dth_size; i++)
5982 ASSERT(hash->dth_tab[i] == NULL);
5983 #endif
5984
5985 kmem_free(hash->dth_tab,
5986 hash->dth_size * sizeof (dtrace_hashbucket_t *));
5987 kmem_free(hash, sizeof (dtrace_hash_t));
5988 }
5989 #endif /* __APPLE__ */
5990
5991 static void
5992 dtrace_hash_resize(dtrace_hash_t *hash)
5993 {
5994 int size = hash->dth_size, i, ndx;
5995 int new_size = hash->dth_size << 1;
5996 int new_mask = new_size - 1;
5997 dtrace_hashbucket_t **new_tab, *bucket, *next;
5998
5999 ASSERT((new_size & new_mask) == 0);
6000
6001 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6002
6003 for (i = 0; i < size; i++) {
6004 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6005 dtrace_probe_t *probe = bucket->dthb_chain;
6006
6007 ASSERT(probe != NULL);
6008 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6009
6010 next = bucket->dthb_next;
6011 bucket->dthb_next = new_tab[ndx];
6012 new_tab[ndx] = bucket;
6013 }
6014 }
6015
6016 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6017 hash->dth_tab = new_tab;
6018 hash->dth_size = new_size;
6019 hash->dth_mask = new_mask;
6020 }
6021
6022 static void
6023 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6024 {
6025 int hashval = DTRACE_HASHSTR(hash, new);
6026 int ndx = hashval & hash->dth_mask;
6027 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6028 dtrace_probe_t **nextp, **prevp;
6029
6030 for (; bucket != NULL; bucket = bucket->dthb_next) {
6031 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6032 goto add;
6033 }
6034
6035 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6036 dtrace_hash_resize(hash);
6037 dtrace_hash_add(hash, new);
6038 return;
6039 }
6040
6041 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6042 bucket->dthb_next = hash->dth_tab[ndx];
6043 hash->dth_tab[ndx] = bucket;
6044 hash->dth_nbuckets++;
6045
6046 add:
6047 nextp = DTRACE_HASHNEXT(hash, new);
6048 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6049 *nextp = bucket->dthb_chain;
6050
6051 if (bucket->dthb_chain != NULL) {
6052 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6053 ASSERT(*prevp == NULL);
6054 *prevp = new;
6055 }
6056
6057 bucket->dthb_chain = new;
6058 bucket->dthb_len++;
6059 }
6060
6061 static dtrace_probe_t *
6062 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6063 {
6064 int hashval = DTRACE_HASHSTR(hash, template);
6065 int ndx = hashval & hash->dth_mask;
6066 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6067
6068 for (; bucket != NULL; bucket = bucket->dthb_next) {
6069 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6070 return (bucket->dthb_chain);
6071 }
6072
6073 return (NULL);
6074 }
6075
6076 static int
6077 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6078 {
6079 int hashval = DTRACE_HASHSTR(hash, template);
6080 int ndx = hashval & hash->dth_mask;
6081 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6082
6083 for (; bucket != NULL; bucket = bucket->dthb_next) {
6084 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6085 return (bucket->dthb_len);
6086 }
6087
6088 return (0);
6089 }
6090
6091 static void
6092 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6093 {
6094 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6095 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6096
6097 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6098 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6099
6100 /*
6101 * Find the bucket that we're removing this probe from.
6102 */
6103 for (; bucket != NULL; bucket = bucket->dthb_next) {
6104 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6105 break;
6106 }
6107
6108 ASSERT(bucket != NULL);
6109
6110 if (*prevp == NULL) {
6111 if (*nextp == NULL) {
6112 /*
6113 * The removed probe was the only probe on this
6114 * bucket; we need to remove the bucket.
6115 */
6116 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6117
6118 ASSERT(bucket->dthb_chain == probe);
6119 ASSERT(b != NULL);
6120
6121 if (b == bucket) {
6122 hash->dth_tab[ndx] = bucket->dthb_next;
6123 } else {
6124 while (b->dthb_next != bucket)
6125 b = b->dthb_next;
6126 b->dthb_next = bucket->dthb_next;
6127 }
6128
6129 ASSERT(hash->dth_nbuckets > 0);
6130 hash->dth_nbuckets--;
6131 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6132 return;
6133 }
6134
6135 bucket->dthb_chain = *nextp;
6136 } else {
6137 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6138 }
6139
6140 if (*nextp != NULL)
6141 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6142 }
6143
6144 /*
6145 * DTrace Utility Functions
6146 *
6147 * These are random utility functions that are _not_ called from probe context.
6148 */
6149 static int
6150 dtrace_badattr(const dtrace_attribute_t *a)
6151 {
6152 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6153 a->dtat_data > DTRACE_STABILITY_MAX ||
6154 a->dtat_class > DTRACE_CLASS_MAX);
6155 }
6156
6157 /*
6158 * Return a duplicate copy of a string. If the specified string is NULL,
6159 * this function returns a zero-length string.
6160 */
6161 static char *
6162 dtrace_strdup(const char *str)
6163 {
6164 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6165
6166 if (str != NULL)
6167 (void) strcpy(new, str);
6168
6169 return (new);
6170 }
6171
6172 #define DTRACE_ISALPHA(c) \
6173 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6174
6175 static int
6176 dtrace_badname(const char *s)
6177 {
6178 char c;
6179
6180 if (s == NULL || (c = *s++) == '\0')
6181 return (0);
6182
6183 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6184 return (1);
6185
6186 while ((c = *s++) != '\0') {
6187 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6188 c != '-' && c != '_' && c != '.' && c != '`')
6189 return (1);
6190 }
6191
6192 return (0);
6193 }
6194
6195 static void
6196 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6197 {
6198 uint32_t priv;
6199
6200 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6201 /*
6202 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6203 */
6204 priv = DTRACE_PRIV_ALL;
6205 } else {
6206 *uidp = crgetuid(cr);
6207 *zoneidp = crgetzoneid(cr);
6208
6209 priv = 0;
6210 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6211 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6212 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6213 priv |= DTRACE_PRIV_USER;
6214 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6215 priv |= DTRACE_PRIV_PROC;
6216 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6217 priv |= DTRACE_PRIV_OWNER;
6218 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6219 priv |= DTRACE_PRIV_ZONEOWNER;
6220 }
6221
6222 *privp = priv;
6223 }
6224
6225 #ifdef DTRACE_ERRDEBUG
6226 static void
6227 dtrace_errdebug(const char *str)
6228 {
6229 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
6230 int occupied = 0;
6231
6232 lck_mtx_lock(&dtrace_errlock);
6233 dtrace_errlast = str;
6234 #if !defined(__APPLE__)
6235 dtrace_errthread = curthread;
6236 #else
6237 dtrace_errthread = current_thread();
6238 #endif /* __APPLE__ */
6239
6240 while (occupied++ < DTRACE_ERRHASHSZ) {
6241 if (dtrace_errhash[hval].dter_msg == str) {
6242 dtrace_errhash[hval].dter_count++;
6243 goto out;
6244 }
6245
6246 if (dtrace_errhash[hval].dter_msg != NULL) {
6247 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6248 continue;
6249 }
6250
6251 dtrace_errhash[hval].dter_msg = str;
6252 dtrace_errhash[hval].dter_count = 1;
6253 goto out;
6254 }
6255
6256 panic("dtrace: undersized error hash");
6257 out:
6258 lck_mtx_unlock(&dtrace_errlock);
6259 }
6260 #endif
6261
6262 /*
6263 * DTrace Matching Functions
6264 *
6265 * These functions are used to match groups of probes, given some elements of
6266 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6267 */
6268 static int
6269 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6270 zoneid_t zoneid)
6271 {
6272 if (priv != DTRACE_PRIV_ALL) {
6273 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6274 uint32_t match = priv & ppriv;
6275
6276 /*
6277 * No PRIV_DTRACE_* privileges...
6278 */
6279 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6280 DTRACE_PRIV_KERNEL)) == 0)
6281 return (0);
6282
6283 /*
6284 * No matching bits, but there were bits to match...
6285 */
6286 if (match == 0 && ppriv != 0)
6287 return (0);
6288
6289 /*
6290 * Need to have permissions to the process, but don't...
6291 */
6292 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6293 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6294 return (0);
6295 }
6296
6297 /*
6298 * Need to be in the same zone unless we possess the
6299 * privilege to examine all zones.
6300 */
6301 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6302 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6303 return (0);
6304 }
6305 }
6306
6307 return (1);
6308 }
6309
6310 /*
6311 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6312 * consists of input pattern strings and an ops-vector to evaluate them.
6313 * This function returns >0 for match, 0 for no match, and <0 for error.
6314 */
6315 static int
6316 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6317 uint32_t priv, uid_t uid, zoneid_t zoneid)
6318 {
6319 dtrace_provider_t *pvp = prp->dtpr_provider;
6320 int rv;
6321
6322 if (pvp->dtpv_defunct)
6323 return (0);
6324
6325 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6326 return (rv);
6327
6328 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6329 return (rv);
6330
6331 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6332 return (rv);
6333
6334 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6335 return (rv);
6336
6337 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6338 return (0);
6339
6340 return (rv);
6341 }
6342
6343 /*
6344 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6345 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
6346 * libc's version, the kernel version only applies to 8-bit ASCII strings.
6347 * In addition, all of the recursion cases except for '*' matching have been
6348 * unwound. For '*', we still implement recursive evaluation, but a depth
6349 * counter is maintained and matching is aborted if we recurse too deep.
6350 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
6351 */
6352 static int
6353 dtrace_match_glob(const char *s, const char *p, int depth)
6354 {
6355 const char *olds;
6356 char s1, c;
6357 int gs;
6358
6359 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
6360 return (-1);
6361
6362 if (s == NULL)
6363 s = ""; /* treat NULL as empty string */
6364
6365 top:
6366 olds = s;
6367 s1 = *s++;
6368
6369 if (p == NULL)
6370 return (0);
6371
6372 if ((c = *p++) == '\0')
6373 return (s1 == '\0');
6374
6375 switch (c) {
6376 case '[': {
6377 int ok = 0, notflag = 0;
6378 char lc = '\0';
6379
6380 if (s1 == '\0')
6381 return (0);
6382
6383 if (*p == '!') {
6384 notflag = 1;
6385 p++;
6386 }
6387
6388 if ((c = *p++) == '\0')
6389 return (0);
6390
6391 do {
6392 if (c == '-' && lc != '\0' && *p != ']') {
6393 if ((c = *p++) == '\0')
6394 return (0);
6395 if (c == '\\' && (c = *p++) == '\0')
6396 return (0);
6397
6398 if (notflag) {
6399 if (s1 < lc || s1 > c)
6400 ok++;
6401 else
6402 return (0);
6403 } else if (lc <= s1 && s1 <= c)
6404 ok++;
6405
6406 } else if (c == '\\' && (c = *p++) == '\0')
6407 return (0);
6408
6409 lc = c; /* save left-hand 'c' for next iteration */
6410
6411 if (notflag) {
6412 if (s1 != c)
6413 ok++;
6414 else
6415 return (0);
6416 } else if (s1 == c)
6417 ok++;
6418
6419 if ((c = *p++) == '\0')
6420 return (0);
6421
6422 } while (c != ']');
6423
6424 if (ok)
6425 goto top;
6426
6427 return (0);
6428 }
6429
6430 case '\\':
6431 if ((c = *p++) == '\0')
6432 return (0);
6433 /*FALLTHRU*/
6434
6435 default:
6436 if (c != s1)
6437 return (0);
6438 /*FALLTHRU*/
6439
6440 case '?':
6441 if (s1 != '\0')
6442 goto top;
6443 return (0);
6444
6445 case '*':
6446 while (*p == '*')
6447 p++; /* consecutive *'s are identical to a single one */
6448
6449 if (*p == '\0')
6450 return (1);
6451
6452 for (s = olds; *s != '\0'; s++) {
6453 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
6454 return (gs);
6455 }
6456
6457 return (0);
6458 }
6459 }
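/*
 * Usage sketch (illustrative only): dtrace_match_glob() returns a positive
 * value on a match and zero otherwise, e.g.
 *
 *	dtrace_match_glob("read", "re*", 0)     returns non-zero (wildcard)
 *	dtrace_match_glob("bat", "[a-c]at", 0)  returns non-zero (char class)
 *	dtrace_match_glob("read", "write", 0)   returns zero (no match)
 *
 * A negative value is returned only if '*' matching recurses beyond
 * DTRACE_PROBEKEY_MAXDEPTH.
 */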
6460
6461 /*ARGSUSED*/
6462 static int
6463 dtrace_match_string(const char *s, const char *p, int depth)
6464 {
6465 return (s != NULL && strcmp(s, p) == 0);
6466 }
6467
6468 /*ARGSUSED*/
6469 static int
6470 dtrace_match_nul(const char *s, const char *p, int depth)
6471 {
6472 return (1); /* always match the empty pattern */
6473 }
6474
6475 /*ARGSUSED*/
6476 static int
6477 dtrace_match_nonzero(const char *s, const char *p, int depth)
6478 {
6479 return (s != NULL && s[0] != '\0');
6480 }
6481
6482 static int
6483 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
6484 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
6485 {
6486 dtrace_probe_t template, *probe;
6487 dtrace_hash_t *hash = NULL;
6488 int len, best = INT_MAX, nmatched = 0;
6489 dtrace_id_t i;
6490
6491 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
6492
6493 /*
6494 * If the probe ID is specified in the key, just lookup by ID and
6495 * invoke the match callback once if a matching probe is found.
6496 */
6497 if (pkp->dtpk_id != DTRACE_IDNONE) {
6498 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
6499 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
6500 (void) (*matched)(probe, arg);
6501 nmatched++;
6502 }
6503 return (nmatched);
6504 }
6505
6506 template.dtpr_mod = (char *)pkp->dtpk_mod;
6507 template.dtpr_func = (char *)pkp->dtpk_func;
6508 template.dtpr_name = (char *)pkp->dtpk_name;
6509
6510 /*
6511 * We want to find the most distinct of the module name, function
6512 * name, and name. So for each one that is not a glob pattern or
6513 * empty string, we perform a lookup in the corresponding hash and
6514 * use the hash table with the fewest collisions to do our search.
6515 */
6516 if (pkp->dtpk_mmatch == &dtrace_match_string &&
6517 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
6518 best = len;
6519 hash = dtrace_bymod;
6520 }
6521
6522 if (pkp->dtpk_fmatch == &dtrace_match_string &&
6523 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
6524 best = len;
6525 hash = dtrace_byfunc;
6526 }
6527
6528 if (pkp->dtpk_nmatch == &dtrace_match_string &&
6529 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
6530 best = len;
6531 hash = dtrace_byname;
6532 }
6533
6534 /*
6535 * If we did not select a hash table, iterate over every probe and
6536 * invoke our callback for each one that matches our input probe key.
6537 */
6538 if (hash == NULL) {
6539 for (i = 0; i < dtrace_nprobes; i++) {
6540 if ((probe = dtrace_probes[i]) == NULL ||
6541 dtrace_match_probe(probe, pkp, priv, uid,
6542 zoneid) <= 0)
6543 continue;
6544
6545 nmatched++;
6546
6547 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
6548 break;
6549 }
6550
6551 return (nmatched);
6552 }
6553
6554 /*
6555 * If we selected a hash table, iterate over each probe of the same key
6556 * name and invoke the callback for every probe that matches the other
6557 * attributes of our input probe key.
6558 */
6559 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
6560 probe = *(DTRACE_HASHNEXT(hash, probe))) {
6561
6562 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
6563 continue;
6564
6565 nmatched++;
6566
6567 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
6568 break;
6569 }
6570
6571 return (nmatched);
6572 }
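/*
 * For example (illustrative probe names): for
 * "fbt:mach_kernel:thread_invoke:entry" the module, function and name fields
 * are all plain strings, so whichever of dtrace_bymod, dtrace_byfunc and
 * dtrace_byname reports the fewest collisions is walked; for "fbt:::entry"
 * only dtrace_byname is a candidate; and for "fbt:::" no hash is selected,
 * so every probe in dtrace_probes is examined.
 */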
6573
6574 /*
6575 * Return the function pointer that dtrace_match_probe() should use to compare the
6576 * specified pattern with a string. For NULL or empty patterns, we select
6577 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
6578 * For non-empty non-glob strings, we use dtrace_match_string().
6579 */
6580 static dtrace_probekey_f *
6581 dtrace_probekey_func(const char *p)
6582 {
6583 char c;
6584
6585 if (p == NULL || *p == '\0')
6586 return (&dtrace_match_nul);
6587
6588 while ((c = *p++) != '\0') {
6589 if (c == '[' || c == '?' || c == '*' || c == '\\')
6590 return (&dtrace_match_glob);
6591 }
6592
6593 return (&dtrace_match_string);
6594 }
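/*
 * For example (illustrative): dtrace_probekey_func("") and
 * dtrace_probekey_func(NULL) select dtrace_match_nul(),
 * dtrace_probekey_func("syscall") selects dtrace_match_string(), and
 * dtrace_probekey_func("sys*") selects dtrace_match_glob().
 */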
6595
6596 /*
6597 * Build a probe comparison key for use with dtrace_match_probe() from the
6598 * given probe description. By convention, a null key only matches anchored
6599 * probes: if each field is the empty string, reset dtpk_fmatch to
6600 * dtrace_match_nonzero().
6601 */
6602 static void
6603 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
6604 {
6605 pkp->dtpk_prov = pdp->dtpd_provider;
6606 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
6607
6608 pkp->dtpk_mod = pdp->dtpd_mod;
6609 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
6610
6611 pkp->dtpk_func = pdp->dtpd_func;
6612 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
6613
6614 pkp->dtpk_name = pdp->dtpd_name;
6615 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
6616
6617 pkp->dtpk_id = pdp->dtpd_id;
6618
6619 if (pkp->dtpk_id == DTRACE_IDNONE &&
6620 pkp->dtpk_pmatch == &dtrace_match_nul &&
6621 pkp->dtpk_mmatch == &dtrace_match_nul &&
6622 pkp->dtpk_fmatch == &dtrace_match_nul &&
6623 pkp->dtpk_nmatch == &dtrace_match_nul)
6624 pkp->dtpk_fmatch = &dtrace_match_nonzero;
6625 }
6626
6627 /*
6628 * DTrace Provider-to-Framework API Functions
6629 *
6630 * These functions implement much of the Provider-to-Framework API, as
6631 * described in <sys/dtrace.h>. The parts of the API not in this section are
6632 * the functions in the API for probe management (found below), and
6633 * dtrace_probe() itself (found above).
6634 */
6635
6636 /*
6637 * Register the calling provider with the DTrace framework. This should
6638 * generally be called by DTrace providers in their attach(9E) entry point.
6639 */
6640 int
6641 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
6642 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
6643 {
6644 dtrace_provider_t *provider;
6645
6646 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
6647 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6648 "arguments", name ? name : "<NULL>");
6649 return (EINVAL);
6650 }
6651
6652 if (name[0] == '\0' || dtrace_badname(name)) {
6653 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6654 "provider name", name);
6655 return (EINVAL);
6656 }
6657
6658 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
6659 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
6660 pops->dtps_destroy == NULL ||
6661 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
6662 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6663 "provider ops", name);
6664 return (EINVAL);
6665 }
6666
6667 if (dtrace_badattr(&pap->dtpa_provider) ||
6668 dtrace_badattr(&pap->dtpa_mod) ||
6669 dtrace_badattr(&pap->dtpa_func) ||
6670 dtrace_badattr(&pap->dtpa_name) ||
6671 dtrace_badattr(&pap->dtpa_args)) {
6672 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6673 "provider attributes", name);
6674 return (EINVAL);
6675 }
6676
6677 if (priv & ~DTRACE_PRIV_ALL) {
6678 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6679 "privilege attributes", name);
6680 return (EINVAL);
6681 }
6682
6683 if ((priv & DTRACE_PRIV_KERNEL) &&
6684 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
6685 pops->dtps_usermode == NULL) {
6686 cmn_err(CE_WARN, "failed to register provider '%s': need "
6687 "dtps_usermode() op for given privilege attributes", name);
6688 return (EINVAL);
6689 }
6690
6691 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
6692 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
6693 (void) strcpy(provider->dtpv_name, name);
6694
6695 provider->dtpv_attr = *pap;
6696 provider->dtpv_priv.dtpp_flags = priv;
6697 if (cr != NULL) {
6698 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
6699 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
6700 }
6701 provider->dtpv_pops = *pops;
6702
6703 if (pops->dtps_provide == NULL) {
6704 ASSERT(pops->dtps_provide_module != NULL);
6705 provider->dtpv_pops.dtps_provide =
6706 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
6707 }
6708
6709 if (pops->dtps_provide_module == NULL) {
6710 ASSERT(pops->dtps_provide != NULL);
6711 provider->dtpv_pops.dtps_provide_module =
6712 (void (*)(void *, struct modctl *))dtrace_nullop;
6713 }
6714
6715 if (pops->dtps_suspend == NULL) {
6716 ASSERT(pops->dtps_resume == NULL);
6717 provider->dtpv_pops.dtps_suspend =
6718 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6719 provider->dtpv_pops.dtps_resume =
6720 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6721 }
6722
6723 provider->dtpv_arg = arg;
6724 *idp = (dtrace_provider_id_t)provider;
6725
6726 if (pops == &dtrace_provider_ops) {
6727 lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
6728 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
6729 ASSERT(dtrace_anon.dta_enabling == NULL);
6730
6731 /*
6732 * We make sure that the DTrace provider is at the head of
6733 * the provider chain.
6734 */
6735 provider->dtpv_next = dtrace_provider;
6736 dtrace_provider = provider;
6737 return (0);
6738 }
6739
6740 lck_mtx_lock(&dtrace_provider_lock);
6741 lck_mtx_lock(&dtrace_lock);
6742
6743 /*
6744 * If there is at least one provider registered, we'll add this
6745 * provider after the first provider.
6746 */
6747 if (dtrace_provider != NULL) {
6748 provider->dtpv_next = dtrace_provider->dtpv_next;
6749 dtrace_provider->dtpv_next = provider;
6750 } else {
6751 dtrace_provider = provider;
6752 }
6753
6754 if (dtrace_retained != NULL) {
6755 dtrace_enabling_provide(provider);
6756
6757 /*
6758 * Now we need to call dtrace_enabling_matchall() -- which
6759 * will acquire cpu_lock and dtrace_lock. We therefore need
6760 * to drop all of our locks before calling into it...
6761 */
6762 lck_mtx_unlock(&dtrace_lock);
6763 lck_mtx_unlock(&dtrace_provider_lock);
6764 dtrace_enabling_matchall();
6765
6766 return (0);
6767 }
6768
6769 lck_mtx_unlock(&dtrace_lock);
6770 lck_mtx_unlock(&dtrace_provider_lock);
6771
6772 return (0);
6773 }
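/*
 * Illustrative sketch of provider registration (hypothetical "example"
 * provider; example_attr and example_pops are assumed to be defined by the
 * provider, not by this file).  Per the validation above, the ops vector
 * must supply dtps_provide (or dtps_provide_module), dtps_enable,
 * dtps_disable and dtps_destroy, and dtps_suspend/dtps_resume only as a
 * pair.
 */
#if 0
static dtrace_provider_id_t example_id;

	/* Typically called from the provider's attach path. */
	if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
	    NULL, &example_pops, NULL, &example_id) != 0) {
		/* Registration failed; the provider should not continue. */
		return;
	}
#endif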
6774
6775 /*
6776 * Unregister the specified provider from the DTrace framework. This should
6777 * generally be called by DTrace providers in their detach(9E) entry point.
6778 */
6779 int
6780 dtrace_unregister(dtrace_provider_id_t id)
6781 {
6782 dtrace_provider_t *old = (dtrace_provider_t *)id;
6783 dtrace_provider_t *prev = NULL;
6784 int i, self = 0;
6785 dtrace_probe_t *probe, *first = NULL;
6786
6787 if (old->dtpv_pops.dtps_enable ==
6788 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
6789 /*
6790 * If DTrace itself is the provider, we're called with locks
6791 * already held.
6792 */
6793 ASSERT(old == dtrace_provider);
6794 ASSERT(dtrace_devi != NULL);
6795 lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
6796 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
6797
6798 self = 1;
6799
6800 if (dtrace_provider->dtpv_next != NULL) {
6801 /*
6802 * There's another provider here; return failure.
6803 */
6804 return (EBUSY);
6805 }
6806 } else {
6807 lck_mtx_lock(&dtrace_provider_lock);
6808 lck_mtx_lock(&mod_lock);
6809 lck_mtx_lock(&dtrace_lock);
6810 }
6811
6812 /*
6813 * If anyone has /dev/dtrace open, or if there are anonymous enabled
6814 * probes, we refuse to let providers slither away, unless this
6815 * provider has already been explicitly invalidated.
6816 */
6817 if (!old->dtpv_defunct &&
6818 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
6819 dtrace_anon.dta_state->dts_necbs > 0))) {
6820 if (!self) {
6821 lck_mtx_unlock(&dtrace_lock);
6822 lck_mtx_unlock(&mod_lock);
6823 lck_mtx_unlock(&dtrace_provider_lock);
6824 }
6825 return (EBUSY);
6826 }
6827
6828 /*
6829 * Attempt to destroy the probes associated with this provider.
6830 */
6831 for (i = 0; i < dtrace_nprobes; i++) {
6832 if ((probe = dtrace_probes[i]) == NULL)
6833 continue;
6834
6835 if (probe->dtpr_provider != old)
6836 continue;
6837
6838 if (probe->dtpr_ecb == NULL)
6839 continue;
6840
6841 /*
6842 * We have at least one ECB; we can't remove this provider.
6843 */
6844 if (!self) {
6845 lck_mtx_unlock(&dtrace_lock);
6846 lck_mtx_unlock(&mod_lock);
6847 lck_mtx_unlock(&dtrace_provider_lock);
6848 }
6849 return (EBUSY);
6850 }
6851
6852 /*
6853 * All of the probes for this provider are disabled; we can safely
6854 * remove all of them from their hash chains and from the probe array.
6855 */
6856 for (i = 0; i < dtrace_nprobes; i++) {
6857 if ((probe = dtrace_probes[i]) == NULL)
6858 continue;
6859
6860 if (probe->dtpr_provider != old)
6861 continue;
6862
6863 dtrace_probes[i] = NULL;
6864
6865 dtrace_hash_remove(dtrace_bymod, probe);
6866 dtrace_hash_remove(dtrace_byfunc, probe);
6867 dtrace_hash_remove(dtrace_byname, probe);
6868
6869 if (first == NULL) {
6870 first = probe;
6871 probe->dtpr_nextmod = NULL;
6872 } else {
6873 probe->dtpr_nextmod = first;
6874 first = probe;
6875 }
6876 }
6877
6878 /*
6879 * The provider's probes have been removed from the hash chains and
6880 * from the probe array. Now issue a dtrace_sync() to be sure that
6881 * everyone has cleared out from any probe array processing.
6882 */
6883 dtrace_sync();
6884
6885 for (probe = first; probe != NULL; probe = first) {
6886 first = probe->dtpr_nextmod;
6887
6888 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
6889 probe->dtpr_arg);
6890 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
6891 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
6892 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
6893 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
6894 #if !defined(__APPLE__)
6895 kmem_free(probe, sizeof (dtrace_probe_t));
6896 #else
6897 zfree(dtrace_probe_t_zone, probe);
6898 #endif
6899 }
6900
6901 if ((prev = dtrace_provider) == old) {
6902 ASSERT(self || dtrace_devi == NULL);
6903 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
6904 dtrace_provider = old->dtpv_next;
6905 } else {
6906 while (prev != NULL && prev->dtpv_next != old)
6907 prev = prev->dtpv_next;
6908
6909 if (prev == NULL) {
6910 panic("attempt to unregister non-existent "
6911 "dtrace provider %p\n", (void *)id);
6912 }
6913
6914 prev->dtpv_next = old->dtpv_next;
6915 }
6916
6917 if (!self) {
6918 lck_mtx_unlock(&dtrace_lock);
6919 lck_mtx_unlock(&mod_lock);
6920 lck_mtx_unlock(&dtrace_provider_lock);
6921 }
6922
6923 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
6924 kmem_free(old, sizeof (dtrace_provider_t));
6925
6926 return (0);
6927 }
6928
6929 /*
6930 * Invalidate the specified provider. All subsequent probe lookups for the
6931 * specified provider will fail, but its probes will not be removed.
6932 */
6933 void
6934 dtrace_invalidate(dtrace_provider_id_t id)
6935 {
6936 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
6937
6938 ASSERT(pvp->dtpv_pops.dtps_enable !=
6939 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
6940
6941 lck_mtx_lock(&dtrace_provider_lock);
6942 lck_mtx_lock(&dtrace_lock);
6943
6944 pvp->dtpv_defunct = 1;
6945
6946 lck_mtx_unlock(&dtrace_lock);
6947 lck_mtx_unlock(&dtrace_provider_lock);
6948 }
6949
6950 /*
6951 * Indicate whether or not DTrace has attached.
6952 */
6953 int
6954 dtrace_attached(void)
6955 {
6956 /*
6957 * dtrace_provider will be non-NULL iff the DTrace driver has
6958 * attached. (It's non-NULL because DTrace is always itself a
6959 * provider.)
6960 */
6961 return (dtrace_provider != NULL);
6962 }
6963
6964 /*
6965 * Remove all the unenabled probes for the given provider. This function is
6966 * not unlike dtrace_unregister(), except that it doesn't remove the provider
6967 * -- just as many of its associated probes as it can.
6968 */
6969 int
6970 dtrace_condense(dtrace_provider_id_t id)
6971 {
6972 dtrace_provider_t *prov = (dtrace_provider_t *)id;
6973 int i;
6974 dtrace_probe_t *probe;
6975
6976 /*
6977 * Make sure this isn't the dtrace provider itself.
6978 */
6979 ASSERT(prov->dtpv_pops.dtps_enable !=
6980 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
6981
6982 lck_mtx_lock(&dtrace_provider_lock);
6983 lck_mtx_lock(&dtrace_lock);
6984
6985 /*
6986 * Attempt to destroy the probes associated with this provider.
6987 */
6988 for (i = 0; i < dtrace_nprobes; i++) {
6989 if ((probe = dtrace_probes[i]) == NULL)
6990 continue;
6991
6992 if (probe->dtpr_provider != prov)
6993 continue;
6994
6995 if (probe->dtpr_ecb != NULL)
6996 continue;
6997
6998 dtrace_probes[i] = NULL;
6999
7000 dtrace_hash_remove(dtrace_bymod, probe);
7001 dtrace_hash_remove(dtrace_byfunc, probe);
7002 dtrace_hash_remove(dtrace_byname, probe);
7003
7004 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7005 probe->dtpr_arg);
7006 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7007 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7008 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7009 #if !defined(__APPLE__)
7010 kmem_free(probe, sizeof (dtrace_probe_t));
7011 #else
7012 zfree(dtrace_probe_t_zone, probe);
7013 #endif
7014 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7015 }
7016
7017 lck_mtx_unlock(&dtrace_lock);
7018 lck_mtx_unlock(&dtrace_provider_lock);
7019
7020 return (0);
7021 }
7022
7023 /*
7024 * DTrace Probe Management Functions
7025 *
7026 * The functions in this section perform the DTrace probe management,
7027 * including functions to create probes, look-up probes, and call into the
7028 * providers to request that probes be provided. Some of these functions are
7029 * in the Provider-to-Framework API; these functions can be identified by the
7030 * fact that they are not declared "static".
7031 */
7032
7033 /*
7034 * Create a probe with the specified module name, function name, and name.
7035 */
7036 dtrace_id_t
7037 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7038 const char *func, const char *name, int aframes, void *arg)
7039 {
7040 dtrace_probe_t *probe, **probes;
7041 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7042 dtrace_id_t id;
7043
7044 if (provider == dtrace_provider) {
7045 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
7046 } else {
7047 lck_mtx_lock(&dtrace_lock);
7048 }
7049
7050 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7051 VM_BESTFIT | VM_SLEEP);
7052 #if !defined(__APPLE__)
7053 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7054 #else
7055 probe = zalloc(dtrace_probe_t_zone);
7056 bzero(probe, sizeof (dtrace_probe_t));
7057 #endif
7058
7059 probe->dtpr_id = id;
7060 probe->dtpr_gen = dtrace_probegen++;
7061 probe->dtpr_mod = dtrace_strdup(mod);
7062 probe->dtpr_func = dtrace_strdup(func);
7063 probe->dtpr_name = dtrace_strdup(name);
7064 probe->dtpr_arg = arg;
7065 probe->dtpr_aframes = aframes;
7066 probe->dtpr_provider = provider;
7067
7068 dtrace_hash_add(dtrace_bymod, probe);
7069 dtrace_hash_add(dtrace_byfunc, probe);
7070 dtrace_hash_add(dtrace_byname, probe);
7071
7072 if (id - 1 >= dtrace_nprobes) {
7073 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7074 size_t nsize = osize << 1;
7075
7076 if (nsize == 0) {
7077 ASSERT(osize == 0);
7078 ASSERT(dtrace_probes == NULL);
7079 nsize = sizeof (dtrace_probe_t *);
7080 }
7081
7082 probes = kmem_zalloc(nsize, KM_SLEEP);
7083
7084 if (dtrace_probes == NULL) {
7085 ASSERT(osize == 0);
7086 dtrace_probes = probes;
7087 dtrace_nprobes = 1;
7088 } else {
7089 dtrace_probe_t **oprobes = dtrace_probes;
7090
7091 bcopy(oprobes, probes, osize);
7092 dtrace_membar_producer();
7093 dtrace_probes = probes;
7094
7095 dtrace_sync();
7096
7097 /*
7098 * All CPUs are now seeing the new probes array; we can
7099 * safely free the old array.
7100 */
7101 kmem_free(oprobes, osize);
7102 dtrace_nprobes <<= 1;
7103 }
7104
7105 ASSERT(id - 1 < dtrace_nprobes);
7106 }
7107
7108 ASSERT(dtrace_probes[id - 1] == NULL);
7109 dtrace_probes[id - 1] = probe;
7110
7111 if (provider != dtrace_provider)
7112 lck_mtx_unlock(&dtrace_lock);
7113
7114 return (id);
7115 }
7116
7117 static dtrace_probe_t *
7118 dtrace_probe_lookup_id(dtrace_id_t id)
7119 {
7120 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
7121
7122 if (id == 0 || id > dtrace_nprobes)
7123 return (NULL);
7124
7125 return (dtrace_probes[id - 1]);
7126 }
7127
7128 static int
7129 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7130 {
7131 *((dtrace_id_t *)arg) = probe->dtpr_id;
7132
7133 return (DTRACE_MATCH_DONE);
7134 }
7135
7136 /*
7137 * Look up a probe based on provider and one or more of module name, function
7138 * name and probe name.
7139 */
7140 dtrace_id_t
7141 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
7142 const char *func, const char *name)
7143 {
7144 dtrace_probekey_t pkey;
7145 dtrace_id_t id;
7146 int match;
7147
7148 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7149 pkey.dtpk_pmatch = &dtrace_match_string;
7150 pkey.dtpk_mod = mod;
7151 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7152 pkey.dtpk_func = func;
7153 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7154 pkey.dtpk_name = name;
7155 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7156 pkey.dtpk_id = DTRACE_IDNONE;
7157
7158 lck_mtx_lock(&dtrace_lock);
7159 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7160 dtrace_probe_lookup_match, &id);
7161 lck_mtx_unlock(&dtrace_lock);
7162
7163 ASSERT(match == 1 || match == 0);
7164 return (match ? id : 0);
7165 }
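/*
 * Illustrative sketch (hypothetical provider code): a provider's
 * dtps_provide entry point typically checks whether a probe already exists
 * before creating it, e.g.
 */
#if 0
	if (dtrace_probe_lookup(example_id, "example_mod", "example_func",
	    "entry") == 0) {
		(void) dtrace_probe_create(example_id, "example_mod",
		    "example_func", "entry", 0, NULL);
	}
#endif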
7166
7167 /*
7168 * Returns the probe argument associated with the specified probe.
7169 */
7170 void *
7171 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7172 {
7173 dtrace_probe_t *probe;
7174 void *rval = NULL;
7175
7176 lck_mtx_lock(&dtrace_lock);
7177
7178 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7179 probe->dtpr_provider == (dtrace_provider_t *)id)
7180 rval = probe->dtpr_arg;
7181
7182 lck_mtx_unlock(&dtrace_lock);
7183
7184 return (rval);
7185 }
7186
7187 /*
7188 * Copy a probe into a probe description.
7189 */
7190 static void
7191 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7192 {
7193 bzero(pdp, sizeof (dtrace_probedesc_t));
7194 pdp->dtpd_id = prp->dtpr_id;
7195
7196 (void) strlcpy(pdp->dtpd_provider,
7197 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN);
7198
7199 (void) strlcpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN);
7200 (void) strlcpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN);
7201 (void) strlcpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN);
7202 }
7203
7204 /*
7205 * Called to indicate that a probe -- or probes -- should be provided by a
7206 * specified provider. If the specified description is NULL, the provider will
7207 * be told to provide all of its probes. (This is done whenever a new
7208 * consumer comes along, or whenever a retained enabling is to be matched.) If
7209 * the specified description is non-NULL, the provider is given the
7210 * opportunity to dynamically provide the specified probe, allowing providers
7211 * to support the creation of probes on-the-fly. (So-called _autocreated_
7212 * probes.) If the provider is NULL, the operations will be applied to all
7213 * providers; if the provider is non-NULL the operations will only be applied
7214 * to the specified provider. The dtrace_provider_lock must be held, and the
7215 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7216 * will need to grab the dtrace_lock when it reenters the framework through
7217 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7218 */
7219 static void
7220 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7221 {
7222 struct modctl *ctl;
7223 int all = 0;
7224
7225 lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
7226
7227 if (prv == NULL) {
7228 all = 1;
7229 prv = dtrace_provider;
7230 }
7231
7232 do {
7233 kmod_info_t *ktl;
7234 /*
7235 * First, call the blanket provide operation.
7236 */
7237 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7238
7239 #if !defined(__APPLE__)
7240 /*
7241 * Now call the per-module provide operation. We will grab
7242 * mod_lock to prevent the list from being modified. Note
7243 * that this also prevents the mod_busy bits from changing.
7244 * (mod_busy can only be changed with mod_lock held.)
7245 */
7246 lck_mtx_lock(&mod_lock);
7247
7248 ctl = &modules;
7249 do {
7250 if (ctl->mod_busy || ctl->mod_mp == NULL)
7251 continue;
7252
7253 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7254
7255 } while ((ctl = ctl->mod_next) != &modules);
7256
7257 lck_mtx_unlock(&mod_lock);
7258 #else
7259 #if 0 /* XXX Workaround for PR_4643546 XXX */
7260 simple_lock(&kmod_lock);
7261
7262 ktl = kmod;
7263 while (ktl) {
7264 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ktl);
7265 ktl = ktl->next;
7266 }
7267
7268 simple_unlock(&kmod_lock);
7269 #else
7270 /*
7271 * Don't bother to iterate over the kmod list. At present only fbt
7272 * offers a provide_module in its dtpv_pops, and then it ignores the
7273 * module anyway.
7274 */
7275 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, NULL);
7276 #endif
7277 #endif /* __APPLE__ */
7278 } while (all && (prv = prv->dtpv_next) != NULL);
7279 }
7280
7281 /*
7282 * Iterate over each probe, and call the Framework-to-Provider API function
7283 * denoted by offs.
7284 */
7285 static void
7286 dtrace_probe_foreach(uintptr_t offs)
7287 {
7288 dtrace_provider_t *prov;
7289 void (*func)(void *, dtrace_id_t, void *);
7290 dtrace_probe_t *probe;
7291 dtrace_icookie_t cookie;
7292 int i;
7293
7294 /*
7295 * We disable interrupts to walk through the probe array. This is
7296 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7297 * won't see stale data.
7298 */
7299 cookie = dtrace_interrupt_disable();
7300
7301 for (i = 0; i < dtrace_nprobes; i++) {
7302 if ((probe = dtrace_probes[i]) == NULL)
7303 continue;
7304
7305 if (probe->dtpr_ecb == NULL) {
7306 /*
7307 * This probe isn't enabled -- don't call the function.
7308 */
7309 continue;
7310 }
7311
7312 prov = probe->dtpr_provider;
7313 func = *((void(**)(void *, dtrace_id_t, void *))
7314 ((uintptr_t)&prov->dtpv_pops + offs));
7315
7316 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7317 }
7318
7319 dtrace_interrupt_enable(cookie);
7320 }
7321
7322 static int
7323 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7324 {
7325 dtrace_probekey_t pkey;
7326 uint32_t priv;
7327 uid_t uid;
7328 zoneid_t zoneid;
7329
7330 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
7331
7332 dtrace_ecb_create_cache = NULL;
7333
7334 if (desc == NULL) {
7335 /*
7336 * If we're passed a NULL description, we're being asked to
7337 * create an ECB with a NULL probe.
7338 */
7339 (void) dtrace_ecb_create_enable(NULL, enab);
7340 return (0);
7341 }
7342
7343 dtrace_probekey(desc, &pkey);
7344 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7345 &priv, &uid, &zoneid);
7346
7347 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
7348 enab));
7349 }
7350
7351 /*
7352 * DTrace Helper Provider Functions
7353 */
7354 static void
7355 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
7356 {
7357 attr->dtat_name = DOF_ATTR_NAME(dofattr);
7358 attr->dtat_data = DOF_ATTR_DATA(dofattr);
7359 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
7360 }
7361
7362 static void
7363 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
7364 const dof_provider_t *dofprov, char *strtab)
7365 {
7366 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
7367 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
7368 dofprov->dofpv_provattr);
7369 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
7370 dofprov->dofpv_modattr);
7371 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
7372 dofprov->dofpv_funcattr);
7373 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
7374 dofprov->dofpv_nameattr);
7375 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
7376 dofprov->dofpv_argsattr);
7377 }
7378
7379 static void
7380 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7381 {
7382 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7383 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7384 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
7385 dof_provider_t *provider;
7386 dof_probe_t *probe;
7387 uint32_t *off, *enoff;
7388 uint8_t *arg;
7389 char *strtab;
7390 uint_t i, nprobes;
7391 dtrace_helper_provdesc_t dhpv;
7392 dtrace_helper_probedesc_t dhpb;
7393 dtrace_meta_t *meta = dtrace_meta_pid;
7394 dtrace_mops_t *mops = &meta->dtm_mops;
7395 void *parg;
7396
7397 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7398 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7399 provider->dofpv_strtab * dof->dofh_secsize);
7400 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7401 provider->dofpv_probes * dof->dofh_secsize);
7402 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7403 provider->dofpv_prargs * dof->dofh_secsize);
7404 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7405 provider->dofpv_proffs * dof->dofh_secsize);
7406
7407 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7408 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
7409 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
7410 enoff = NULL;
7411
7412 /*
7413 * See dtrace_helper_provider_validate().
7414 */
7415 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
7416 provider->dofpv_prenoffs != DOF_SECT_NONE) {
7417 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7418 provider->dofpv_prenoffs * dof->dofh_secsize);
7419 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
7420 }
7421
7422 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
7423
7424 /*
7425 * Create the provider.
7426 */
7427 dtrace_dofprov2hprov(&dhpv, provider, strtab);
7428
7429 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
7430 return;
7431
7432 meta->dtm_count++;
7433
7434 /*
7435 * Create the probes.
7436 */
7437 for (i = 0; i < nprobes; i++) {
7438 probe = (dof_probe_t *)(uintptr_t)(daddr +
7439 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
7440
7441 dhpb.dthpb_mod = dhp->dofhp_mod;
7442 dhpb.dthpb_func = strtab + probe->dofpr_func;
7443 dhpb.dthpb_name = strtab + probe->dofpr_name;
7444 #if defined(__APPLE__)
7445 dhpb.dthpb_base = dhp->dofhp_addr;
7446 #else
7447 dhpb.dthpb_base = probe->dofpr_addr;
7448 #endif
7449 dhpb.dthpb_offs = off + probe->dofpr_offidx;
7450 dhpb.dthpb_noffs = probe->dofpr_noffs;
7451 if (enoff != NULL) {
7452 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
7453 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
7454 } else {
7455 dhpb.dthpb_enoffs = NULL;
7456 dhpb.dthpb_nenoffs = 0;
7457 }
7458 dhpb.dthpb_args = arg + probe->dofpr_argidx;
7459 dhpb.dthpb_nargc = probe->dofpr_nargc;
7460 dhpb.dthpb_xargc = probe->dofpr_xargc;
7461 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
7462 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
7463
7464 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
7465 }
7466 }
7467
7468 static void
7469 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
7470 {
7471 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7472 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7473 int i;
7474
7475 lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
7476
7477 for (i = 0; i < dof->dofh_secnum; i++) {
7478 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7479 dof->dofh_secoff + i * dof->dofh_secsize);
7480
7481 if (sec->dofs_type != DOF_SECT_PROVIDER)
7482 continue;
7483
7484 dtrace_helper_provide_one(dhp, sec, pid);
7485 }
7486
7487 /*
7488 * We may have just created probes, so we must now rematch against
7489 * any retained enablings. Note that this call will acquire both
7490 * cpu_lock and dtrace_lock; the fact that we are holding
7491 * dtrace_meta_lock now is what defines the ordering with respect to
7492 * these three locks.
7493 */
7494 dtrace_enabling_matchall();
7495 }
7496
7497 static void
7498 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7499 {
7500 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7501 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7502 dof_sec_t *str_sec;
7503 dof_provider_t *provider;
7504 char *strtab;
7505 dtrace_helper_provdesc_t dhpv;
7506 dtrace_meta_t *meta = dtrace_meta_pid;
7507 dtrace_mops_t *mops = &meta->dtm_mops;
7508
7509 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7510 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7511 provider->dofpv_strtab * dof->dofh_secsize);
7512
7513 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7514
7515 /*
7516 * Create the provider.
7517 */
7518 dtrace_dofprov2hprov(&dhpv, provider, strtab);
7519
7520 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
7521
7522 meta->dtm_count--;
7523 }
7524
7525 static void
7526 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
7527 {
7528 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7529 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7530 int i;
7531
7532 lck_mtx_assert(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
7533
7534 for (i = 0; i < dof->dofh_secnum; i++) {
7535 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7536 dof->dofh_secoff + i * dof->dofh_secsize);
7537
7538 if (sec->dofs_type != DOF_SECT_PROVIDER)
7539 continue;
7540
7541 dtrace_helper_provider_remove_one(dhp, sec, pid);
7542 }
7543 }
7544
7545 /*
7546 * DTrace Meta Provider-to-Framework API Functions
7547 *
7548 * These functions implement the Meta Provider-to-Framework API, as described
7549 * in <sys/dtrace.h>.
7550 */
7551 int
7552 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
7553 dtrace_meta_provider_id_t *idp)
7554 {
7555 dtrace_meta_t *meta;
7556 dtrace_helpers_t *help, *next;
7557 int i;
7558
7559 *idp = DTRACE_METAPROVNONE;
7560
7561 /*
7562 * We strictly don't need the name, but we hold onto it for
7563 * debuggability. All hail error queues!
7564 */
7565 if (name == NULL) {
7566 cmn_err(CE_WARN, "failed to register meta-provider: "
7567 "invalid name");
7568 return (EINVAL);
7569 }
7570
7571 if (mops == NULL ||
7572 mops->dtms_create_probe == NULL ||
7573 mops->dtms_provide_pid == NULL ||
7574 mops->dtms_remove_pid == NULL) {
7575 cmn_err(CE_WARN, "failed to register meta-provider %s: "
7576 "invalid ops", name);
7577 return (EINVAL);
7578 }
7579
7580 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
7581 meta->dtm_mops = *mops;
7582 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7583 (void) strcpy(meta->dtm_name, name);
7584 meta->dtm_arg = arg;
7585
7586 lck_mtx_lock(&dtrace_meta_lock);
7587 lck_mtx_lock(&dtrace_lock);
7588
7589 if (dtrace_meta_pid != NULL) {
7590 lck_mtx_unlock(&dtrace_lock);
7591 lck_mtx_unlock(&dtrace_meta_lock);
7592 cmn_err(CE_WARN, "failed to register meta-provider %s: "
7593 "user-land meta-provider exists", name);
7594 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
7595 kmem_free(meta, sizeof (dtrace_meta_t));
7596 return (EINVAL);
7597 }
7598
7599 dtrace_meta_pid = meta;
7600 *idp = (dtrace_meta_provider_id_t)meta;
7601
7602 /*
7603 * If there are providers and probes ready to go, pass them
7604 * off to the new meta provider now.
7605 */
7606
7607 help = dtrace_deferred_pid;
7608 dtrace_deferred_pid = NULL;
7609
7610 lck_mtx_unlock(&dtrace_lock);
7611
7612 while (help != NULL) {
7613 for (i = 0; i < help->dthps_nprovs; i++) {
7614 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
7615 help->dthps_pid);
7616 }
7617
7618 next = help->dthps_next;
7619 help->dthps_next = NULL;
7620 help->dthps_prev = NULL;
7621 help->dthps_deferred = 0;
7622 help = next;
7623 }
7624
7625 lck_mtx_unlock(&dtrace_meta_lock);
7626
7627 return (0);
7628 }
7629
7630 int
7631 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
7632 {
7633 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
7634
7635 lck_mtx_lock(&dtrace_meta_lock);
7636 lck_mtx_lock(&dtrace_lock);
7637
7638 if (old == dtrace_meta_pid) {
7639 pp = &dtrace_meta_pid;
7640 } else {
7641 panic("attempt to unregister non-existent "
7642 "dtrace meta-provider %p\n", (void *)old);
7643 }
7644
7645 if (old->dtm_count != 0) {
7646 lck_mtx_unlock(&dtrace_lock);
7647 lck_mtx_unlock(&dtrace_meta_lock);
7648 return (EBUSY);
7649 }
7650
7651 *pp = NULL;
7652
7653 lck_mtx_unlock(&dtrace_lock);
7654 lck_mtx_unlock(&dtrace_meta_lock);
7655
7656 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
7657 kmem_free(old, sizeof (dtrace_meta_t));
7658
7659 return (0);
7660 }
7661
7662
7663 /*
7664 * DTrace DIF Object Functions
7665 */
7666 static int
7667 dtrace_difo_err(uint_t pc, const char *format, ...)
7668 {
7669 if (dtrace_err_verbose) {
7670 va_list alist;
7671
7672 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
7673 va_start(alist, format);
7674 (void) vuprintf(format, alist);
7675 va_end(alist);
7676 }
7677
7678 #ifdef DTRACE_ERRDEBUG
7679 dtrace_errdebug(format);
7680 #endif
7681 return (1);
7682 }
7683
7684 /*
7685 * Validate a DTrace DIF object by checking the IR instructions. The following
7686 * rules are currently enforced by dtrace_difo_validate():
7687 *
7688 * 1. Each instruction must have a valid opcode
7689 * 2. Each register, string, variable, or subroutine reference must be valid
7690 * 3. No instruction can modify register %r0 (must be zero)
7691 * 4. All instruction reserved bits must be set to zero
7692 * 5. The last instruction must be a "ret" instruction
7693 * 6. All branch targets must reference a valid instruction _after_ the branch
7694 */
7695 static int
7696 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
7697 cred_t *cr)
7698 {
7699 int err = 0, i;
7700 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
7701 int kcheck;
7702 uint_t pc;
7703
7704 kcheck = cr == NULL ||
7705 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0;
7706
7707 dp->dtdo_destructive = 0;
7708
7709 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
7710 dif_instr_t instr = dp->dtdo_buf[pc];
7711
7712 uint_t r1 = DIF_INSTR_R1(instr);
7713 uint_t r2 = DIF_INSTR_R2(instr);
7714 uint_t rd = DIF_INSTR_RD(instr);
7715 uint_t rs = DIF_INSTR_RS(instr);
7716 uint_t label = DIF_INSTR_LABEL(instr);
7717 uint_t v = DIF_INSTR_VAR(instr);
7718 uint_t subr = DIF_INSTR_SUBR(instr);
7719 uint_t type = DIF_INSTR_TYPE(instr);
7720 uint_t op = DIF_INSTR_OP(instr);
7721
7722 switch (op) {
7723 case DIF_OP_OR:
7724 case DIF_OP_XOR:
7725 case DIF_OP_AND:
7726 case DIF_OP_SLL:
7727 case DIF_OP_SRL:
7728 case DIF_OP_SRA:
7729 case DIF_OP_SUB:
7730 case DIF_OP_ADD:
7731 case DIF_OP_MUL:
7732 case DIF_OP_SDIV:
7733 case DIF_OP_UDIV:
7734 case DIF_OP_SREM:
7735 case DIF_OP_UREM:
7736 case DIF_OP_COPYS:
7737 if (r1 >= nregs)
7738 err += efunc(pc, "invalid register %u\n", r1);
7739 if (r2 >= nregs)
7740 err += efunc(pc, "invalid register %u\n", r2);
7741 if (rd >= nregs)
7742 err += efunc(pc, "invalid register %u\n", rd);
7743 if (rd == 0)
7744 err += efunc(pc, "cannot write to %r0\n");
7745 break;
7746 case DIF_OP_NOT:
7747 case DIF_OP_MOV:
7748 case DIF_OP_ALLOCS:
7749 if (r1 >= nregs)
7750 err += efunc(pc, "invalid register %u\n", r1);
7751 if (r2 != 0)
7752 err += efunc(pc, "non-zero reserved bits\n");
7753 if (rd >= nregs)
7754 err += efunc(pc, "invalid register %u\n", rd);
7755 if (rd == 0)
7756 err += efunc(pc, "cannot write to %r0\n");
7757 break;
7758 case DIF_OP_LDSB:
7759 case DIF_OP_LDSH:
7760 case DIF_OP_LDSW:
7761 case DIF_OP_LDUB:
7762 case DIF_OP_LDUH:
7763 case DIF_OP_LDUW:
7764 case DIF_OP_LDX:
7765 if (r1 >= nregs)
7766 err += efunc(pc, "invalid register %u\n", r1);
7767 if (r2 != 0)
7768 err += efunc(pc, "non-zero reserved bits\n");
7769 if (rd >= nregs)
7770 err += efunc(pc, "invalid register %u\n", rd);
7771 if (rd == 0)
7772 err += efunc(pc, "cannot write to %r0\n");
7773 if (kcheck)
7774 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
7775 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
7776 break;
7777 case DIF_OP_RLDSB:
7778 case DIF_OP_RLDSH:
7779 case DIF_OP_RLDSW:
7780 case DIF_OP_RLDUB:
7781 case DIF_OP_RLDUH:
7782 case DIF_OP_RLDUW:
7783 case DIF_OP_RLDX:
7784 if (r1 >= nregs)
7785 err += efunc(pc, "invalid register %u\n", r1);
7786 if (r2 != 0)
7787 err += efunc(pc, "non-zero reserved bits\n");
7788 if (rd >= nregs)
7789 err += efunc(pc, "invalid register %u\n", rd);
7790 if (rd == 0)
7791 err += efunc(pc, "cannot write to %r0\n");
7792 break;
7793 case DIF_OP_ULDSB:
7794 case DIF_OP_ULDSH:
7795 case DIF_OP_ULDSW:
7796 case DIF_OP_ULDUB:
7797 case DIF_OP_ULDUH:
7798 case DIF_OP_ULDUW:
7799 case DIF_OP_ULDX:
7800 if (r1 >= nregs)
7801 err += efunc(pc, "invalid register %u\n", r1);
7802 if (r2 != 0)
7803 err += efunc(pc, "non-zero reserved bits\n");
7804 if (rd >= nregs)
7805 err += efunc(pc, "invalid register %u\n", rd);
7806 if (rd == 0)
7807 err += efunc(pc, "cannot write to %r0\n");
7808 break;
7809 case DIF_OP_STB:
7810 case DIF_OP_STH:
7811 case DIF_OP_STW:
7812 case DIF_OP_STX:
7813 if (r1 >= nregs)
7814 err += efunc(pc, "invalid register %u\n", r1);
7815 if (r2 != 0)
7816 err += efunc(pc, "non-zero reserved bits\n");
7817 if (rd >= nregs)
7818 err += efunc(pc, "invalid register %u\n", rd);
7819 if (rd == 0)
7820 err += efunc(pc, "cannot write to 0 address\n");
7821 break;
7822 case DIF_OP_CMP:
7823 case DIF_OP_SCMP:
7824 if (r1 >= nregs)
7825 err += efunc(pc, "invalid register %u\n", r1);
7826 if (r2 >= nregs)
7827 err += efunc(pc, "invalid register %u\n", r2);
7828 if (rd != 0)
7829 err += efunc(pc, "non-zero reserved bits\n");
7830 break;
7831 case DIF_OP_TST:
7832 if (r1 >= nregs)
7833 err += efunc(pc, "invalid register %u\n", r1);
7834 if (r2 != 0 || rd != 0)
7835 err += efunc(pc, "non-zero reserved bits\n");
7836 break;
7837 case DIF_OP_BA:
7838 case DIF_OP_BE:
7839 case DIF_OP_BNE:
7840 case DIF_OP_BG:
7841 case DIF_OP_BGU:
7842 case DIF_OP_BGE:
7843 case DIF_OP_BGEU:
7844 case DIF_OP_BL:
7845 case DIF_OP_BLU:
7846 case DIF_OP_BLE:
7847 case DIF_OP_BLEU:
7848 if (label >= dp->dtdo_len) {
7849 err += efunc(pc, "invalid branch target %u\n",
7850 label);
7851 }
7852 if (label <= pc) {
7853 err += efunc(pc, "backward branch to %u\n",
7854 label);
7855 }
7856 break;
7857 case DIF_OP_RET:
7858 if (r1 != 0 || r2 != 0)
7859 err += efunc(pc, "non-zero reserved bits\n");
7860 if (rd >= nregs)
7861 err += efunc(pc, "invalid register %u\n", rd);
7862 break;
7863 case DIF_OP_NOP:
7864 case DIF_OP_POPTS:
7865 case DIF_OP_FLUSHTS:
7866 if (r1 != 0 || r2 != 0 || rd != 0)
7867 err += efunc(pc, "non-zero reserved bits\n");
7868 break;
7869 case DIF_OP_SETX:
7870 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
7871 err += efunc(pc, "invalid integer ref %u\n",
7872 DIF_INSTR_INTEGER(instr));
7873 }
7874 if (rd >= nregs)
7875 err += efunc(pc, "invalid register %u\n", rd);
7876 if (rd == 0)
7877 err += efunc(pc, "cannot write to %r0\n");
7878 break;
7879 case DIF_OP_SETS:
7880 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
7881 err += efunc(pc, "invalid string ref %u\n",
7882 DIF_INSTR_STRING(instr));
7883 }
7884 if (rd >= nregs)
7885 err += efunc(pc, "invalid register %u\n", rd);
7886 if (rd == 0)
7887 err += efunc(pc, "cannot write to %r0\n");
7888 break;
7889 case DIF_OP_LDGA:
7890 case DIF_OP_LDTA:
7891 if (r1 > DIF_VAR_ARRAY_MAX)
7892 err += efunc(pc, "invalid array %u\n", r1);
7893 if (r2 >= nregs)
7894 err += efunc(pc, "invalid register %u\n", r2);
7895 if (rd >= nregs)
7896 err += efunc(pc, "invalid register %u\n", rd);
7897 if (rd == 0)
7898 err += efunc(pc, "cannot write to %r0\n");
7899 break;
7900 case DIF_OP_LDGS:
7901 case DIF_OP_LDTS:
7902 case DIF_OP_LDLS:
7903 case DIF_OP_LDGAA:
7904 case DIF_OP_LDTAA:
7905 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
7906 err += efunc(pc, "invalid variable %u\n", v);
7907 if (rd >= nregs)
7908 err += efunc(pc, "invalid register %u\n", rd);
7909 if (rd == 0)
7910 err += efunc(pc, "cannot write to %r0\n");
7911 break;
7912 case DIF_OP_STGS:
7913 case DIF_OP_STTS:
7914 case DIF_OP_STLS:
7915 case DIF_OP_STGAA:
7916 case DIF_OP_STTAA:
7917 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
7918 err += efunc(pc, "invalid variable %u\n", v);
7919 if (rs >= nregs)
7920 err += efunc(pc, "invalid register %u\n", rs);
7921 break;
7922 case DIF_OP_CALL:
7923 if (subr > DIF_SUBR_MAX)
7924 err += efunc(pc, "invalid subr %u\n", subr);
7925 if (rd >= nregs)
7926 err += efunc(pc, "invalid register %u\n", rd);
7927 if (rd == 0)
7928 err += efunc(pc, "cannot write to %r0\n");
7929
7930 if (subr == DIF_SUBR_COPYOUT ||
7931 subr == DIF_SUBR_COPYOUTSTR) {
7932 dp->dtdo_destructive = 1;
7933 }
7934 break;
7935 case DIF_OP_PUSHTR:
7936 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
7937 err += efunc(pc, "invalid ref type %u\n", type);
7938 if (r2 >= nregs)
7939 err += efunc(pc, "invalid register %u\n", r2);
7940 if (rs >= nregs)
7941 err += efunc(pc, "invalid register %u\n", rs);
7942 break;
7943 case DIF_OP_PUSHTV:
7944 if (type != DIF_TYPE_CTF)
7945 err += efunc(pc, "invalid val type %u\n", type);
7946 if (r2 >= nregs)
7947 err += efunc(pc, "invalid register %u\n", r2);
7948 if (rs >= nregs)
7949 err += efunc(pc, "invalid register %u\n", rs);
7950 break;
7951 default:
7952 err += efunc(pc, "invalid opcode %u\n",
7953 DIF_INSTR_OP(instr));
7954 }
7955 }
7956
7957 if (dp->dtdo_len != 0 &&
7958 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
7959 err += efunc(dp->dtdo_len - 1,
7960 "expected 'ret' as last DIF instruction\n");
7961 }
7962
7963 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
7964 /*
7965 * If we're not returning by reference, the size must be either
7966 * 0 or the size of one of the base types.
7967 */
7968 switch (dp->dtdo_rtype.dtdt_size) {
7969 case 0:
7970 case sizeof (uint8_t):
7971 case sizeof (uint16_t):
7972 case sizeof (uint32_t):
7973 case sizeof (uint64_t):
7974 break;
7975
7976 default:
7977 err += efunc(dp->dtdo_len - 1, "bad return size");
7978 }
7979 }
7980
7981 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
7982 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
7983 dtrace_diftype_t *vt, *et;
7984 uint_t id, ndx;
7985
7986 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
7987 v->dtdv_scope != DIFV_SCOPE_THREAD &&
7988 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
7989 err += efunc(i, "unrecognized variable scope %d\n",
7990 v->dtdv_scope);
7991 break;
7992 }
7993
7994 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
7995 v->dtdv_kind != DIFV_KIND_SCALAR) {
7996 err += efunc(i, "unrecognized variable type %d\n",
7997 v->dtdv_kind);
7998 break;
7999 }
8000
8001 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8002 err += efunc(i, "%d exceeds variable id limit\n", id);
8003 break;
8004 }
8005
8006 if (id < DIF_VAR_OTHER_UBASE)
8007 continue;
8008
8009 /*
8010 * For user-defined variables, we need to check that this
8011 * definition is identical to any previous definition that we
8012 * encountered.
8013 */
8014 ndx = id - DIF_VAR_OTHER_UBASE;
8015
8016 switch (v->dtdv_scope) {
8017 case DIFV_SCOPE_GLOBAL:
8018 if (ndx < vstate->dtvs_nglobals) {
8019 dtrace_statvar_t *svar;
8020
8021 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8022 existing = &svar->dtsv_var;
8023 }
8024
8025 break;
8026
8027 case DIFV_SCOPE_THREAD:
8028 if (ndx < vstate->dtvs_ntlocals)
8029 existing = &vstate->dtvs_tlocals[ndx];
8030 break;
8031
8032 case DIFV_SCOPE_LOCAL:
8033 if (ndx < vstate->dtvs_nlocals) {
8034 dtrace_statvar_t *svar;
8035
8036 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8037 existing = &svar->dtsv_var;
8038 }
8039
8040 break;
8041 }
8042
8043 vt = &v->dtdv_type;
8044
8045 if (vt->dtdt_flags & DIF_TF_BYREF) {
8046 if (vt->dtdt_size == 0) {
8047 err += efunc(i, "zero-sized variable\n");
8048 break;
8049 }
8050
8051 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8052 vt->dtdt_size > dtrace_global_maxsize) {
8053 err += efunc(i, "oversized by-ref global\n");
8054 break;
8055 }
8056 }
8057
8058 if (existing == NULL || existing->dtdv_id == 0)
8059 continue;
8060
8061 ASSERT(existing->dtdv_id == v->dtdv_id);
8062 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8063
8064 if (existing->dtdv_kind != v->dtdv_kind)
8065 err += efunc(i, "%d changed variable kind\n", id);
8066
8067 et = &existing->dtdv_type;
8068
8069 if (vt->dtdt_flags != et->dtdt_flags) {
8070 err += efunc(i, "%d changed variable type flags\n", id);
8071 break;
8072 }
8073
8074 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8075 err += efunc(i, "%d changed variable type size\n", id);
8076 break;
8077 }
8078 }
8079
8080 return (err);
8081 }
8082
8083 /*
8084 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8085 * are much more constrained than normal DIFOs. Specifically, they may
8086 * not:
8087 *
8088 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8089 * miscellaneous string routines.
8090 * 2. Access DTrace variables other than the args[] array, and the
8091 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8092 * 3. Have thread-local variables.
8093 * 4. Have dynamic variables.
8094 */
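/*
 * Illustrative note (not part of the original source): in practice this means
 * a helper expression such as copyinstr(arg0) validates cleanly -- args[]
 * access and copyinstr() are both permitted below, as are alloca(), bcopy()
 * and the string subroutines -- whereas an expression that reads a
 * thread-local such as self->depth, or that calls a subroutine like
 * copyout(), is rejected by the checks in this function.
 */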
8095 static int
8096 dtrace_difo_validate_helper(dtrace_difo_t *dp)
8097 {
8098 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8099 int err = 0;
8100 uint_t pc;
8101
8102 for (pc = 0; pc < dp->dtdo_len; pc++) {
8103 dif_instr_t instr = dp->dtdo_buf[pc];
8104
8105 uint_t v = DIF_INSTR_VAR(instr);
8106 uint_t subr = DIF_INSTR_SUBR(instr);
8107 uint_t op = DIF_INSTR_OP(instr);
8108
8109 switch (op) {
8110 case DIF_OP_OR:
8111 case DIF_OP_XOR:
8112 case DIF_OP_AND:
8113 case DIF_OP_SLL:
8114 case DIF_OP_SRL:
8115 case DIF_OP_SRA:
8116 case DIF_OP_SUB:
8117 case DIF_OP_ADD:
8118 case DIF_OP_MUL:
8119 case DIF_OP_SDIV:
8120 case DIF_OP_UDIV:
8121 case DIF_OP_SREM:
8122 case DIF_OP_UREM:
8123 case DIF_OP_COPYS:
8124 case DIF_OP_NOT:
8125 case DIF_OP_MOV:
8126 case DIF_OP_RLDSB:
8127 case DIF_OP_RLDSH:
8128 case DIF_OP_RLDSW:
8129 case DIF_OP_RLDUB:
8130 case DIF_OP_RLDUH:
8131 case DIF_OP_RLDUW:
8132 case DIF_OP_RLDX:
8133 case DIF_OP_ULDSB:
8134 case DIF_OP_ULDSH:
8135 case DIF_OP_ULDSW:
8136 case DIF_OP_ULDUB:
8137 case DIF_OP_ULDUH:
8138 case DIF_OP_ULDUW:
8139 case DIF_OP_ULDX:
8140 case DIF_OP_STB:
8141 case DIF_OP_STH:
8142 case DIF_OP_STW:
8143 case DIF_OP_STX:
8144 case DIF_OP_ALLOCS:
8145 case DIF_OP_CMP:
8146 case DIF_OP_SCMP:
8147 case DIF_OP_TST:
8148 case DIF_OP_BA:
8149 case DIF_OP_BE:
8150 case DIF_OP_BNE:
8151 case DIF_OP_BG:
8152 case DIF_OP_BGU:
8153 case DIF_OP_BGE:
8154 case DIF_OP_BGEU:
8155 case DIF_OP_BL:
8156 case DIF_OP_BLU:
8157 case DIF_OP_BLE:
8158 case DIF_OP_BLEU:
8159 case DIF_OP_RET:
8160 case DIF_OP_NOP:
8161 case DIF_OP_POPTS:
8162 case DIF_OP_FLUSHTS:
8163 case DIF_OP_SETX:
8164 case DIF_OP_SETS:
8165 case DIF_OP_LDGA:
8166 case DIF_OP_LDLS:
8167 case DIF_OP_STGS:
8168 case DIF_OP_STLS:
8169 case DIF_OP_PUSHTR:
8170 case DIF_OP_PUSHTV:
8171 break;
8172
8173 case DIF_OP_LDGS:
8174 if (v >= DIF_VAR_OTHER_UBASE)
8175 break;
8176
8177 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8178 break;
8179
8180 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8181 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8182 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8183 v == DIF_VAR_UID || v == DIF_VAR_GID)
8184 break;
8185
8186 err += efunc(pc, "illegal variable %u\n", v);
8187 break;
8188
8189 case DIF_OP_LDTA:
8190 case DIF_OP_LDTS:
8191 case DIF_OP_LDGAA:
8192 case DIF_OP_LDTAA:
8193 err += efunc(pc, "illegal dynamic variable load\n");
8194 break;
8195
8196 case DIF_OP_STTS:
8197 case DIF_OP_STGAA:
8198 case DIF_OP_STTAA:
8199 err += efunc(pc, "illegal dynamic variable store\n");
8200 break;
8201
8202 case DIF_OP_CALL:
8203 if (subr == DIF_SUBR_ALLOCA ||
8204 subr == DIF_SUBR_BCOPY ||
8205 subr == DIF_SUBR_COPYIN ||
8206 subr == DIF_SUBR_COPYINTO ||
8207 subr == DIF_SUBR_COPYINSTR ||
8208 subr == DIF_SUBR_INDEX ||
8209 subr == DIF_SUBR_LLTOSTR ||
8210 subr == DIF_SUBR_RINDEX ||
8211 subr == DIF_SUBR_STRCHR ||
8212 subr == DIF_SUBR_STRJOIN ||
8213 subr == DIF_SUBR_STRRCHR ||
8214 subr == DIF_SUBR_STRSTR ||
8215 subr == DIF_SUBR_CHUD)
8216 break;
8217
8218 err += efunc(pc, "invalid subr %u\n", subr);
8219 break;
8220
8221 default:
8222 err += efunc(pc, "invalid opcode %u\n",
8223 DIF_INSTR_OP(instr));
8224 }
8225 }
8226
8227 return (err);
8228 }
8229
8230 /*
8231 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8232 * basis; 0 if not.
8233 */
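/*
 * Illustrative note (not part of the original source): the intent is that a
 * predicate referencing only stable per-thread facts -- e.g.
 * /execname == "mydaemon"/ or /pid == 1234/ -- can have its result cached
 * against the thread via the predicate cache ID, so it need not be
 * re-evaluated on every firing by the same thread.  Anything that loads
 * memory, indexes an array (LDGA), or stores to a thread-local (STTS)
 * defeats caching, as the checks below reflect.
 */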
8234 static int
8235 dtrace_difo_cacheable(dtrace_difo_t *dp)
8236 {
8237 int i;
8238
8239 if (dp == NULL)
8240 return (0);
8241
8242 for (i = 0; i < dp->dtdo_varlen; i++) {
8243 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8244
8245 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8246 continue;
8247
8248 switch (v->dtdv_id) {
8249 case DIF_VAR_CURTHREAD:
8250 case DIF_VAR_PID:
8251 case DIF_VAR_TID:
8252 case DIF_VAR_EXECNAME:
8253 case DIF_VAR_ZONENAME:
8254 break;
8255
8256 default:
8257 return (0);
8258 }
8259 }
8260
8261 /*
8262 * This DIF object may be cacheable. Now we need to look for any
8263 * array loading instructions, any memory loading instructions, or
8264 * any stores to thread-local variables.
8265 */
8266 for (i = 0; i < dp->dtdo_len; i++) {
8267 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8268
8269 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8270 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8271 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8272 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8273 return (0);
8274 }
8275
8276 return (1);
8277 }
8278
8279 static void
8280 dtrace_difo_hold(dtrace_difo_t *dp)
8281 {
8282 int i;
8283
8284 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8285
8286 dp->dtdo_refcnt++;
8287 ASSERT(dp->dtdo_refcnt != 0);
8288
8289 /*
8290 * We need to check this DIF object for references to the variable
8291 * DIF_VAR_VTIMESTAMP.
8292 */
8293 for (i = 0; i < dp->dtdo_varlen; i++) {
8294 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8295
8296 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8297 continue;
8298
8299 if (dtrace_vtime_references++ == 0)
8300 dtrace_vtime_enable();
8301 }
8302 }
8303
8304 /*
8305 * This routine calculates the dynamic variable chunksize for a given DIF
8306 * object. The calculation is not fool-proof, and can probably be tricked by
8307 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8308 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8309 * if a dynamic variable size exceeds the chunksize.
8310 */
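/*
 * Illustrative sizing note (not part of the original source): for each
 * dynamic variable store found below, the allocation size is computed as
 *
 *	sizeof (dtrace_dynvar_t)
 *	  + (nkeys - 1) * sizeof (dtrace_key_t)
 *	  + ksize			(tuple key data, 8-byte rounded)
 *	  + dtdv_type.dtdt_size		(the stored value itself)
 *
 * rounded up to a multiple of 8 bytes; the largest such size encountered
 * becomes the new dtds_chunksize.
 */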
8311 static void
8312 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8313 {
8314 uint64_t sval;
8315 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8316 const dif_instr_t *text = dp->dtdo_buf;
8317 uint_t pc, srd = 0;
8318 uint_t ttop = 0;
8319 size_t size, ksize;
8320 uint_t id, i;
8321
8322 for (pc = 0; pc < dp->dtdo_len; pc++) {
8323 dif_instr_t instr = text[pc];
8324 uint_t op = DIF_INSTR_OP(instr);
8325 uint_t rd = DIF_INSTR_RD(instr);
8326 uint_t r1 = DIF_INSTR_R1(instr);
8327 uint_t nkeys = 0;
8328 uchar_t scope;
8329
8330 dtrace_key_t *key = tupregs;
8331
8332 switch (op) {
8333 case DIF_OP_SETX:
8334 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8335 srd = rd;
8336 continue;
8337
8338 case DIF_OP_STTS:
8339 key = &tupregs[DIF_DTR_NREGS];
8340 key[0].dttk_size = 0;
8341 key[1].dttk_size = 0;
8342 nkeys = 2;
8343 scope = DIFV_SCOPE_THREAD;
8344 break;
8345
8346 case DIF_OP_STGAA:
8347 case DIF_OP_STTAA:
8348 nkeys = ttop;
8349
8350 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
8351 key[nkeys++].dttk_size = 0;
8352
8353 key[nkeys++].dttk_size = 0;
8354
8355 if (op == DIF_OP_STTAA) {
8356 scope = DIFV_SCOPE_THREAD;
8357 } else {
8358 scope = DIFV_SCOPE_GLOBAL;
8359 }
8360
8361 break;
8362
8363 case DIF_OP_PUSHTR:
8364 if (ttop == DIF_DTR_NREGS)
8365 return;
8366
8367 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
8368 /*
8369 * If the register for the size of the "pushtr"
8370 * is %r0 (or the value is 0) and the type is
8371 * a string, we'll use the system-wide default
8372 * string size.
8373 */
8374 tupregs[ttop++].dttk_size =
8375 dtrace_strsize_default;
8376 } else {
8377 if (srd == 0)
8378 return;
8379
8380 tupregs[ttop++].dttk_size = sval;
8381 }
8382
8383 break;
8384
8385 case DIF_OP_PUSHTV:
8386 if (ttop == DIF_DTR_NREGS)
8387 return;
8388
8389 tupregs[ttop++].dttk_size = 0;
8390 break;
8391
8392 case DIF_OP_FLUSHTS:
8393 ttop = 0;
8394 break;
8395
8396 case DIF_OP_POPTS:
8397 if (ttop != 0)
8398 ttop--;
8399 break;
8400 }
8401
8402 sval = 0;
8403 srd = 0;
8404
8405 if (nkeys == 0)
8406 continue;
8407
8408 /*
8409 * We have a dynamic variable allocation; calculate its size.
8410 */
8411 for (ksize = 0, i = 0; i < nkeys; i++)
8412 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
8413
8414 size = sizeof (dtrace_dynvar_t);
8415 size += sizeof (dtrace_key_t) * (nkeys - 1);
8416 size += ksize;
8417
8418 /*
8419 * Now we need to determine the size of the stored data.
8420 */
8421 id = DIF_INSTR_VAR(instr);
8422
8423 for (i = 0; i < dp->dtdo_varlen; i++) {
8424 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8425
8426 if (v->dtdv_id == id && v->dtdv_scope == scope) {
8427 size += v->dtdv_type.dtdt_size;
8428 break;
8429 }
8430 }
8431
8432 if (i == dp->dtdo_varlen)
8433 return;
8434
8435 /*
8436 * We have the size. If this is larger than the chunk size
8437 * for our dynamic variable state, reset the chunk size.
8438 */
8439 size = P2ROUNDUP(size, sizeof (uint64_t));
8440
8441 if (size > vstate->dtvs_dynvars.dtds_chunksize)
8442 vstate->dtvs_dynvars.dtds_chunksize = size;
8443 }
8444 }
8445
8446 static void
8447 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8448 {
8449 int i, oldsvars, osz, nsz, otlocals, ntlocals;
8450 uint_t id;
8451
8452 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8453 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
8454
8455 for (i = 0; i < dp->dtdo_varlen; i++) {
8456 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8457 dtrace_statvar_t *svar, ***svarp;
8458 size_t dsize = 0;
8459 uint8_t scope = v->dtdv_scope;
8460 int *np;
8461
8462 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8463 continue;
8464
8465 id -= DIF_VAR_OTHER_UBASE;
8466
8467 switch (scope) {
8468 case DIFV_SCOPE_THREAD:
8469 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
8470 dtrace_difv_t *tlocals;
8471
8472 if ((ntlocals = (otlocals << 1)) == 0)
8473 ntlocals = 1;
8474
8475 osz = otlocals * sizeof (dtrace_difv_t);
8476 nsz = ntlocals * sizeof (dtrace_difv_t);
8477
8478 tlocals = kmem_zalloc(nsz, KM_SLEEP);
8479
8480 if (osz != 0) {
8481 bcopy(vstate->dtvs_tlocals,
8482 tlocals, osz);
8483 kmem_free(vstate->dtvs_tlocals, osz);
8484 }
8485
8486 vstate->dtvs_tlocals = tlocals;
8487 vstate->dtvs_ntlocals = ntlocals;
8488 }
8489
8490 vstate->dtvs_tlocals[id] = *v;
8491 continue;
8492
8493 case DIFV_SCOPE_LOCAL:
8494 np = &vstate->dtvs_nlocals;
8495 svarp = &vstate->dtvs_locals;
8496
8497 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8498 dsize = NCPU * (v->dtdv_type.dtdt_size +
8499 sizeof (uint64_t));
8500 else
8501 dsize = NCPU * sizeof (uint64_t);
8502
8503 break;
8504
8505 case DIFV_SCOPE_GLOBAL:
8506 np = &vstate->dtvs_nglobals;
8507 svarp = &vstate->dtvs_globals;
8508
8509 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8510 dsize = v->dtdv_type.dtdt_size +
8511 sizeof (uint64_t);
8512
8513 break;
8514
8515 default:
8516 ASSERT(0);
8517 }
8518
8519 while (id >= (oldsvars = *np)) {
8520 dtrace_statvar_t **statics;
8521 int newsvars, oldsize, newsize;
8522
8523 if ((newsvars = (oldsvars << 1)) == 0)
8524 newsvars = 1;
8525
8526 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
8527 newsize = newsvars * sizeof (dtrace_statvar_t *);
8528
8529 statics = kmem_zalloc(newsize, KM_SLEEP);
8530
8531 if (oldsize != 0) {
8532 bcopy(*svarp, statics, oldsize);
8533 kmem_free(*svarp, oldsize);
8534 }
8535
8536 *svarp = statics;
8537 *np = newsvars;
8538 }
8539
8540 if ((svar = (*svarp)[id]) == NULL) {
8541 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
8542 svar->dtsv_var = *v;
8543
8544 if ((svar->dtsv_size = dsize) != 0) {
8545 svar->dtsv_data = (uint64_t)(uintptr_t)
8546 kmem_zalloc(dsize, KM_SLEEP);
8547 }
8548
8549 (*svarp)[id] = svar;
8550 }
8551
8552 svar->dtsv_refcnt++;
8553 }
8554
8555 dtrace_difo_chunksize(dp, vstate);
8556 dtrace_difo_hold(dp);
8557 }
8558
8559 static dtrace_difo_t *
8560 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8561 {
8562 dtrace_difo_t *new;
8563 size_t sz;
8564
8565 ASSERT(dp->dtdo_buf != NULL);
8566 ASSERT(dp->dtdo_refcnt != 0);
8567
8568 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
8569
8570 ASSERT(dp->dtdo_buf != NULL);
8571 sz = dp->dtdo_len * sizeof (dif_instr_t);
8572 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
8573 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
8574 new->dtdo_len = dp->dtdo_len;
8575
8576 if (dp->dtdo_strtab != NULL) {
8577 ASSERT(dp->dtdo_strlen != 0);
8578 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
8579 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
8580 new->dtdo_strlen = dp->dtdo_strlen;
8581 }
8582
8583 if (dp->dtdo_inttab != NULL) {
8584 ASSERT(dp->dtdo_intlen != 0);
8585 sz = dp->dtdo_intlen * sizeof (uint64_t);
8586 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
8587 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
8588 new->dtdo_intlen = dp->dtdo_intlen;
8589 }
8590
8591 if (dp->dtdo_vartab != NULL) {
8592 ASSERT(dp->dtdo_varlen != 0);
8593 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
8594 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
8595 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
8596 new->dtdo_varlen = dp->dtdo_varlen;
8597 }
8598
8599 dtrace_difo_init(new, vstate);
8600 return (new);
8601 }
8602
8603 static void
8604 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8605 {
8606 int i;
8607
8608 ASSERT(dp->dtdo_refcnt == 0);
8609
8610 for (i = 0; i < dp->dtdo_varlen; i++) {
8611 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8612 dtrace_statvar_t *svar, **svarp;
8613 uint_t id;
8614 uint8_t scope = v->dtdv_scope;
8615 int *np;
8616
8617 switch (scope) {
8618 case DIFV_SCOPE_THREAD:
8619 continue;
8620
8621 case DIFV_SCOPE_LOCAL:
8622 np = &vstate->dtvs_nlocals;
8623 svarp = vstate->dtvs_locals;
8624 break;
8625
8626 case DIFV_SCOPE_GLOBAL:
8627 np = &vstate->dtvs_nglobals;
8628 svarp = vstate->dtvs_globals;
8629 break;
8630
8631 default:
8632 ASSERT(0);
8633 }
8634
8635 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8636 continue;
8637
8638 id -= DIF_VAR_OTHER_UBASE;
8639 ASSERT(id < *np);
8640
8641 svar = svarp[id];
8642 ASSERT(svar != NULL);
8643 ASSERT(svar->dtsv_refcnt > 0);
8644
8645 if (--svar->dtsv_refcnt > 0)
8646 continue;
8647
8648 if (svar->dtsv_size != 0) {
8649 ASSERT(svar->dtsv_data != NULL);
8650 kmem_free((void *)(uintptr_t)svar->dtsv_data,
8651 svar->dtsv_size);
8652 }
8653
8654 kmem_free(svar, sizeof (dtrace_statvar_t));
8655 svarp[id] = NULL;
8656 }
8657
8658 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
8659 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
8660 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
8661 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
8662
8663 kmem_free(dp, sizeof (dtrace_difo_t));
8664 }
8665
8666 static void
8667 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8668 {
8669 int i;
8670
8671 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8672 ASSERT(dp->dtdo_refcnt != 0);
8673
8674 for (i = 0; i < dp->dtdo_varlen; i++) {
8675 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8676
8677 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8678 continue;
8679
8680 ASSERT(dtrace_vtime_references > 0);
8681 if (--dtrace_vtime_references == 0)
8682 dtrace_vtime_disable();
8683 }
8684
8685 if (--dp->dtdo_refcnt == 0)
8686 dtrace_difo_destroy(dp, vstate);
8687 }
8688
8689 /*
8690 * DTrace Format Functions
8691 */
8692 static uint16_t
8693 dtrace_format_add(dtrace_state_t *state, char *str)
8694 {
8695 char *fmt, **new;
8696 uint16_t ndx, len = strlen(str) + 1;
8697
8698 fmt = kmem_zalloc(len, KM_SLEEP);
8699 bcopy(str, fmt, len);
8700
8701 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
8702 if (state->dts_formats[ndx] == NULL) {
8703 state->dts_formats[ndx] = fmt;
8704 return (ndx + 1);
8705 }
8706 }
8707
8708 if (state->dts_nformats == USHRT_MAX) {
8709 /*
8710 * This is only likely if a denial-of-service attack is being
8711 * attempted. As such, it's okay to fail silently here.
8712 */
8713 kmem_free(fmt, len);
8714 return (0);
8715 }
8716
8717 /*
8718 * For simplicity, we always resize the formats array to be exactly the
8719 * number of formats.
8720 */
8721 ndx = state->dts_nformats++;
8722 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
8723
8724 if (state->dts_formats != NULL) {
8725 ASSERT(ndx != 0);
8726 bcopy(state->dts_formats, new, ndx * sizeof (char *));
8727 kmem_free(state->dts_formats, ndx * sizeof (char *));
8728 }
8729
8730 state->dts_formats = new;
8731 state->dts_formats[ndx] = fmt;
8732
8733 return (ndx + 1);
8734 }
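/*
 * Illustrative note (not part of the original source): format handles
 * returned here are 1-based -- dts_formats[ndx] is handed back as (ndx + 1)
 * -- so that 0 can serve as "no format"/failure.  dtrace_format_remove()
 * below correspondingly frees slot (format - 1), leaving a NULL hole that a
 * later dtrace_format_add() can reuse.
 */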
8735
8736 static void
8737 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
8738 {
8739 char *fmt;
8740
8741 ASSERT(state->dts_formats != NULL);
8742 ASSERT(format <= state->dts_nformats);
8743 ASSERT(state->dts_formats[format - 1] != NULL);
8744
8745 fmt = state->dts_formats[format - 1];
8746 kmem_free(fmt, strlen(fmt) + 1);
8747 state->dts_formats[format - 1] = NULL;
8748 }
8749
8750 static void
8751 dtrace_format_destroy(dtrace_state_t *state)
8752 {
8753 int i;
8754
8755 if (state->dts_nformats == 0) {
8756 ASSERT(state->dts_formats == NULL);
8757 return;
8758 }
8759
8760 ASSERT(state->dts_formats != NULL);
8761
8762 for (i = 0; i < state->dts_nformats; i++) {
8763 char *fmt = state->dts_formats[i];
8764
8765 if (fmt == NULL)
8766 continue;
8767
8768 kmem_free(fmt, strlen(fmt) + 1);
8769 }
8770
8771 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
8772 state->dts_nformats = 0;
8773 state->dts_formats = NULL;
8774 }
8775
8776 /*
8777 * DTrace Predicate Functions
8778 */
8779 static dtrace_predicate_t *
8780 dtrace_predicate_create(dtrace_difo_t *dp)
8781 {
8782 dtrace_predicate_t *pred;
8783
8784 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8785 ASSERT(dp->dtdo_refcnt != 0);
8786
8787 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
8788 pred->dtp_difo = dp;
8789 pred->dtp_refcnt = 1;
8790
8791 if (!dtrace_difo_cacheable(dp))
8792 return (pred);
8793
8794 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
8795 /*
8796 * This is only theoretically possible -- we have had 2^32
8797 * cacheable predicates on this machine. We cannot allow any
8798 * more predicates to become cacheable: as unlikely as it is,
8799 * there may be a thread caching a (now stale) predicate cache
8800 * ID. (N.B.: the temptation is being successfully resisted to
8801 * have this cmn_err() "Holy shit -- we executed this code!")
8802 */
8803 return (pred);
8804 }
8805
8806 pred->dtp_cacheid = dtrace_predcache_id++;
8807
8808 return (pred);
8809 }
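/*
 * Illustrative note (not part of the original source): because the predicate
 * is kmem_zalloc()'d, an uncacheable predicate keeps dtp_cacheid at
 * DTRACE_CACHEIDNONE; cacheable ones get the next ID from
 * dtrace_predcache_id.  Roughly speaking, probe context can then record a
 * predicate's cache ID per thread after a successful evaluation and skip
 * re-evaluating the same predicate on that thread's subsequent firings.
 */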
8810
8811 static void
8812 dtrace_predicate_hold(dtrace_predicate_t *pred)
8813 {
8814 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8815 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
8816 ASSERT(pred->dtp_refcnt > 0);
8817
8818 pred->dtp_refcnt++;
8819 }
8820
8821 static void
8822 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
8823 {
8824 dtrace_difo_t *dp = pred->dtp_difo;
8825
8826 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8827 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
8828 ASSERT(pred->dtp_refcnt > 0);
8829
8830 if (--pred->dtp_refcnt == 0) {
8831 dtrace_difo_release(pred->dtp_difo, vstate);
8832 kmem_free(pred, sizeof (dtrace_predicate_t));
8833 }
8834 }
8835
8836 /*
8837 * DTrace Action Description Functions
8838 */
8839 static dtrace_actdesc_t *
8840 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
8841 uint64_t uarg, uint64_t arg)
8842 {
8843 dtrace_actdesc_t *act;
8844
8845 /* ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
8846 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));*/
8847
8848 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
8849 act->dtad_kind = kind;
8850 act->dtad_ntuple = ntuple;
8851 act->dtad_uarg = uarg;
8852 act->dtad_arg = arg;
8853 act->dtad_refcnt = 1;
8854
8855 return (act);
8856 }
8857
8858 static void
8859 dtrace_actdesc_hold(dtrace_actdesc_t *act)
8860 {
8861 ASSERT(act->dtad_refcnt >= 1);
8862 act->dtad_refcnt++;
8863 }
8864
8865 static void
8866 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
8867 {
8868 dtrace_actkind_t kind = act->dtad_kind;
8869 dtrace_difo_t *dp;
8870
8871 ASSERT(act->dtad_refcnt >= 1);
8872
8873 if (--act->dtad_refcnt != 0)
8874 return;
8875
8876 if ((dp = act->dtad_difo) != NULL)
8877 dtrace_difo_release(dp, vstate);
8878
8879 if (DTRACEACT_ISPRINTFLIKE(kind)) {
8880 char *str = (char *)(uintptr_t)act->dtad_arg;
8881
8882 /* ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
8883 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));*/
8884
8885 if (str != NULL)
8886 kmem_free(str, strlen(str) + 1);
8887 }
8888
8889 kmem_free(act, sizeof (dtrace_actdesc_t));
8890 }
8891
8892 /*
8893 * DTrace ECB Functions
8894 */
8895 static dtrace_ecb_t *
8896 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
8897 {
8898 dtrace_ecb_t *ecb;
8899 dtrace_epid_t epid;
8900
8901 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8902
8903 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
8904 ecb->dte_predicate = NULL;
8905 ecb->dte_probe = probe;
8906
8907 /*
8908 * The default size is the size of the default action: recording
8909 * the epid.
8910 */
8911 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
8912 ecb->dte_alignment = sizeof (dtrace_epid_t);
8913
8914 epid = state->dts_epid++;
8915
8916 if (epid - 1 >= state->dts_necbs) {
8917 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
8918 int necbs = state->dts_necbs << 1;
8919
8920 ASSERT(epid == state->dts_necbs + 1);
8921
8922 if (necbs == 0) {
8923 ASSERT(oecbs == NULL);
8924 necbs = 1;
8925 }
8926
8927 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
8928
8929 if (oecbs != NULL)
8930 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
8931
8932 dtrace_membar_producer();
8933 state->dts_ecbs = ecbs;
8934
8935 if (oecbs != NULL) {
8936 /*
8937 * If this state is active, we must dtrace_sync()
8938 * before we can free the old dts_ecbs array: we're
8939 * coming in hot, and there may be active ring
8940 * buffer processing (which indexes into the dts_ecbs
8941 * array) on another CPU.
8942 */
8943 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
8944 dtrace_sync();
8945
8946 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
8947 }
8948
8949 dtrace_membar_producer();
8950 state->dts_necbs = necbs;
8951 }
8952
8953 ecb->dte_state = state;
8954
8955 ASSERT(state->dts_ecbs[epid - 1] == NULL);
8956 dtrace_membar_producer();
8957 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
8958
8959 return (ecb);
8960 }
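/*
 * Illustrative note (not part of the original source): EPIDs are 1-based --
 * dts_ecbs is indexed by (epid - 1) -- and the array is grown by doubling.
 * The dtrace_membar_producer() calls above, together with the dtrace_sync()
 * before freeing the old array, allow a CPU that is concurrently indexing
 * into dts_ecbs (e.g. during ring buffer processing, per the comment above)
 * to see either the old array, still allocated until the sync completes, or
 * the fully populated new one.
 */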
8961
8962 static void
8963 dtrace_ecb_enable(dtrace_ecb_t *ecb)
8964 {
8965 dtrace_probe_t *probe = ecb->dte_probe;
8966
8967 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
8968 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8969 ASSERT(ecb->dte_next == NULL);
8970
8971 if (probe == NULL) {
8972 /*
8973 * This is the NULL probe -- there's nothing to do.
8974 */
8975 return;
8976 }
8977
8978 if (probe->dtpr_ecb == NULL) {
8979 dtrace_provider_t *prov = probe->dtpr_provider;
8980
8981 /*
8982 * We're the first ECB on this probe.
8983 */
8984 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
8985
8986 if (ecb->dte_predicate != NULL)
8987 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
8988
8989 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
8990 probe->dtpr_id, probe->dtpr_arg);
8991 } else {
8992 /*
8993 * This probe is already active. Swing the last pointer to
8994 * point to the new ECB, and issue a dtrace_sync() to assure
8995 * that all CPUs have seen the change.
8996 */
8997 ASSERT(probe->dtpr_ecb_last != NULL);
8998 probe->dtpr_ecb_last->dte_next = ecb;
8999 probe->dtpr_ecb_last = ecb;
9000 probe->dtpr_predcache = 0;
9001
9002 dtrace_sync();
9003 }
9004 }
9005
9006 static void
9007 dtrace_ecb_resize(dtrace_ecb_t *ecb)
9008 {
9009 uint32_t maxalign = sizeof (dtrace_epid_t);
9010 uint32_t align = sizeof (uint8_t), offs, diff;
9011 dtrace_action_t *act;
9012 int wastuple = 0;
9013 uint32_t aggbase = UINT32_MAX;
9014 dtrace_state_t *state = ecb->dte_state;
9015
9016 /*
9017 * If we record anything, we always record the epid. (And we always
9018 * record it first.)
9019 */
9020 offs = sizeof (dtrace_epid_t);
9021 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9022
9023 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9024 dtrace_recdesc_t *rec = &act->dta_rec;
9025
9026 if ((align = rec->dtrd_alignment) > maxalign)
9027 maxalign = align;
9028
9029 if (!wastuple && act->dta_intuple) {
9030 /*
9031 * This is the first record in a tuple. Align the
9032 * offset to be at offset 4 in an 8-byte aligned
9033 * block.
9034 */
9035 diff = offs + sizeof (dtrace_aggid_t);
9036
9037 if ((diff = (diff & (sizeof (uint64_t) - 1))) != 0)
9038 offs += sizeof (uint64_t) - diff;
9039
9040 aggbase = offs - sizeof (dtrace_aggid_t);
9041 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9042 }
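/*
 * Worked example (illustrative, not part of the original source): with
 * offs == 8, diff == 8 + sizeof (dtrace_aggid_t) == 12; 12 & 7 == 4, so
 * offs advances by 4 to 12 and aggbase becomes 8.  The aggregation ID thus
 * sits on an 8-byte boundary and the first tuple record begins at offset 4
 * within that 8-byte block, as the comment above describes.
 */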
9043
9044 /*LINTED*/
9045 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9046 /*
9047 * The current offset is not properly aligned; align it.
9048 */
9049 offs += align - diff;
9050 }
9051
9052 rec->dtrd_offset = offs;
9053
9054 if (offs + rec->dtrd_size > ecb->dte_needed) {
9055 ecb->dte_needed = offs + rec->dtrd_size;
9056
9057 if (ecb->dte_needed > state->dts_needed)
9058 state->dts_needed = ecb->dte_needed;
9059 }
9060
9061 if (DTRACEACT_ISAGG(act->dta_kind)) {
9062 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9063 dtrace_action_t *first = agg->dtag_first, *prev;
9064
9065 ASSERT(rec->dtrd_size != 0 && first != NULL);
9066 ASSERT(wastuple);
9067 ASSERT(aggbase != UINT32_MAX);
9068
9069 agg->dtag_base = aggbase;
9070
9071 while ((prev = first->dta_prev) != NULL &&
9072 DTRACEACT_ISAGG(prev->dta_kind)) {
9073 agg = (dtrace_aggregation_t *)prev;
9074 first = agg->dtag_first;
9075 }
9076
9077 if (prev != NULL) {
9078 offs = prev->dta_rec.dtrd_offset +
9079 prev->dta_rec.dtrd_size;
9080 } else {
9081 offs = sizeof (dtrace_epid_t);
9082 }
9083 wastuple = 0;
9084 } else {
9085 if (!act->dta_intuple)
9086 ecb->dte_size = offs + rec->dtrd_size;
9087
9088 offs += rec->dtrd_size;
9089 }
9090
9091 wastuple = act->dta_intuple;
9092 }
9093
9094 if ((act = ecb->dte_action) != NULL &&
9095 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9096 ecb->dte_size == sizeof (dtrace_epid_t)) {
9097 /*
9098 * If the size is still sizeof (dtrace_epid_t), then all
9099 * actions store no data; set the size to 0.
9100 */
9101 ecb->dte_alignment = maxalign;
9102 ecb->dte_size = 0;
9103
9104 /*
9105 * If the needed space is still sizeof (dtrace_epid_t), then
9106 * all actions need no additional space; set the needed
9107 * size to 0.
9108 */
9109 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9110 ecb->dte_needed = 0;
9111
9112 return;
9113 }
9114
9115 /*
9116 * Set our alignment, and make sure that the dte_size and dte_needed
9117 * are aligned to the size of an EPID.
9118 */
9119 ecb->dte_alignment = maxalign;
9120 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9121 ~(sizeof (dtrace_epid_t) - 1);
9122 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9123 ~(sizeof (dtrace_epid_t) - 1);
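/*
 * Illustrative note (not part of the original source): the expression
 * (x + sizeof (dtrace_epid_t) - 1) & ~(sizeof (dtrace_epid_t) - 1) rounds
 * x up to a multiple of the EPID size; e.g. with a 4-byte dtrace_epid_t, a
 * dte_size of 10 becomes 12.
 */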
9124 ASSERT(ecb->dte_size <= ecb->dte_needed);
9125 }
9126
9127 static dtrace_action_t *
9128 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9129 {
9130 dtrace_aggregation_t *agg;
9131 size_t size = sizeof (uint64_t);
9132 int ntuple = desc->dtad_ntuple;
9133 dtrace_action_t *act;
9134 dtrace_recdesc_t *frec;
9135 dtrace_aggid_t aggid;
9136 dtrace_state_t *state = ecb->dte_state;
9137
9138 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9139 agg->dtag_ecb = ecb;
9140
9141 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9142
9143 switch (desc->dtad_kind) {
9144 case DTRACEAGG_MIN:
9145 agg->dtag_initial = UINT64_MAX;
9146 agg->dtag_aggregate = dtrace_aggregate_min;
9147 break;
9148
9149 case DTRACEAGG_MAX:
9150 agg->dtag_aggregate = dtrace_aggregate_max;
9151 break;
9152
9153 case DTRACEAGG_COUNT:
9154 agg->dtag_aggregate = dtrace_aggregate_count;
9155 break;
9156
9157 case DTRACEAGG_QUANTIZE:
9158 agg->dtag_aggregate = dtrace_aggregate_quantize;
9159 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9160 sizeof (uint64_t);
9161 break;
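/*
 * Illustrative arithmetic (not part of the original source): with 64-bit
 * values, ((64 - 1) * 2 + 1) == 127 buckets -- one per power of two of
 * negative and of positive magnitude, plus one for zero -- so each
 * quantize() aggregation key consumes 127 * 8 == 1016 bytes here.
 */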
9162
9163 case DTRACEAGG_LQUANTIZE: {
9164 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9165 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9166
9167 agg->dtag_initial = desc->dtad_arg;
9168 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9169
9170 if (step == 0 || levels == 0)
9171 goto err;
9172
9173 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9174 break;
9175 }
9176
9177 case DTRACEAGG_AVG:
9178 agg->dtag_aggregate = dtrace_aggregate_avg;
9179 size = sizeof (uint64_t) * 2;
9180 break;
9181
9182 case DTRACEAGG_SUM:
9183 agg->dtag_aggregate = dtrace_aggregate_sum;
9184 break;
9185
9186 default:
9187 goto err;
9188 }
9189
9190 agg->dtag_action.dta_rec.dtrd_size = size;
9191
9192 if (ntuple == 0)
9193 goto err;
9194
9195 /*
9196 * We must make sure that we have enough actions for the n-tuple.
9197 */
9198 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9199 if (DTRACEACT_ISAGG(act->dta_kind))
9200 break;
9201
9202 if (--ntuple == 0) {
9203 /*
9204 * This is the action with which our n-tuple begins.
9205 */
9206 agg->dtag_first = act;
9207 goto success;
9208 }
9209 }
9210
9211 /*
9212 * This n-tuple is short by ntuple elements. Return failure.
9213 */
9214 ASSERT(ntuple != 0);
9215 err:
9216 kmem_free(agg, sizeof (dtrace_aggregation_t));
9217 return (NULL);
9218
9219 success:
9220 /*
9221 * If the last action in the tuple has a size of zero, it's actually
9222 * an expression argument for the aggregating action.
9223 */
9224 ASSERT(ecb->dte_action_last != NULL);
9225 act = ecb->dte_action_last;
9226
9227 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9228 ASSERT(act->dta_difo != NULL);
9229
9230 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9231 agg->dtag_hasarg = 1;
9232 }
9233
9234 /*
9235 * We need to allocate an id for this aggregation.
9236 */
9237 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9238 VM_BESTFIT | VM_SLEEP);
9239
9240 if (aggid - 1 >= state->dts_naggregations) {
9241 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9242 dtrace_aggregation_t **aggs;
9243 int naggs = state->dts_naggregations << 1;
9244 int onaggs = state->dts_naggregations;
9245
9246 ASSERT(aggid == state->dts_naggregations + 1);
9247
9248 if (naggs == 0) {
9249 ASSERT(oaggs == NULL);
9250 naggs = 1;
9251 }
9252
9253 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9254
9255 if (oaggs != NULL) {
9256 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9257 kmem_free(oaggs, onaggs * sizeof (*aggs));
9258 }
9259
9260 state->dts_aggregations = aggs;
9261 state->dts_naggregations = naggs;
9262 }
9263
9264 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9265 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9266
9267 frec = &agg->dtag_first->dta_rec;
9268 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9269 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9270
9271 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9272 ASSERT(!act->dta_intuple);
9273 act->dta_intuple = 1;
9274 }
9275
9276 return (&agg->dtag_action);
9277 }
9278
9279 static void
9280 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9281 {
9282 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9283 dtrace_state_t *state = ecb->dte_state;
9284 dtrace_aggid_t aggid = agg->dtag_id;
9285
9286 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9287 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9288
9289 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9290 state->dts_aggregations[aggid - 1] = NULL;
9291
9292 kmem_free(agg, sizeof (dtrace_aggregation_t));
9293 }
9294
9295 static int
9296 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9297 {
9298 dtrace_action_t *action, *last;
9299 dtrace_difo_t *dp = desc->dtad_difo;
9300 uint32_t size = 0, align = sizeof (uint8_t), mask;
9301 uint16_t format = 0;
9302 dtrace_recdesc_t *rec;
9303 dtrace_state_t *state = ecb->dte_state;
9304 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
9305 uint64_t arg = desc->dtad_arg;
9306
9307 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9308 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9309
9310 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9311 /*
9312 * If this is an aggregating action, there must be neither
9313 * a speculate nor a commit on the action chain.
9314 */
9315 dtrace_action_t *act;
9316
9317 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9318 if (act->dta_kind == DTRACEACT_COMMIT)
9319 return (EINVAL);
9320
9321 if (act->dta_kind == DTRACEACT_SPECULATE)
9322 return (EINVAL);
9323 }
9324
9325 action = dtrace_ecb_aggregation_create(ecb, desc);
9326
9327 if (action == NULL)
9328 return (EINVAL);
9329 } else {
9330 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
9331 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
9332 dp != NULL && dp->dtdo_destructive)) {
9333 state->dts_destructive = 1;
9334 }
9335
9336 switch (desc->dtad_kind) {
9337 case DTRACEACT_PRINTF:
9338 case DTRACEACT_PRINTA:
9339 case DTRACEACT_SYSTEM:
9340 case DTRACEACT_FREOPEN:
9341 /*
9342 * We know that our arg is a string -- turn it into a
9343 * format.
9344 */
9345 if (arg == NULL) {
9346 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
9347 format = 0;
9348 } else {
9349 ASSERT(arg != NULL);
9350 /* ASSERT(arg > KERNELBASE); */
9351 format = dtrace_format_add(state,
9352 (char *)(uintptr_t)arg);
9353 }
9354
9355 /*FALLTHROUGH*/
9356 case DTRACEACT_LIBACT:
9357 case DTRACEACT_DIFEXPR:
9358 if (dp == NULL)
9359 return (EINVAL);
9360
9361 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
9362 break;
9363
9364 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
9365 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9366 return (EINVAL);
9367
9368 size = opt[DTRACEOPT_STRSIZE];
9369 }
9370
9371 break;
9372
9373 case DTRACEACT_STACK:
9374 if ((nframes = arg) == 0) {
9375 nframes = opt[DTRACEOPT_STACKFRAMES];
9376 ASSERT(nframes > 0);
9377 arg = nframes;
9378 }
9379
9380 size = nframes * sizeof (pc_t);
9381 break;
9382
9383 case DTRACEACT_JSTACK:
9384 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
9385 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
9386
9387 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
9388 nframes = opt[DTRACEOPT_JSTACKFRAMES];
9389
9390 arg = DTRACE_USTACK_ARG(nframes, strsize);
9391
9392 /*FALLTHROUGH*/
9393 case DTRACEACT_USTACK:
9394 if (desc->dtad_kind != DTRACEACT_JSTACK &&
9395 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
9396 strsize = DTRACE_USTACK_STRSIZE(arg);
9397 nframes = opt[DTRACEOPT_USTACKFRAMES];
9398 ASSERT(nframes > 0);
9399 arg = DTRACE_USTACK_ARG(nframes, strsize);
9400 }
9401
9402 /*
9403 * Save a slot for the pid.
9404 */
9405 size = (nframes + 1) * sizeof (uint64_t);
9406 size += DTRACE_USTACK_STRSIZE(arg);
9407 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
9408
9409 break;
9410
9411 case DTRACEACT_SYM:
9412 case DTRACEACT_MOD:
9413 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
9414 sizeof (uint64_t)) ||
9415 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9416 return (EINVAL);
9417 break;
9418
9419 case DTRACEACT_USYM:
9420 case DTRACEACT_UMOD:
9421 case DTRACEACT_UADDR:
9422 if (dp == NULL ||
9423 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
9424 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9425 return (EINVAL);
9426
9427 /*
9428 * We have a slot for the pid, plus a slot for the
9429 * argument. To keep things simple (aligned with
9430 * bitness-neutral sizing), we store each as a 64-bit
9431 * quantity.
9432 */
9433 size = 2 * sizeof (uint64_t);
9434 break;
9435
9436 case DTRACEACT_STOP:
9437 case DTRACEACT_BREAKPOINT:
9438 case DTRACEACT_PANIC:
9439 break;
9440
9441 case DTRACEACT_CHILL:
9442 case DTRACEACT_DISCARD:
9443 case DTRACEACT_RAISE:
9444 if (dp == NULL)
9445 return (EINVAL);
9446 break;
9447
9448 case DTRACEACT_EXIT:
9449 if (dp == NULL ||
9450 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
9451 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9452 return (EINVAL);
9453 break;
9454
9455 case DTRACEACT_SPECULATE:
9456 if (ecb->dte_size > sizeof (dtrace_epid_t))
9457 return (EINVAL);
9458
9459 if (dp == NULL)
9460 return (EINVAL);
9461
9462 state->dts_speculates = 1;
9463 break;
9464
9465 case DTRACEACT_COMMIT: {
9466 dtrace_action_t *act = ecb->dte_action;
9467
9468 for (; act != NULL; act = act->dta_next) {
9469 if (act->dta_kind == DTRACEACT_COMMIT)
9470 return (EINVAL);
9471 }
9472
9473 if (dp == NULL)
9474 return (EINVAL);
9475 break;
9476 }
9477
9478 default:
9479 return (EINVAL);
9480 }
9481
9482 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
9483 /*
9484 * If this is a data-storing action or a speculate,
9485 * we must be sure that there isn't a commit on the
9486 * action chain.
9487 */
9488 dtrace_action_t *act = ecb->dte_action;
9489
9490 for (; act != NULL; act = act->dta_next) {
9491 if (act->dta_kind == DTRACEACT_COMMIT)
9492 return (EINVAL);
9493 }
9494 }
9495
9496 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
9497 action->dta_rec.dtrd_size = size;
9498 }
9499
9500 action->dta_refcnt = 1;
9501 rec = &action->dta_rec;
9502 size = rec->dtrd_size;
9503
9504 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
9505 if (!(size & mask)) {
9506 align = mask + 1;
9507 break;
9508 }
9509 }
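/*
 * Illustrative note (not part of the original source): this loop picks the
 * largest power of two (capped at 8) that evenly divides the record size:
 * a 16-byte record gets 8-byte alignment, 12 bytes gets 4, 10 bytes gets 2,
 * and an odd size such as 7 falls through with the default 1-byte alignment.
 */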
9510
9511 action->dta_kind = desc->dtad_kind;
9512
9513 if ((action->dta_difo = dp) != NULL)
9514 dtrace_difo_hold(dp);
9515
9516 rec->dtrd_action = action->dta_kind;
9517 rec->dtrd_arg = arg;
9518 rec->dtrd_uarg = desc->dtad_uarg;
9519 rec->dtrd_alignment = (uint16_t)align;
9520 rec->dtrd_format = format;
9521
9522 if ((last = ecb->dte_action_last) != NULL) {
9523 ASSERT(ecb->dte_action != NULL);
9524 action->dta_prev = last;
9525 last->dta_next = action;
9526 } else {
9527 ASSERT(ecb->dte_action == NULL);
9528 ecb->dte_action = action;
9529 }
9530
9531 ecb->dte_action_last = action;
9532
9533 return (0);
9534 }
9535
9536 static void
9537 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
9538 {
9539 dtrace_action_t *act = ecb->dte_action, *next;
9540 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
9541 dtrace_difo_t *dp;
9542 uint16_t format;
9543
9544 if (act != NULL && act->dta_refcnt > 1) {
9545 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
9546 act->dta_refcnt--;
9547 } else {
9548 for (; act != NULL; act = next) {
9549 next = act->dta_next;
9550 ASSERT(next != NULL || act == ecb->dte_action_last);
9551 ASSERT(act->dta_refcnt == 1);
9552
9553 if ((format = act->dta_rec.dtrd_format) != 0)
9554 dtrace_format_remove(ecb->dte_state, format);
9555
9556 if ((dp = act->dta_difo) != NULL)
9557 dtrace_difo_release(dp, vstate);
9558
9559 if (DTRACEACT_ISAGG(act->dta_kind)) {
9560 dtrace_ecb_aggregation_destroy(ecb, act);
9561 } else {
9562 kmem_free(act, sizeof (dtrace_action_t));
9563 }
9564 }
9565 }
9566
9567 ecb->dte_action = NULL;
9568 ecb->dte_action_last = NULL;
9569 ecb->dte_size = sizeof (dtrace_epid_t);
9570 }
9571
9572 static void
9573 dtrace_ecb_disable(dtrace_ecb_t *ecb)
9574 {
9575 /*
9576 * We disable the ECB by removing it from its probe.
9577 */
9578 dtrace_ecb_t *pecb, *prev = NULL;
9579 dtrace_probe_t *probe = ecb->dte_probe;
9580
9581 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9582
9583 if (probe == NULL) {
9584 /*
9585 * This is the NULL probe; there is nothing to disable.
9586 */
9587 return;
9588 }
9589
9590 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
9591 if (pecb == ecb)
9592 break;
9593 prev = pecb;
9594 }
9595
9596 ASSERT(pecb != NULL);
9597
9598 if (prev == NULL) {
9599 probe->dtpr_ecb = ecb->dte_next;
9600 } else {
9601 prev->dte_next = ecb->dte_next;
9602 }
9603
9604 if (ecb == probe->dtpr_ecb_last) {
9605 ASSERT(ecb->dte_next == NULL);
9606 probe->dtpr_ecb_last = prev;
9607 }
9608
9609 /*
9610 * The ECB has been disconnected from the probe; now sync to assure
9611 * that all CPUs have seen the change before returning.
9612 */
9613 dtrace_sync();
9614
9615 if (probe->dtpr_ecb == NULL) {
9616 /*
9617 * That was the last ECB on the probe; clear the predicate
9618 * cache ID for the probe, disable it and sync one more time
9619 * to assure that we'll never hit it again.
9620 */
9621 dtrace_provider_t *prov = probe->dtpr_provider;
9622
9623 ASSERT(ecb->dte_next == NULL);
9624 ASSERT(probe->dtpr_ecb_last == NULL);
9625 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
9626 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
9627 probe->dtpr_id, probe->dtpr_arg);
9628 dtrace_sync();
9629 } else {
9630 /*
9631 * There is at least one ECB remaining on the probe. If there
9632 * is _exactly_ one, set the probe's predicate cache ID to be
9633 * the predicate cache ID of the remaining ECB.
9634 */
9635 ASSERT(probe->dtpr_ecb_last != NULL);
9636 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
9637
9638 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
9639 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
9640
9641 ASSERT(probe->dtpr_ecb->dte_next == NULL);
9642
9643 if (p != NULL)
9644 probe->dtpr_predcache = p->dtp_cacheid;
9645 }
9646
9647 ecb->dte_next = NULL;
9648 }
9649 }
9650
9651 static void
9652 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
9653 {
9654 dtrace_state_t *state = ecb->dte_state;
9655 dtrace_vstate_t *vstate = &state->dts_vstate;
9656 dtrace_predicate_t *pred;
9657 dtrace_epid_t epid = ecb->dte_epid;
9658
9659 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9660 ASSERT(ecb->dte_next == NULL);
9661 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
9662
9663 if ((pred = ecb->dte_predicate) != NULL)
9664 dtrace_predicate_release(pred, vstate);
9665
9666 dtrace_ecb_action_remove(ecb);
9667
9668 ASSERT(state->dts_ecbs[epid - 1] == ecb);
9669 state->dts_ecbs[epid - 1] = NULL;
9670
9671 kmem_free(ecb, sizeof (dtrace_ecb_t));
9672 }
9673
9674 static dtrace_ecb_t *
9675 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
9676 dtrace_enabling_t *enab)
9677 {
9678 dtrace_ecb_t *ecb;
9679 dtrace_predicate_t *pred;
9680 dtrace_actdesc_t *act;
9681 dtrace_provider_t *prov;
9682 dtrace_ecbdesc_t *desc = enab->dten_current;
9683
9684 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9685 ASSERT(state != NULL);
9686
9687 ecb = dtrace_ecb_add(state, probe);
9688 ecb->dte_uarg = desc->dted_uarg;
9689
9690 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
9691 dtrace_predicate_hold(pred);
9692 ecb->dte_predicate = pred;
9693 }
9694
9695 if (probe != NULL) {
9696 /*
9697 * If the provider shows more leg than the consumer is old
9698 * enough to see, we need to enable the appropriate implicit
9699 * predicate bits to prevent the ecb from activating at
9700 * revealing times.
9701 *
9702 * Providers specifying DTRACE_PRIV_USER at register time
9703 * are stating that they need the /proc-style privilege
9704 * model to be enforced, and this is what DTRACE_COND_OWNER
9705 * and DTRACE_COND_ZONEOWNER will then do at probe time.
9706 */
9707 prov = probe->dtpr_provider;
9708 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
9709 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9710 ecb->dte_cond |= DTRACE_COND_OWNER;
9711
9712 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
9713 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9714 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
9715
9716 /*
9717 * If the provider shows us kernel innards and the user
9718 * is lacking sufficient privilege, enable the
9719 * DTRACE_COND_USERMODE implicit predicate.
9720 */
9721 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
9722 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
9723 ecb->dte_cond |= DTRACE_COND_USERMODE;
9724 }
9725
9726 if (dtrace_ecb_create_cache != NULL) {
9727 /*
9728 * If we have a cached ecb, we'll use its action list instead
9729 * of creating our own (saving both time and space).
9730 */
9731 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
9732 dtrace_action_t *act = cached->dte_action;
9733
9734 if (act != NULL) {
9735 ASSERT(act->dta_refcnt > 0);
9736 act->dta_refcnt++;
9737 ecb->dte_action = act;
9738 ecb->dte_action_last = cached->dte_action_last;
9739 ecb->dte_needed = cached->dte_needed;
9740 ecb->dte_size = cached->dte_size;
9741 ecb->dte_alignment = cached->dte_alignment;
9742 }
9743
9744 return (ecb);
9745 }
9746
9747 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
9748 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
9749 dtrace_ecb_destroy(ecb);
9750 return (NULL);
9751 }
9752 }
9753
9754 dtrace_ecb_resize(ecb);
9755
9756 return (dtrace_ecb_create_cache = ecb);
9757 }
9758
9759 static int
9760 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
9761 {
9762 dtrace_ecb_t *ecb;
9763 dtrace_enabling_t *enab = arg;
9764 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
9765
9766 ASSERT(state != NULL);
9767
9768 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
9769 /*
9770 * This probe was created in a generation for which this
9771 * enabling has previously created ECBs; we don't want to
9772 * enable it again, so just kick out.
9773 */
9774 return (DTRACE_MATCH_NEXT);
9775 }
9776
9777 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
9778 return (DTRACE_MATCH_DONE);
9779
9780 dtrace_ecb_enable(ecb);
9781 return (DTRACE_MATCH_NEXT);
9782 }
9783
9784 static dtrace_ecb_t *
9785 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
9786 {
9787 dtrace_ecb_t *ecb;
9788
9789 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9790
9791 if (id == 0 || id > state->dts_necbs)
9792 return (NULL);
9793
9794 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
9795 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
9796
9797 return (state->dts_ecbs[id - 1]);
9798 }
9799
9800 static dtrace_aggregation_t *
9801 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
9802 {
9803 dtrace_aggregation_t *agg;
9804
9805 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9806
9807 if (id == 0 || id > state->dts_naggregations)
9808 return (NULL);
9809
9810 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
9811 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
9812 agg->dtag_id == id);
9813
9814 return (state->dts_aggregations[id - 1]);
9815 }
9816
9817 /*
9818 * DTrace Buffer Functions
9819 *
9820 * The following functions manipulate DTrace buffers. Most of these functions
9821 * are called in the context of establishing or processing consumer state;
9822 * exceptions are explicitly noted.
9823 */
9824
9825 /*
9826 * Note: called from cross call context. This function switches the two
9827 * buffers on a given CPU. The atomicity of this operation is assured by
9828 * disabling interrupts while the actual switch takes place; the disabling of
9829 * interrupts serializes the execution with any execution of dtrace_probe() on
9830 * the same CPU.
9831 */
9832 static void
9833 dtrace_buffer_switch(dtrace_buffer_t *buf)
9834 {
9835 caddr_t tomax = buf->dtb_tomax;
9836 caddr_t xamot = buf->dtb_xamot;
9837 dtrace_icookie_t cookie;
9838
9839 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
9840 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
9841
9842 cookie = dtrace_interrupt_disable();
9843 buf->dtb_tomax = xamot;
9844 buf->dtb_xamot = tomax;
9845 buf->dtb_xamot_drops = buf->dtb_drops;
9846 buf->dtb_xamot_offset = buf->dtb_offset;
9847 buf->dtb_xamot_errors = buf->dtb_errors;
9848 buf->dtb_xamot_flags = buf->dtb_flags;
9849 buf->dtb_offset = 0;
9850 buf->dtb_drops = 0;
9851 buf->dtb_errors = 0;
9852 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
9853 dtrace_interrupt_enable(cookie);
9854 }
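/*
 * Illustrative note (not part of the original source): "xamot" is simply
 * "tomax" spelled backwards -- the consumer-facing alternate to the active
 * per-CPU buffer.  The swap above exchanges the two pointers along with
 * their drop/offset/error bookkeeping in a single interrupts-disabled
 * window, which is what keeps it atomic with respect to dtrace_probe()
 * running on the same CPU, as noted in the comment above the function.
 */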
9855
9856 /*
9857 * Note: called from cross call context. This function activates a buffer
9858 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
9859 * is guaranteed by the disabling of interrupts.
9860 */
9861 static void
9862 dtrace_buffer_activate(dtrace_state_t *state)
9863 {
9864 dtrace_buffer_t *buf;
9865 dtrace_icookie_t cookie = dtrace_interrupt_disable();
9866
9867 buf = &state->dts_buffer[CPU->cpu_id];
9868
9869 if (buf->dtb_tomax != NULL) {
9870 /*
9871 * We might like to assert that the buffer is marked inactive,
9872 * but this isn't necessarily true: the buffer for the CPU
9873 * that processes the BEGIN probe has its buffer activated
9874 * manually. In this case, we take the (harmless) action of
9875 * re-clearing the INACTIVE bit.
9876 */
9877 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
9878 }
9879
9880 dtrace_interrupt_enable(cookie);
9881 }
9882
9883 static int
9884 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
9885 processorid_t cpu)
9886 {
9887 cpu_t *cp;
9888 dtrace_buffer_t *buf;
9889
9890 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
9891 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9892
9893 if (size > dtrace_nonroot_maxsize &&
9894 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
9895 return (EFBIG);
9896
9897 #if defined(__APPLE__)
9898 if (size > (sane_size / 8) / NCPU) /* As in kdbg_set_nkdbufs(), roughly. */
9899 return (ENOMEM);
9900 #endif /* __APPLE__ */
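/*
 * Illustrative note (not part of the original source): sane_size is roughly
 * the amount of physical memory the kernel is managing, so this
 * Apple-specific cap limits each per-CPU buffer to one eighth of memory
 * divided across NCPU -- e.g. about 128MB per CPU on an 8GB, 8-CPU machine.
 */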
9901
9902 cp = cpu_list;
9903
9904 do {
9905 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
9906 continue;
9907
9908 buf = &bufs[cp->cpu_id];
9909
9910 /*
9911 * If there is already a buffer allocated for this CPU, it
9912 * is only possible that this is a DR event. In this case,
9913 * the buffer size must match our specified size.
9914 */
9915 if (buf->dtb_tomax != NULL) {
9916 ASSERT(buf->dtb_size == size);
9917 continue;
9918 }
9919
9920 ASSERT(buf->dtb_xamot == NULL);
9921
9922 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
9923 goto err;
9924
9925 buf->dtb_size = size;
9926 buf->dtb_flags = flags;
9927 buf->dtb_offset = 0;
9928 buf->dtb_drops = 0;
9929
9930 if (flags & DTRACEBUF_NOSWITCH)
9931 continue;
9932
9933 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
9934 goto err;
9935 } while ((cp = cp->cpu_next) != cpu_list);
9936
9937 return (0);
9938
9939 err:
9940 cp = cpu_list;
9941
9942 do {
9943 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
9944 continue;
9945
9946 buf = &bufs[cp->cpu_id];
9947
9948 if (buf->dtb_xamot != NULL) {
9949 ASSERT(buf->dtb_tomax != NULL);
9950 ASSERT(buf->dtb_size == size);
9951 kmem_free(buf->dtb_xamot, size);
9952 }
9953
9954 if (buf->dtb_tomax != NULL) {
9955 ASSERT(buf->dtb_size == size);
9956 kmem_free(buf->dtb_tomax, size);
9957 }
9958
9959 buf->dtb_tomax = NULL;
9960 buf->dtb_xamot = NULL;
9961 buf->dtb_size = 0;
9962 } while ((cp = cp->cpu_next) != cpu_list);
9963
9964 return (ENOMEM);
9965 }
9966
9967 /*
9968 * Note: called from probe context. This function just increments the drop
9969 * count on a buffer. It has been made a function to allow for the
9970 * possibility of understanding the source of mysterious drop counts. (A
9971 * problem for which one may be particularly disappointed that DTrace cannot
9972 * be used to understand DTrace.)
9973 */
9974 static void
9975 dtrace_buffer_drop(dtrace_buffer_t *buf)
9976 {
9977 buf->dtb_drops++;
9978 }
9979
9980 /*
9981 * Note: called from probe context. This function is called to reserve space
9982 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
9983 * mstate. Returns the new offset in the buffer, or a negative value if an
9984 * error has occurred.
9985 */
9986 static intptr_t
9987 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
9988 dtrace_state_t *state, dtrace_mstate_t *mstate)
9989 {
9990 intptr_t offs = buf->dtb_offset, soffs;
9991 intptr_t woffs;
9992 caddr_t tomax;
9993 size_t total;
9994
9995 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
9996 return (-1);
9997
9998 if ((tomax = buf->dtb_tomax) == NULL) {
9999 dtrace_buffer_drop(buf);
10000 return (-1);
10001 }
10002
10003 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10004 while (offs & (align - 1)) {
10005 /*
10006 * Assert that our alignment is off by a number which
10007 * is itself sizeof (uint32_t) aligned.
10008 */
10009 ASSERT(!((align - (offs & (align - 1))) &
10010 (sizeof (uint32_t) - 1)));
10011 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10012 offs += sizeof (uint32_t);
10013 }
10014
10015 if ((soffs = offs + needed) > buf->dtb_size) {
10016 dtrace_buffer_drop(buf);
10017 return (-1);
10018 }
10019
10020 if (mstate == NULL)
10021 return (offs);
10022
10023 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10024 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10025 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10026
10027 return (offs);
10028 }
10029
10030 if (buf->dtb_flags & DTRACEBUF_FILL) {
10031 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10032 (buf->dtb_flags & DTRACEBUF_FULL))
10033 return (-1);
10034 goto out;
10035 }
10036
10037 total = needed + (offs & (align - 1));
10038
10039 /*
10040 * For a ring buffer, life is quite a bit more complicated. Before
10041 * we can store any padding, we need to adjust our wrapping offset.
10042 * (If we've never before wrapped or we're not about to, no adjustment
10043 * is required.)
10044 */
10045 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10046 offs + total > buf->dtb_size) {
10047 woffs = buf->dtb_xamot_offset;
10048
10049 if (offs + total > buf->dtb_size) {
10050 /*
10051 * We can't fit in the end of the buffer. First, a
10052 * sanity check that we can fit in the buffer at all.
10053 */
10054 if (total > buf->dtb_size) {
10055 dtrace_buffer_drop(buf);
10056 return (-1);
10057 }
10058
10059 /*
10060 * We're going to be storing at the top of the buffer,
10061 * so now we need to deal with the wrapped offset. We
10062 * only reset our wrapped offset to 0 if it is
10063 * currently greater than the current offset. If it
10064 * is less than the current offset, it is because a
10065 * previous allocation induced a wrap -- but the
10066 * allocation didn't subsequently take the space due
10067 * to an error or false predicate evaluation. In this
10068 * case, we'll just leave the wrapped offset alone: if
10069 * the wrapped offset hasn't been advanced far enough
10070 * for this allocation, it will be adjusted in the
10071 * lower loop.
10072 */
10073 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10074 if (woffs >= offs)
10075 woffs = 0;
10076 } else {
10077 woffs = 0;
10078 }
10079
10080 /*
10081 * Now we know that we're going to be storing to the
10082 * top of the buffer and that there is room for us
10083 * there. We need to clear the buffer from the current
10084 * offset to the end (there may be old gunk there).
10085 */
10086 while (offs < buf->dtb_size)
10087 tomax[offs++] = 0;
10088
10089 /*
10090 * We need to set our offset to zero. And because we
10091 * are wrapping, we need to set the bit indicating as
10092 * much. We can also adjust our needed space back
10093 * down to the space required by the ECB -- we know
10094 * that the top of the buffer is aligned.
10095 */
10096 offs = 0;
10097 total = needed;
10098 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10099 } else {
10100 /*
10101 * There is room for us in the buffer, so we simply
10102 * need to check the wrapped offset.
10103 */
10104 if (woffs < offs) {
10105 /*
10106 * The wrapped offset is less than the offset.
10107 * This can happen if we allocated buffer space
10108 * that induced a wrap, but then we didn't
10109 * subsequently take the space due to an error
10110 * or false predicate evaluation. This is
10111 * okay; we know that _this_ allocation isn't
10112 * going to induce a wrap. We still can't
10113 * reset the wrapped offset to be zero,
10114 * however: the space may have been trashed in
10115 * the previous failed probe attempt. But at
10116 * least the wrapped offset doesn't need to
10117 * be adjusted at all...
10118 */
10119 goto out;
10120 }
10121 }
10122
10123 while (offs + total > woffs) {
10124 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10125 size_t size;
10126
10127 if (epid == DTRACE_EPIDNONE) {
10128 size = sizeof (uint32_t);
10129 } else {
10130 ASSERT(epid <= state->dts_necbs);
10131 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10132
10133 size = state->dts_ecbs[epid - 1]->dte_size;
10134 }
10135
10136 ASSERT(woffs + size <= buf->dtb_size);
10137 ASSERT(size != 0);
10138
10139 if (woffs + size == buf->dtb_size) {
10140 /*
10141 * We've reached the end of the buffer; we want
10142 * to set the wrapped offset to 0 and break
10143 * out. However, if the offs is 0, then we're
10144 * in a strange edge-condition: the amount of
10145 * space that we want to reserve plus the size
10146 * of the record that we're overwriting is
10147 * greater than the size of the buffer. This
10148 * is problematic because if we reserve the
10149 * space but subsequently don't consume it (due
10150 * to a failed predicate or error) the wrapped
10151 * offset will be 0 -- yet the EPID at offset 0
10152 * will not be committed. This situation is
10153 * relatively easy to deal with: if we're in
10154 * this case, the buffer is indistinguishable
10155 * from one that hasn't wrapped; we need only
10156 * finish the job by clearing the wrapped bit,
10157 * explicitly setting the offset to be 0, and
10158 * zero'ing out the old data in the buffer.
10159 */
10160 if (offs == 0) {
10161 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10162 buf->dtb_offset = 0;
10163 woffs = total;
10164
10165 while (woffs < buf->dtb_size)
10166 tomax[woffs++] = 0;
10167 }
10168
10169 woffs = 0;
10170 break;
10171 }
10172
10173 woffs += size;
10174 }
10175
10176 /*
10177 * We have a wrapped offset. It may be that the wrapped offset
10178 * has become zero -- that's okay.
10179 */
10180 buf->dtb_xamot_offset = woffs;
10181 }
10182
10183 out:
10184 /*
10185 * Now we can plow the buffer with any necessary padding.
10186 */
10187 while (offs & (align - 1)) {
10188 /*
10189 * Assert that our alignment is off by a number which
10190 * is itself sizeof (uint32_t) aligned.
10191 */
10192 ASSERT(!((align - (offs & (align - 1))) &
10193 (sizeof (uint32_t) - 1)));
10194 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10195 offs += sizeof (uint32_t);
10196 }
10197
10198 if (buf->dtb_flags & DTRACEBUF_FILL) {
10199 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10200 buf->dtb_flags |= DTRACEBUF_FULL;
10201 return (-1);
10202 }
10203 }
10204
10205 if (mstate == NULL)
10206 return (offs);
10207
10208 /*
10209 * For ring buffers and fill buffers, the scratch space is always
10210 * the inactive buffer.
10211 */
10212 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10213 mstate->dtms_scratch_size = buf->dtb_size;
10214 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10215
10216 return (offs);
10217 }
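
/*
 * Illustrative sketch (not part of the original source): the padding loops in
 * dtrace_buffer_reserve() advance the offset to the requested alignment one
 * uint32_t at a time, stamping each filler word with DTRACE_EPIDNONE so that
 * a consumer walking the buffer can recognize and skip the padding.  A
 * stand-alone rendition of that arithmetic on a plain byte array might look
 * like the hypothetical toy_pad() below.
 */
#if 0
static size_t
toy_pad(char *buf, size_t offs, size_t align)
{
	/*
	 * As asserted in the code above, align is a power of two and the gap
	 * to alignment is a multiple of sizeof (uint32_t).
	 */
	while (offs & (align - 1)) {
		uint32_t filler = DTRACE_EPIDNONE;

		bcopy(&filler, buf + offs, sizeof (filler));
		offs += sizeof (filler);
	}

	return (offs);
}
#endif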
10218
10219 static void
10220 dtrace_buffer_polish(dtrace_buffer_t *buf)
10221 {
10222 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10223 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10224
10225 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10226 return;
10227
10228 /*
10229 * We need to polish the ring buffer. There are three cases:
10230 *
10231 * - The first (and presumably most common) is that there is no gap
10232 * between the buffer offset and the wrapped offset. In this case,
10233 * there is nothing in the buffer that isn't valid data; we can
10234 * mark the buffer as polished and return.
10235 *
10236 * - The second (less common than the first but still more common
10237 * than the third) is that there is a gap between the buffer offset
10238 * and the wrapped offset, and the wrapped offset is larger than the
10239 * buffer offset. This can happen because of an alignment issue, or
10240 * can happen because of a call to dtrace_buffer_reserve() that
10241 * didn't subsequently consume the buffer space. In this case,
10242 * we need to zero the data from the buffer offset to the wrapped
10243 * offset.
10244 *
10245 * - The third (and least common) is that there is a gap between the
10246 * buffer offset and the wrapped offset, but the wrapped offset is
10247 * _less_ than the buffer offset. This can only happen because a
10248 * call to dtrace_buffer_reserve() induced a wrap, but the space
10249 * was not subsequently consumed. In this case, we need to zero the
10250 * space from the offset to the end of the buffer _and_ from the
10251 * top of the buffer to the wrapped offset.
10252 */
10253 if (buf->dtb_offset < buf->dtb_xamot_offset) {
10254 bzero(buf->dtb_tomax + buf->dtb_offset,
10255 buf->dtb_xamot_offset - buf->dtb_offset);
10256 }
10257
10258 if (buf->dtb_offset > buf->dtb_xamot_offset) {
10259 bzero(buf->dtb_tomax + buf->dtb_offset,
10260 buf->dtb_size - buf->dtb_offset);
10261 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
10262 }
10263 }
10264
10265 static void
10266 dtrace_buffer_free(dtrace_buffer_t *bufs)
10267 {
10268 int i;
10269
10270 for (i = 0; i < NCPU; i++) {
10271 dtrace_buffer_t *buf = &bufs[i];
10272
10273 if (buf->dtb_tomax == NULL) {
10274 ASSERT(buf->dtb_xamot == NULL);
10275 ASSERT(buf->dtb_size == 0);
10276 continue;
10277 }
10278
10279 if (buf->dtb_xamot != NULL) {
10280 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10281 kmem_free(buf->dtb_xamot, buf->dtb_size);
10282 }
10283
10284 kmem_free(buf->dtb_tomax, buf->dtb_size);
10285 buf->dtb_size = 0;
10286 buf->dtb_tomax = NULL;
10287 buf->dtb_xamot = NULL;
10288 }
10289 }
10290
10291 /*
10292 * DTrace Enabling Functions
10293 */
10294 static dtrace_enabling_t *
10295 dtrace_enabling_create(dtrace_vstate_t *vstate)
10296 {
10297 dtrace_enabling_t *enab;
10298
10299 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
10300 enab->dten_vstate = vstate;
10301
10302 return (enab);
10303 }
10304
10305 static void
10306 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
10307 {
10308 dtrace_ecbdesc_t **ndesc;
10309 size_t osize, nsize;
10310
10311 /*
10312 * We can't add to enablings after we've enabled them, or after we've
10313 * retained them.
10314 */
10315 ASSERT(enab->dten_probegen == 0);
10316 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10317
10318 #if defined(__APPLE__)
10319 if (ecb == NULL) return; /* XXX protection against gcc 4.0 botch on x86 */
10320 #endif /* __APPLE__ */
10321
10322 if (enab->dten_ndesc < enab->dten_maxdesc) {
10323 enab->dten_desc[enab->dten_ndesc++] = ecb;
10324 return;
10325 }
10326
10327 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10328
10329 if (enab->dten_maxdesc == 0) {
10330 enab->dten_maxdesc = 1;
10331 } else {
10332 enab->dten_maxdesc <<= 1;
10333 }
10334
10335 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
10336
10337 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10338 ndesc = kmem_zalloc(nsize, KM_SLEEP);
10339 bcopy(enab->dten_desc, ndesc, osize);
10340 kmem_free(enab->dten_desc, osize);
10341
10342 enab->dten_desc = ndesc;
10343 enab->dten_desc[enab->dten_ndesc++] = ecb;
10344 }
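
/*
 * Illustrative sketch (not part of the original source): dtrace_enabling_add()
 * grows the ECB description array geometrically -- one slot to start, then
 * doubling the capacity each time it fills -- so the amortized cost of an
 * append stays constant.  The hypothetical helper below shows the same
 * pattern with generic pointers and the allocators used in this file.
 */
#if 0
typedef struct toy_vec {
	void	**tv_slots;
	int	tv_nused;
	int	tv_nalloc;
} toy_vec_t;

static void
toy_vec_append(toy_vec_t *v, void *item)
{
	if (v->tv_nused == v->tv_nalloc) {
		int nalloc = (v->tv_nalloc == 0) ? 1 : (v->tv_nalloc << 1);
		size_t osize = v->tv_nalloc * sizeof (void *);
		void **slots = kmem_zalloc(nalloc * sizeof (void *), KM_SLEEP);

		bcopy(v->tv_slots, slots, osize);
		kmem_free(v->tv_slots, osize);
		v->tv_slots = slots;
		v->tv_nalloc = nalloc;
	}

	v->tv_slots[v->tv_nused++] = item;
}
#endif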
10345
10346 static void
10347 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
10348 dtrace_probedesc_t *pd)
10349 {
10350 dtrace_ecbdesc_t *new;
10351 dtrace_predicate_t *pred;
10352 dtrace_actdesc_t *act;
10353
10354 /*
10355 * We're going to create a new ECB description that matches the
10356 * specified ECB in every way, but has the specified probe description.
10357 */
10358 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
10359
10360 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
10361 dtrace_predicate_hold(pred);
10362
10363 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
10364 dtrace_actdesc_hold(act);
10365
10366 new->dted_action = ecb->dted_action;
10367 new->dted_pred = ecb->dted_pred;
10368 new->dted_probe = *pd;
10369 new->dted_uarg = ecb->dted_uarg;
10370
10371 dtrace_enabling_add(enab, new);
10372 }
10373
10374 static void
10375 dtrace_enabling_dump(dtrace_enabling_t *enab)
10376 {
10377 int i;
10378
10379 for (i = 0; i < enab->dten_ndesc; i++) {
10380 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
10381
10382 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
10383 desc->dtpd_provider, desc->dtpd_mod,
10384 desc->dtpd_func, desc->dtpd_name);
10385 }
10386 }
10387
10388 static void
10389 dtrace_enabling_destroy(dtrace_enabling_t *enab)
10390 {
10391 int i;
10392 dtrace_ecbdesc_t *ep;
10393 dtrace_vstate_t *vstate = enab->dten_vstate;
10394
10395 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10396
10397 for (i = 0; i < enab->dten_ndesc; i++) {
10398 dtrace_actdesc_t *act, *next;
10399 dtrace_predicate_t *pred;
10400
10401 ep = enab->dten_desc[i];
10402
10403 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
10404 dtrace_predicate_release(pred, vstate);
10405
10406 for (act = ep->dted_action; act != NULL; act = next) {
10407 next = act->dtad_next;
10408 dtrace_actdesc_release(act, vstate);
10409 }
10410
10411 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
10412 }
10413
10414 kmem_free(enab->dten_desc,
10415 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
10416
10417 /*
10418 * If this was a retained enabling, decrement the dts_nretained count
10419 * and take it off of the dtrace_retained list.
10420 */
10421 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
10422 dtrace_retained == enab) {
10423 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10424 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
10425 enab->dten_vstate->dtvs_state->dts_nretained--;
10426 }
10427
10428 if (enab->dten_prev == NULL) {
10429 if (dtrace_retained == enab) {
10430 dtrace_retained = enab->dten_next;
10431
10432 if (dtrace_retained != NULL)
10433 dtrace_retained->dten_prev = NULL;
10434 }
10435 } else {
10436 ASSERT(enab != dtrace_retained);
10437 ASSERT(dtrace_retained != NULL);
10438 enab->dten_prev->dten_next = enab->dten_next;
10439 }
10440
10441 if (enab->dten_next != NULL) {
10442 ASSERT(dtrace_retained != NULL);
10443 enab->dten_next->dten_prev = enab->dten_prev;
10444 }
10445
10446 kmem_free(enab, sizeof (dtrace_enabling_t));
10447 }
10448
10449 static int
10450 dtrace_enabling_retain(dtrace_enabling_t *enab)
10451 {
10452 dtrace_state_t *state;
10453
10454 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10455 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10456 ASSERT(enab->dten_vstate != NULL);
10457
10458 state = enab->dten_vstate->dtvs_state;
10459 ASSERT(state != NULL);
10460
10461 /*
10462 * We only allow each state to retain dtrace_retain_max enablings.
10463 */
10464 if (state->dts_nretained >= dtrace_retain_max)
10465 return (ENOSPC);
10466
10467 state->dts_nretained++;
10468
10469 if (dtrace_retained == NULL) {
10470 dtrace_retained = enab;
10471 return (0);
10472 }
10473
10474 enab->dten_next = dtrace_retained;
10475 dtrace_retained->dten_prev = enab;
10476 dtrace_retained = enab;
10477
10478 return (0);
10479 }
10480
10481 static int
10482 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
10483 dtrace_probedesc_t *create)
10484 {
10485 dtrace_enabling_t *new, *enab;
10486 int found = 0, err = ENOENT;
10487
10488 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10489 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
10490 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
10491 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
10492 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
10493
10494 new = dtrace_enabling_create(&state->dts_vstate);
10495
10496 /*
10497 * Iterate over all retained enablings, looking for enablings that
10498 * match the specified state.
10499 */
10500 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10501 int i;
10502
10503 /*
10504 * dtvs_state can only be NULL for helper enablings -- and
10505 * helper enablings can't be retained.
10506 */
10507 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10508
10509 if (enab->dten_vstate->dtvs_state != state)
10510 continue;
10511
10512 /*
10513 * Now iterate over each probe description; we're looking for
10514 * an exact match to the specified probe description.
10515 */
10516 for (i = 0; i < enab->dten_ndesc; i++) {
10517 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10518 dtrace_probedesc_t *pd = &ep->dted_probe;
10519
10520 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
10521 continue;
10522
10523 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
10524 continue;
10525
10526 if (strcmp(pd->dtpd_func, match->dtpd_func))
10527 continue;
10528
10529 if (strcmp(pd->dtpd_name, match->dtpd_name))
10530 continue;
10531
10532 /*
10533 * We have a winning probe! Add it to our growing
10534 * enabling.
10535 */
10536 found = 1;
10537 dtrace_enabling_addlike(new, ep, create);
10538 }
10539 }
10540
10541 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
10542 dtrace_enabling_destroy(new);
10543 return (err);
10544 }
10545
10546 return (0);
10547 }
10548
10549 static void
10550 dtrace_enabling_retract(dtrace_state_t *state)
10551 {
10552 dtrace_enabling_t *enab, *next;
10553
10554 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10555
10556 /*
10557 * Iterate over all retained enablings, destroying the enablings retained
10558 * for the specified state.
10559 */
10560 for (enab = dtrace_retained; enab != NULL; enab = next) {
10561 next = enab->dten_next;
10562
10563 /*
10564 * dtvs_state can only be NULL for helper enablings -- and
10565 * helper enablings can't be retained.
10566 */
10567 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10568
10569 if (enab->dten_vstate->dtvs_state == state) {
10570 ASSERT(state->dts_nretained > 0);
10571 dtrace_enabling_destroy(enab);
10572 }
10573 }
10574
10575 ASSERT(state->dts_nretained == 0);
10576 }
10577
10578 static int
10579 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
10580 {
10581 int i = 0;
10582 int matched = 0;
10583
10584 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
10585 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10586
10587 for (i = 0; i < enab->dten_ndesc; i++) {
10588 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10589
10590 enab->dten_current = ep;
10591 enab->dten_error = 0;
10592
10593 matched += dtrace_probe_enable(&ep->dted_probe, enab);
10594
10595 if (enab->dten_error != 0) {
10596 /*
10597 * If we get an error half-way through enabling the
10598 * probes, we kick out -- perhaps with some number of
10599 * them enabled. Leaving enabled probes enabled may
10600 * be slightly confusing for user-level, but we expect
10601 * that no one will attempt to actually drive on in
10602 * the face of such errors. If this is an anonymous
10603 * enabling (indicated with a NULL nmatched pointer),
10604 * we cmn_err() a message. We aren't expecting to
10605 * get such an error -- insofar as it can exist at all,
10606 * it would be the result of corrupted DOF in the driver
10607 * properties.
10608 */
10609 if (nmatched == NULL) {
10610 cmn_err(CE_WARN, "dtrace_enabling_match() "
10611 "error on %p: %d", (void *)ep,
10612 enab->dten_error);
10613 }
10614
10615 return (enab->dten_error);
10616 }
10617 }
10618
10619 enab->dten_probegen = dtrace_probegen;
10620 if (nmatched != NULL)
10621 *nmatched = matched;
10622
10623 return (0);
10624 }
10625
10626 static void
10627 dtrace_enabling_matchall(void)
10628 {
10629 dtrace_enabling_t *enab;
10630
10631 lck_mtx_lock(&cpu_lock);
10632 lck_mtx_lock(&dtrace_lock);
10633
10634 /*
10635 * Because we can be called after dtrace_detach() has been called, we
10636 * cannot assert that there are retained enablings. We can safely
10637 * load from dtrace_retained, however: the taskq_destroy() at the
10638 * end of dtrace_detach() will block pending our completion.
10639 */
10640 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next)
10641 (void) dtrace_enabling_match(enab, NULL);
10642
10643 lck_mtx_unlock(&dtrace_lock);
10644 lck_mtx_unlock(&cpu_lock);
10645 }
10646
10647 static int
10648 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched)
10649 {
10650 dtrace_enabling_t *enab;
10651 int matched, total = 0, err;
10652
10653 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
10654 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10655
10656 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10657 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10658
10659 if (enab->dten_vstate->dtvs_state != state)
10660 continue;
10661
10662 if ((err = dtrace_enabling_match(enab, &matched)) != 0)
10663 return (err);
10664
10665 total += matched;
10666 }
10667
10668 if (nmatched != NULL)
10669 *nmatched = total;
10670
10671 return (0);
10672 }
10673
10674 /*
10675 * If an enabling is to be enabled without having matched probes (that is, if
10676 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
10677 * enabling must be _primed_ by creating an ECB for every ECB description.
10678 * This must be done to assure that we know the number of speculations, the
10679 * number of aggregations, the minimum buffer size needed, etc. before we
10680 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
10681 * enabling any probes, we create ECBs for every ECB description, but with a
10682 * NULL probe -- which is exactly what this function does.
10683 */
10684 static void
10685 dtrace_enabling_prime(dtrace_state_t *state)
10686 {
10687 dtrace_enabling_t *enab;
10688 int i;
10689
10690 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10691 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10692
10693 if (enab->dten_vstate->dtvs_state != state)
10694 continue;
10695
10696 /*
10697 * We don't want to prime an enabling more than once, lest
10698 * we allow a malicious user to induce resource exhaustion.
10699 * (The ECBs that result from priming an enabling aren't
10700 * leaked -- but they also aren't deallocated until the
10701 * consumer state is destroyed.)
10702 */
10703 if (enab->dten_primed)
10704 continue;
10705
10706 for (i = 0; i < enab->dten_ndesc; i++) {
10707 enab->dten_current = enab->dten_desc[i];
10708 (void) dtrace_probe_enable(NULL, enab);
10709 }
10710
10711 enab->dten_primed = 1;
10712 }
10713 }
10714
10715 /*
10716 * Called to indicate that probes should be provided due to retained
10717 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
10718 * must take an initial lap through the enabling, calling the dtps_provide()
10719 * entry point explicitly to allow for autocreated probes.
10720 */
10721 static void
10722 dtrace_enabling_provide(dtrace_provider_t *prv)
10723 {
10724 int i, all = 0;
10725 dtrace_probedesc_t desc;
10726
10727 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10728 lck_mtx_assert(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
10729
10730 if (prv == NULL) {
10731 all = 1;
10732 prv = dtrace_provider;
10733 }
10734
10735 do {
10736 dtrace_enabling_t *enab = dtrace_retained;
10737 void *parg = prv->dtpv_arg;
10738
10739 for (; enab != NULL; enab = enab->dten_next) {
10740 for (i = 0; i < enab->dten_ndesc; i++) {
10741 desc = enab->dten_desc[i]->dted_probe;
10742 lck_mtx_unlock(&dtrace_lock);
10743 prv->dtpv_pops.dtps_provide(parg, &desc);
10744 lck_mtx_lock(&dtrace_lock);
10745 }
10746 }
10747 } while (all && (prv = prv->dtpv_next) != NULL);
10748
10749 lck_mtx_unlock(&dtrace_lock);
10750 dtrace_probe_provide(NULL, all ? NULL : prv);
10751 lck_mtx_lock(&dtrace_lock);
10752 }
10753
10754 /*
10755 * DTrace DOF Functions
10756 */
10757 /*ARGSUSED*/
10758 static void
10759 dtrace_dof_error(dof_hdr_t *dof, const char *str)
10760 {
10761 if (dtrace_err_verbose)
10762 cmn_err(CE_WARN, "failed to process DOF: %s", str);
10763
10764 #ifdef DTRACE_ERRDEBUG
10765 dtrace_errdebug(str);
10766 #endif
10767 }
10768
10769 /*
10770 * Create DOF out of a currently enabled state. Right now, we only create
10771 * DOF containing the run-time options -- but this could be expanded to create
10772 * complete DOF representing the enabled state.
10773 */
10774 static dof_hdr_t *
10775 dtrace_dof_create(dtrace_state_t *state)
10776 {
10777 dof_hdr_t *dof;
10778 dof_sec_t *sec;
10779 dof_optdesc_t *opt;
10780 int i, len = sizeof (dof_hdr_t) +
10781 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
10782 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
10783
10784 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10785
10786 dof = dt_kmem_zalloc_aligned(len, 8, KM_SLEEP);
10787 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
10788 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
10789 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
10790 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
10791
10792 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
10793 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
10794 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
10795 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
10796 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
10797 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
10798
10799 dof->dofh_flags = 0;
10800 dof->dofh_hdrsize = sizeof (dof_hdr_t);
10801 dof->dofh_secsize = sizeof (dof_sec_t);
10802 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
10803 dof->dofh_secoff = sizeof (dof_hdr_t);
10804 dof->dofh_loadsz = len;
10805 dof->dofh_filesz = len;
10806 dof->dofh_pad = 0;
10807
10808 /*
10809 * Fill in the option section header...
10810 */
10811 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
10812 sec->dofs_type = DOF_SECT_OPTDESC;
10813 sec->dofs_align = sizeof (uint64_t);
10814 sec->dofs_flags = DOF_SECF_LOAD;
10815 sec->dofs_entsize = sizeof (dof_optdesc_t);
10816
10817 opt = (dof_optdesc_t *)((uintptr_t)sec +
10818 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
10819
10820 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
10821 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
10822
10823 for (i = 0; i < DTRACEOPT_MAX; i++) {
10824 opt[i].dofo_option = i;
10825 opt[i].dofo_strtab = DOF_SECIDX_NONE;
10826 opt[i].dofo_value = state->dts_options[i];
10827 }
10828
10829 return (dof);
10830 }
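
/*
 * Illustrative sketch (not part of the original source): the DOF built by
 * dtrace_dof_create() is simply a header followed by one DOF_SECT_OPTDESC
 * section holding DTRACEOPT_MAX dof_optdesc_t entries.  Reading it back only
 * requires following dofh_secoff to the section header and dofs_offset to the
 * option array, as the hypothetical walker below shows.
 */
#if 0
static void
toy_dof_print_options(dof_hdr_t *dof)
{
	dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + dof->dofh_secoff);
	dof_optdesc_t *opt = (dof_optdesc_t *)((uintptr_t)dof +
	    sec->dofs_offset);
	uint64_t i, nopts = sec->dofs_size / sec->dofs_entsize;

	for (i = 0; i < nopts; i++) {
		cmn_err(CE_NOTE, "option %d = %lld", (int)opt[i].dofo_option,
		    (long long)opt[i].dofo_value);
	}
}
#endif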
10831
10832 static dof_hdr_t *
10833 #if defined(__APPLE__)
10834 dtrace_dof_copyin(user_addr_t uarg, int *errp)
10835 #else
10836 dtrace_dof_copyin(uintptr_t uarg, int *errp)
10837 #endif
10838 {
10839 dof_hdr_t hdr, *dof;
10840
10841 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
10842
10843 /*
10844 * First, we're going to copyin() the sizeof (dof_hdr_t).
10845 */
10846 #if defined(__APPLE__)
10847 if (copyin(uarg, &hdr, sizeof (hdr)) != 0) {
10848 #else
10849 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
10850 #endif
10851 dtrace_dof_error(NULL, "failed to copyin DOF header");
10852 *errp = EFAULT;
10853 return (NULL);
10854 }
10855
10856 /*
10857 * Now we'll allocate the entire DOF and copy it in -- provided
10858 * that the length isn't outrageous.
10859 */
10860 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
10861 dtrace_dof_error(&hdr, "load size exceeds maximum");
10862 *errp = E2BIG;
10863 return (NULL);
10864 }
10865
10866 if (hdr.dofh_loadsz < sizeof (hdr)) {
10867 dtrace_dof_error(&hdr, "invalid load size");
10868 *errp = EINVAL;
10869 return (NULL);
10870 }
10871
10872 dof = dt_kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
10873
10874 #if defined(__APPLE__)
10875 if (copyin(uarg, dof, hdr.dofh_loadsz) != 0) {
10876 #else
10877 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
10878 #endif
10879 dt_kmem_free_aligned(dof, hdr.dofh_loadsz);
10880 *errp = EFAULT;
10881 return (NULL);
10882 }
10883
10884 return (dof);
10885 }
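
/*
 * Illustrative sketch (not part of the original source): dtrace_dof_copyin()
 * follows the usual two-step pattern for bringing a variable-length object in
 * from user space -- copy in the fixed-size header, bound the self-described
 * length (dofh_loadsz) between the header size and dtrace_dof_maxsize, and
 * only then allocate and copy the whole object.  Stated as a hypothetical,
 * generic helper (the lenof callback extracts the claimed length from the
 * header):
 */
#if 0
static void *
toy_copyin_sized(user_addr_t uarg, size_t hdrsize, size_t maxsize,
    size_t (*lenof)(const void *), int *errp)
{
	char hdr[64];
	size_t len;
	void *obj;

	if (hdrsize > sizeof (hdr) || copyin(uarg, hdr, hdrsize) != 0) {
		*errp = EFAULT;
		return (NULL);
	}

	len = lenof(hdr);	/* length the object claims for itself */

	if (len < hdrsize) {
		*errp = EINVAL;
		return (NULL);
	}

	if (len >= maxsize) {
		*errp = E2BIG;
		return (NULL);
	}

	obj = kmem_alloc(len, KM_SLEEP);

	if (copyin(uarg, obj, len) != 0) {
		kmem_free(obj, len);
		*errp = EFAULT;
		return (NULL);
	}

	return (obj);
}
#endif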
10886
10887 #if defined(__APPLE__)
10888
10889 static dof_hdr_t *
10890 dtrace_dof_copyin_from_proc(proc_t* p, user_addr_t uarg, int *errp)
10891 {
10892 dof_hdr_t hdr, *dof;
10893
10894 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
10895
10896 /*
10897 * First, we're going to copyin() the sizeof (dof_hdr_t).
10898 */
10899 if (uread(p, &hdr, sizeof(hdr), uarg) != KERN_SUCCESS) {
10900 dtrace_dof_error(NULL, "failed to copyin DOF header");
10901 *errp = EFAULT;
10902 return (NULL);
10903 }
10904
10905 /*
10906 * Now we'll allocate the entire DOF and copy it in -- provided
10907 * that the length isn't outrageous.
10908 */
10909 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
10910 dtrace_dof_error(&hdr, "load size exceeds maximum");
10911 *errp = E2BIG;
10912 return (NULL);
10913 }
10914
10915 if (hdr.dofh_loadsz < sizeof (hdr)) {
10916 dtrace_dof_error(&hdr, "invalid load size");
10917 *errp = EINVAL;
10918 return (NULL);
10919 }
10920
10921 dof = dt_kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
10922
10923 if (uread(p, dof, hdr.dofh_loadsz, uarg) != KERN_SUCCESS) {
10924 dt_kmem_free_aligned(dof, hdr.dofh_loadsz);
10925 *errp = EFAULT;
10926 return (NULL);
10927 }
10928
10929 return (dof);
10930 }
10931
10932 #endif /* __APPLE__ */
10933
10934 static dof_hdr_t *
10935 dtrace_dof_property(const char *name)
10936 {
10937 uchar_t *buf;
10938 uint64_t loadsz;
10939 unsigned int len, i;
10940 dof_hdr_t *dof;
10941
10942 /*
10943 * Unfortunately, arrays of values in .conf files are always (and
10944 * only) interpreted to be integer arrays. We must read our DOF
10945 * as an integer array, and then squeeze it into a byte array.
10946 */
10947 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
10948 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
10949 return (NULL);
10950
10951 for (i = 0; i < len; i++)
10952 buf[i] = (uchar_t)(((int *)buf)[i]);
10953
10954 if (len < sizeof (dof_hdr_t)) {
10955 ddi_prop_free(buf);
10956 dtrace_dof_error(NULL, "truncated header");
10957 return (NULL);
10958 }
10959
10960 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
10961 ddi_prop_free(buf);
10962 dtrace_dof_error(NULL, "truncated DOF");
10963 return (NULL);
10964 }
10965
10966 if (loadsz >= dtrace_dof_maxsize) {
10967 ddi_prop_free(buf);
10968 dtrace_dof_error(NULL, "oversized DOF");
10969 return (NULL);
10970 }
10971
10972 dof = dt_kmem_alloc_aligned(loadsz, 8, KM_SLEEP);
10973 bcopy(buf, dof, loadsz);
10974 ddi_prop_free(buf);
10975
10976 return (dof);
10977 }
10978
10979 static void
10980 dtrace_dof_destroy(dof_hdr_t *dof)
10981 {
10982 dt_kmem_free_aligned(dof, dof->dofh_loadsz);
10983 }
10984
10985 /*
10986 * Return the dof_sec_t pointer corresponding to a given section index. If the
10987 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
10988 * a type other than DOF_SECT_NONE is specified, the header is checked against
10989 * this type and NULL is returned if the types do not match.
10990 */
10991 static dof_sec_t *
10992 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
10993 {
10994 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
10995 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
10996
10997 if (i >= dof->dofh_secnum) {
10998 dtrace_dof_error(dof, "referenced section index is invalid");
10999 return (NULL);
11000 }
11001
11002 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11003 dtrace_dof_error(dof, "referenced section is not loadable");
11004 return (NULL);
11005 }
11006
11007 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11008 dtrace_dof_error(dof, "referenced section is the wrong type");
11009 return (NULL);
11010 }
11011
11012 return (sec);
11013 }
11014
11015 static dtrace_probedesc_t *
11016 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11017 {
11018 dof_probedesc_t *probe;
11019 dof_sec_t *strtab;
11020 uintptr_t daddr = (uintptr_t)dof;
11021 uintptr_t str;
11022 size_t size;
11023
11024 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11025 dtrace_dof_error(dof, "invalid probe section");
11026 return (NULL);
11027 }
11028
11029 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11030 dtrace_dof_error(dof, "bad alignment in probe description");
11031 return (NULL);
11032 }
11033
11034 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11035 dtrace_dof_error(dof, "truncated probe description");
11036 return (NULL);
11037 }
11038
11039 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11040 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11041
11042 if (strtab == NULL)
11043 return (NULL);
11044
11045 str = daddr + strtab->dofs_offset;
11046 size = strtab->dofs_size;
11047
11048 if (probe->dofp_provider >= strtab->dofs_size) {
11049 dtrace_dof_error(dof, "corrupt probe provider");
11050 return (NULL);
11051 }
11052
11053 (void) strncpy(desc->dtpd_provider,
11054 (char *)(str + probe->dofp_provider),
11055 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11056
11057 if (probe->dofp_mod >= strtab->dofs_size) {
11058 dtrace_dof_error(dof, "corrupt probe module");
11059 return (NULL);
11060 }
11061
11062 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11063 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11064
11065 if (probe->dofp_func >= strtab->dofs_size) {
11066 dtrace_dof_error(dof, "corrupt probe function");
11067 return (NULL);
11068 }
11069
11070 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11071 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11072
11073 if (probe->dofp_name >= strtab->dofs_size) {
11074 dtrace_dof_error(dof, "corrupt probe name");
11075 return (NULL);
11076 }
11077
11078 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11079 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11080
11081 return (desc);
11082 }
11083
11084 static dtrace_difo_t *
11085 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11086 cred_t *cr)
11087 {
11088 dtrace_difo_t *dp;
11089 size_t ttl = 0;
11090 dof_difohdr_t *dofd;
11091 uintptr_t daddr = (uintptr_t)dof;
11092 size_t max = dtrace_difo_maxsize;
11093 int i, l, n;
11094
11095 static const struct {
11096 int section;
11097 int bufoffs;
11098 int lenoffs;
11099 int entsize;
11100 int align;
11101 const char *msg;
11102 } difo[] = {
11103 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11104 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11105 sizeof (dif_instr_t), "multiple DIF sections" },
11106
11107 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11108 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11109 sizeof (uint64_t), "multiple integer tables" },
11110
11111 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11112 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11113 sizeof (char), "multiple string tables" },
11114
11115 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11116 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11117 sizeof (uint_t), "multiple variable tables" },
11118
11119 #if !defined(__APPLE__)
11120 { DOF_SECT_NONE, 0, 0, 0, NULL }
11121 #else
11122 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11123 #endif /* __APPLE__ */
11124 };
11125
11126 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11127 dtrace_dof_error(dof, "invalid DIFO header section");
11128 return (NULL);
11129 }
11130
11131 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11132 dtrace_dof_error(dof, "bad alignment in DIFO header");
11133 return (NULL);
11134 }
11135
11136 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11137 sec->dofs_size % sizeof (dof_secidx_t)) {
11138 dtrace_dof_error(dof, "bad size in DIFO header");
11139 return (NULL);
11140 }
11141
11142 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11143 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11144
11145 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11146 dp->dtdo_rtype = dofd->dofd_rtype;
11147
11148 for (l = 0; l < n; l++) {
11149 dof_sec_t *subsec;
11150 void **bufp;
11151 uint32_t *lenp;
11152
11153 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11154 dofd->dofd_links[l])) == NULL)
11155 goto err; /* invalid section link */
11156
11157 if (ttl + subsec->dofs_size > max) {
11158 dtrace_dof_error(dof, "exceeds maximum size");
11159 goto err;
11160 }
11161
11162 ttl += subsec->dofs_size;
11163
11164 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11165 if (subsec->dofs_type != difo[i].section)
11166 continue;
11167
11168 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11169 dtrace_dof_error(dof, "section not loaded");
11170 goto err;
11171 }
11172
11173 if (subsec->dofs_align != difo[i].align) {
11174 dtrace_dof_error(dof, "bad alignment");
11175 goto err;
11176 }
11177
11178 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11179 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11180
11181 if (*bufp != NULL) {
11182 dtrace_dof_error(dof, difo[i].msg);
11183 goto err;
11184 }
11185
11186 if (difo[i].entsize != subsec->dofs_entsize) {
11187 dtrace_dof_error(dof, "entry size mismatch");
11188 goto err;
11189 }
11190
11191 if (subsec->dofs_entsize != 0 &&
11192 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11193 dtrace_dof_error(dof, "corrupt entry size");
11194 goto err;
11195 }
11196
11197 *lenp = subsec->dofs_size;
11198 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11199 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11200 *bufp, subsec->dofs_size);
11201
11202 if (subsec->dofs_entsize != 0)
11203 *lenp /= subsec->dofs_entsize;
11204
11205 break;
11206 }
11207
11208 /*
11209 * If we encounter a loadable DIFO sub-section that is not
11210 * known to us, assume this is a broken program and fail.
11211 */
11212 if (difo[i].section == DOF_SECT_NONE &&
11213 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11214 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11215 goto err;
11216 }
11217 }
11218
11219 if (dp->dtdo_buf == NULL) {
11220 /*
11221 * We can't have a DIF object without DIF text.
11222 */
11223 dtrace_dof_error(dof, "missing DIF text");
11224 goto err;
11225 }
11226
11227 /*
11228 * Before we validate the DIF object, run through the variable table
11229 * looking for the strings -- if any of their sizes are zero, we'll set
11230 * their size to be the system-wide default string size. Note that
11231 * this should _not_ happen if the "strsize" option has been set --
11232 * in this case, the compiler should have set the size to reflect the
11233 * setting of the option.
11234 */
11235 for (i = 0; i < dp->dtdo_varlen; i++) {
11236 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11237 dtrace_diftype_t *t = &v->dtdv_type;
11238
11239 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
11240 continue;
11241
11242 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
11243 t->dtdt_size = dtrace_strsize_default;
11244 }
11245
11246 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
11247 goto err;
11248
11249 dtrace_difo_init(dp, vstate);
11250 return (dp);
11251
11252 err:
11253 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11254 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11255 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11256 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11257
11258 kmem_free(dp, sizeof (dtrace_difo_t));
11259 return (NULL);
11260 }
11261
11262 static dtrace_predicate_t *
11263 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11264 cred_t *cr)
11265 {
11266 dtrace_difo_t *dp;
11267
11268 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
11269 return (NULL);
11270
11271 return (dtrace_predicate_create(dp));
11272 }
11273
11274 static dtrace_actdesc_t *
11275 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11276 cred_t *cr)
11277 {
11278 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
11279 dof_actdesc_t *desc;
11280 dof_sec_t *difosec;
11281 size_t offs;
11282 uintptr_t daddr = (uintptr_t)dof;
11283 uint64_t arg;
11284 dtrace_actkind_t kind;
11285
11286 if (sec->dofs_type != DOF_SECT_ACTDESC) {
11287 dtrace_dof_error(dof, "invalid action section");
11288 return (NULL);
11289 }
11290
11291 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
11292 dtrace_dof_error(dof, "truncated action description");
11293 return (NULL);
11294 }
11295
11296 if (sec->dofs_align != sizeof (uint64_t)) {
11297 dtrace_dof_error(dof, "bad alignment in action description");
11298 return (NULL);
11299 }
11300
11301 if (sec->dofs_size < sec->dofs_entsize) {
11302 dtrace_dof_error(dof, "section entry size exceeds total size");
11303 return (NULL);
11304 }
11305
11306 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
11307 dtrace_dof_error(dof, "bad entry size in action description");
11308 return (NULL);
11309 }
11310
11311 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
11312 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
11313 return (NULL);
11314 }
11315
11316 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
11317 desc = (dof_actdesc_t *)(daddr +
11318 (uintptr_t)sec->dofs_offset + offs);
11319 kind = (dtrace_actkind_t)desc->dofa_kind;
11320
11321 if (DTRACEACT_ISPRINTFLIKE(kind) &&
11322 (kind != DTRACEACT_PRINTA ||
11323 desc->dofa_strtab != DOF_SECIDX_NONE)) {
11324 dof_sec_t *strtab;
11325 char *str, *fmt;
11326 uint64_t i;
11327
11328 /*
11329 * printf()-like actions must have a format string.
11330 */
11331 if ((strtab = dtrace_dof_sect(dof,
11332 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
11333 goto err;
11334
11335 str = (char *)((uintptr_t)dof +
11336 (uintptr_t)strtab->dofs_offset);
11337
11338 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
11339 if (str[i] == '\0')
11340 break;
11341 }
11342
11343 if (i >= strtab->dofs_size) {
11344 dtrace_dof_error(dof, "bogus format string");
11345 goto err;
11346 }
11347
11348 if (i == desc->dofa_arg) {
11349 dtrace_dof_error(dof, "empty format string");
11350 goto err;
11351 }
11352
11353 i -= desc->dofa_arg;
11354 fmt = kmem_alloc(i + 1, KM_SLEEP);
11355 bcopy(&str[desc->dofa_arg], fmt, i + 1);
11356 arg = (uint64_t)(uintptr_t)fmt;
11357 } else {
11358 if (kind == DTRACEACT_PRINTA) {
11359 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
11360 arg = 0;
11361 } else {
11362 arg = desc->dofa_arg;
11363 }
11364 }
11365
11366 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
11367 desc->dofa_uarg, arg);
11368
11369 if (last != NULL) {
11370 last->dtad_next = act;
11371 } else {
11372 first = act;
11373 }
11374
11375 last = act;
11376
11377 if (desc->dofa_difo == DOF_SECIDX_NONE)
11378 continue;
11379
11380 if ((difosec = dtrace_dof_sect(dof,
11381 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
11382 goto err;
11383
11384 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
11385
11386 if (act->dtad_difo == NULL)
11387 goto err;
11388 }
11389
11390 ASSERT(first != NULL);
11391 return (first);
11392
11393 err:
11394 for (act = first; act != NULL; act = next) {
11395 next = act->dtad_next;
11396 dtrace_actdesc_release(act, vstate);
11397 }
11398
11399 return (NULL);
11400 }
11401
11402 static dtrace_ecbdesc_t *
11403 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11404 cred_t *cr)
11405 {
11406 dtrace_ecbdesc_t *ep;
11407 dof_ecbdesc_t *ecb;
11408 dtrace_probedesc_t *desc;
11409 dtrace_predicate_t *pred = NULL;
11410
11411 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
11412 dtrace_dof_error(dof, "truncated ECB description");
11413 return (NULL);
11414 }
11415
11416 if (sec->dofs_align != sizeof (uint64_t)) {
11417 dtrace_dof_error(dof, "bad alignment in ECB description");
11418 return (NULL);
11419 }
11420
11421 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
11422 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
11423
11424 if (sec == NULL)
11425 return (NULL);
11426
11427 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11428 ep->dted_uarg = ecb->dofe_uarg;
11429 desc = &ep->dted_probe;
11430
11431 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
11432 goto err;
11433
11434 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
11435 if ((sec = dtrace_dof_sect(dof,
11436 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
11437 goto err;
11438
11439 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
11440 goto err;
11441
11442 ep->dted_pred.dtpdd_predicate = pred;
11443 }
11444
11445 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
11446 if ((sec = dtrace_dof_sect(dof,
11447 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
11448 goto err;
11449
11450 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
11451
11452 if (ep->dted_action == NULL)
11453 goto err;
11454 }
11455
11456 return (ep);
11457
11458 err:
11459 if (pred != NULL)
11460 dtrace_predicate_release(pred, vstate);
11461 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11462 return (NULL);
11463 }
11464
11465 #if !defined(__APPLE__) /* APPLE dyld has already done this for us */
11466 /*
11467 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
11468 * specified DOF. At present, this amounts to simply adding 'ubase' to the
11469 * site of any user SETX relocations to account for load object base address.
11470 * In the future, if we need other relocations, this function can be extended.
11471 */
11472 static int
11473 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
11474 {
11475 uintptr_t daddr = (uintptr_t)dof;
11476 dof_relohdr_t *dofr =
11477 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11478 dof_sec_t *ss, *rs, *ts;
11479 dof_relodesc_t *r;
11480 uint_t i, n;
11481
11482 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
11483 sec->dofs_align != sizeof (dof_secidx_t)) {
11484 dtrace_dof_error(dof, "invalid relocation header");
11485 return (-1);
11486 }
11487
11488 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
11489 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
11490 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
11491
11492 if (ss == NULL || rs == NULL || ts == NULL)
11493 return (-1); /* dtrace_dof_error() has been called already */
11494
11495 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
11496 rs->dofs_align != sizeof (uint64_t)) {
11497 dtrace_dof_error(dof, "invalid relocation section");
11498 return (-1);
11499 }
11500
11501 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
11502 n = rs->dofs_size / rs->dofs_entsize;
11503
11504 for (i = 0; i < n; i++) {
11505 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
11506
11507 switch (r->dofr_type) {
11508 case DOF_RELO_NONE:
11509 break;
11510 case DOF_RELO_SETX:
11511 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
11512 sizeof (uint64_t) > ts->dofs_size) {
11513 dtrace_dof_error(dof, "bad relocation offset");
11514 return (-1);
11515 }
11516
11517 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
11518 dtrace_dof_error(dof, "misaligned setx relo");
11519 return (-1);
11520 }
11521
11522 *(uint64_t *)taddr += ubase;
11523 break;
11524 default:
11525 dtrace_dof_error(dof, "invalid relocation type");
11526 return (-1);
11527 }
11528
11529 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
11530 }
11531
11532 return (0);
11533 }
11534 #endif /* __APPLE__ */
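
/*
 * Illustrative note (not part of the original source): a DOF_RELO_SETX entry
 * names a 64-bit slot in the target section that was emitted holding a
 * link-time value; dtrace_dof_relocate() simply adds the load base to it.
 * For example (the numbers are made up), a slot containing 0x1000 becomes
 * 0x100001000 when the containing object is loaded at ubase 0x100000000.
 * On Apple systems this pass is omitted because, as noted above, dyld has
 * already performed the relocation.
 */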
11535
11536 /*
11537 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
11538 * header: it should be at the front of a memory region that is at least
11539 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
11540 * size. It need not be validated in any other way.
11541 */
11542 static int
11543 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
11544 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
11545 {
11546 uint64_t len = dof->dofh_loadsz, seclen;
11547 uintptr_t daddr = (uintptr_t)dof;
11548 dtrace_ecbdesc_t *ep;
11549 dtrace_enabling_t *enab;
11550 uint_t i;
11551
11552 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11553 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
11554
11555 /*
11556 * Check the DOF header identification bytes. In addition to checking
11557 * valid settings, we also verify that unused bits/bytes are zeroed so
11558 * we can use them later without fear of regressing existing binaries.
11559 */
11560 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
11561 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
11562 dtrace_dof_error(dof, "DOF magic string mismatch");
11563 return (-1);
11564 }
11565
11566 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
11567 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
11568 dtrace_dof_error(dof, "DOF has invalid data model");
11569 return (-1);
11570 }
11571
11572 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
11573 dtrace_dof_error(dof, "DOF encoding mismatch");
11574 return (-1);
11575 }
11576
11577 #if !defined(__APPLE__)
11578 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
11579 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
11580 dtrace_dof_error(dof, "DOF version mismatch");
11581 return (-1);
11582 }
11583 #else
11584 /*
11585 * We only support DOF_VERSION_3 for now.
11586 */
11587 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_3) {
11588 dtrace_dof_error(dof, "DOF version mismatch");
11589 return (-1);
11590 }
11591 #endif
11592
11593 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
11594 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
11595 return (-1);
11596 }
11597
11598 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
11599 dtrace_dof_error(dof, "DOF uses too many integer registers");
11600 return (-1);
11601 }
11602
11603 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
11604 dtrace_dof_error(dof, "DOF uses too many tuple registers");
11605 return (-1);
11606 }
11607
11608 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
11609 if (dof->dofh_ident[i] != 0) {
11610 dtrace_dof_error(dof, "DOF has invalid ident byte set");
11611 return (-1);
11612 }
11613 }
11614
11615 if (dof->dofh_flags & ~DOF_FL_VALID) {
11616 dtrace_dof_error(dof, "DOF has invalid flag bits set");
11617 return (-1);
11618 }
11619
11620 if (dof->dofh_secsize == 0) {
11621 dtrace_dof_error(dof, "zero section header size");
11622 return (-1);
11623 }
11624
11625 /*
11626 * Check that the section headers don't exceed the amount of DOF
11627 * data. Note that we cast the section size and number of sections
11628 * to uint64_t's to prevent possible overflow in the multiplication.
11629 */
11630 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
11631
11632 if (dof->dofh_secoff > len || seclen > len ||
11633 dof->dofh_secoff + seclen > len) {
11634 dtrace_dof_error(dof, "truncated section headers");
11635 return (-1);
11636 }
11637
11638 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
11639 dtrace_dof_error(dof, "misaligned section headers");
11640 return (-1);
11641 }
11642
11643 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
11644 dtrace_dof_error(dof, "misaligned section size");
11645 return (-1);
11646 }
11647
11648 /*
11649 * Take an initial pass through the section headers to be sure that
11650 * the headers don't have stray offsets. If the 'noprobes' flag is
11651 * set, do not permit sections relating to providers, probes, or args.
11652 */
11653 for (i = 0; i < dof->dofh_secnum; i++) {
11654 dof_sec_t *sec = (dof_sec_t *)(daddr +
11655 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11656
11657 if (noprobes) {
11658 switch (sec->dofs_type) {
11659 case DOF_SECT_PROVIDER:
11660 case DOF_SECT_PROBES:
11661 case DOF_SECT_PRARGS:
11662 case DOF_SECT_PROFFS:
11663 dtrace_dof_error(dof, "illegal sections "
11664 "for enabling");
11665 return (-1);
11666 }
11667 }
11668
11669 if (!(sec->dofs_flags & DOF_SECF_LOAD))
11670 continue; /* just ignore non-loadable sections */
11671
11672 if (sec->dofs_align & (sec->dofs_align - 1)) {
11673 dtrace_dof_error(dof, "bad section alignment");
11674 return (-1);
11675 }
11676
11677 if (sec->dofs_offset & (sec->dofs_align - 1)) {
11678 dtrace_dof_error(dof, "misaligned section");
11679 return (-1);
11680 }
11681
11682 if (sec->dofs_offset > len || sec->dofs_size > len ||
11683 sec->dofs_offset + sec->dofs_size > len) {
11684 dtrace_dof_error(dof, "corrupt section header");
11685 return (-1);
11686 }
11687
11688 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
11689 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
11690 dtrace_dof_error(dof, "non-terminating string table");
11691 return (-1);
11692 }
11693 }
11694
11695 #if !defined(__APPLE__)
11696 /*
11697 * APPLE NOTE: We have no relocation to perform. All dof values are
11698 * relative offsets.
11699 */
11700
11701 /*
11702 * Take a second pass through the sections and locate and perform any
11703 * relocations that are present. We do this after the first pass to
11704 * be sure that all sections have had their headers validated.
11705 */
11706 for (i = 0; i < dof->dofh_secnum; i++) {
11707 dof_sec_t *sec = (dof_sec_t *)(daddr +
11708 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11709
11710 if (!(sec->dofs_flags & DOF_SECF_LOAD))
11711 continue; /* skip sections that are not loadable */
11712
11713 switch (sec->dofs_type) {
11714 case DOF_SECT_URELHDR:
11715 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
11716 return (-1);
11717 break;
11718 }
11719 }
11720 #endif /* __APPLE__ */
11721
11722 if ((enab = *enabp) == NULL)
11723 enab = *enabp = dtrace_enabling_create(vstate);
11724
11725 for (i = 0; i < dof->dofh_secnum; i++) {
11726 dof_sec_t *sec = (dof_sec_t *)(daddr +
11727 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11728
11729 if (sec->dofs_type != DOF_SECT_ECBDESC)
11730 continue;
11731
11732 #if !defined(__APPLE__)
11733 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
11734 dtrace_enabling_destroy(enab);
11735 *enabp = NULL;
11736 return (-1);
11737 }
11738 #else
11739 /* XXX Defend against gcc 4.0 botch on x86 (not all paths out of inlined dtrace_dof_ecbdesc
11740 are checked for the NULL return value.) */
11741 ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr);
11742 if (ep == NULL) {
11743 dtrace_enabling_destroy(enab);
11744 *enabp = NULL;
11745 return (-1);
11746 }
11747 #endif /* __APPLE__ */
11748
11749 dtrace_enabling_add(enab, ep);
11750 }
11751
11752 return (0);
11753 }
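
/*
 * Illustrative sketch (not part of the original source): the section-header
 * walks above all compute the i'th dof_sec_t as
 * daddr + dofh_secoff + i * dofh_secsize.  A hypothetical helper that
 * centralizes that arithmetic (with no bounds checking) would be:
 */
#if 0
static dof_sec_t *
toy_dof_sec_at(dof_hdr_t *dof, uint_t i)
{
	return ((dof_sec_t *)((uintptr_t)dof + (uintptr_t)dof->dofh_secoff +
	    i * dof->dofh_secsize));
}
#endif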
11754
11755 /*
11756 * Process DOF for any options. This routine assumes that the DOF has been
11757 * at least processed by dtrace_dof_slurp().
11758 */
11759 static int
11760 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
11761 {
11762 int i, rval;
11763 uint32_t entsize;
11764 size_t offs;
11765 dof_optdesc_t *desc;
11766
11767 for (i = 0; i < dof->dofh_secnum; i++) {
11768 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
11769 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11770
11771 if (sec->dofs_type != DOF_SECT_OPTDESC)
11772 continue;
11773
11774 if (sec->dofs_align != sizeof (uint64_t)) {
11775 dtrace_dof_error(dof, "bad alignment in "
11776 "option description");
11777 return (EINVAL);
11778 }
11779
11780 if ((entsize = sec->dofs_entsize) == 0) {
11781 dtrace_dof_error(dof, "zeroed option entry size");
11782 return (EINVAL);
11783 }
11784
11785 if (entsize < sizeof (dof_optdesc_t)) {
11786 dtrace_dof_error(dof, "bad option entry size");
11787 return (EINVAL);
11788 }
11789
11790 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
11791 desc = (dof_optdesc_t *)((uintptr_t)dof +
11792 (uintptr_t)sec->dofs_offset + offs);
11793
11794 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
11795 dtrace_dof_error(dof, "non-zero option string");
11796 return (EINVAL);
11797 }
11798
11799 if (desc->dofo_value == DTRACEOPT_UNSET) {
11800 dtrace_dof_error(dof, "unset option");
11801 return (EINVAL);
11802 }
11803
11804 if ((rval = dtrace_state_option(state,
11805 desc->dofo_option, desc->dofo_value)) != 0) {
11806 dtrace_dof_error(dof, "rejected option");
11807 return (rval);
11808 }
11809 }
11810 }
11811
11812 return (0);
11813 }
11814
11815 /*
11816 * DTrace Consumer State Functions
11817 */
11818 #if defined(__APPLE__)
11819 static
11820 #endif /* __APPLE__ */
11821 int
11822 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
11823 {
11824 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
11825 void *base;
11826 uintptr_t limit;
11827 dtrace_dynvar_t *dvar, *next, *start;
11828 int i;
11829
11830 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11831 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
11832
11833 bzero(dstate, sizeof (dtrace_dstate_t));
11834
11835 if ((dstate->dtds_chunksize = chunksize) == 0)
11836 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
11837
11838 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
11839 size = min;
11840
11841 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
11842 return (ENOMEM);
11843
11844 dstate->dtds_size = size;
11845 dstate->dtds_base = base;
11846 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
11847 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
11848
11849 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
11850
11851 if (hashsize != 1 && (hashsize & 1))
11852 hashsize--;
11853
11854 dstate->dtds_hashsize = hashsize;
11855 dstate->dtds_hash = dstate->dtds_base;
11856
11857 /*
11858 * Set all of our hash buckets to point to the single sink, and (if
11859 * it hasn't already been set), set the sink's hash value to be the
11860 * sink sentinel value. The sink is needed for dynamic variable
11861 * lookups to know that they have iterated over an entire, valid hash
11862 * chain.
11863 */
11864 for (i = 0; i < hashsize; i++)
11865 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
11866
11867 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
11868 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
11869
11870 /*
11871 * Determine number of active CPUs. Divide free list evenly among
11872 * active CPUs.
11873 */
11874 start = (dtrace_dynvar_t *)
11875 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
11876 limit = (uintptr_t)base + size;
11877
11878 maxper = (limit - (uintptr_t)start) / NCPU;
11879 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
11880
11881 for (i = 0; i < NCPU; i++) {
11882 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
11883
11884 /*
11885 * If we don't even have enough chunks to make it once through
11886 * NCPUs, we're just going to allocate everything to the first
11887 * CPU. And if we're on the last CPU, we're going to allocate
11888 * whatever is left over. In either case, we set the limit to
11889 * be the limit of the dynamic variable space.
11890 */
11891 if (maxper == 0 || i == NCPU - 1) {
11892 limit = (uintptr_t)base + size;
11893 start = NULL;
11894 } else {
11895 limit = (uintptr_t)start + maxper;
11896 start = (dtrace_dynvar_t *)limit;
11897 }
11898
11899 ASSERT(limit <= (uintptr_t)base + size);
11900
11901 for (;;) {
11902 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
11903 dstate->dtds_chunksize);
11904
11905 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
11906 break;
11907
11908 dvar->dtdv_next = next;
11909 dvar = next;
11910 }
11911
11912 if (maxper == 0)
11913 break;
11914 }
11915
11916 return (0);
11917 }
11918
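/*
 * Illustrative sketch (not part of the original source): because every
 * hash bucket above starts out pointing at the single sink, a dynamic
 * variable lookup can walk a chain with no separate emptiness test --
 * reaching the node whose hash value is the sink sentinel means the
 * whole (possibly empty) chain has been seen. The node type and field
 * names below are hypothetical stand-ins, not the real dtrace_dynvar_t
 * layout.
 */
#if 0	/* example only; never compiled */
struct example_dynvar {
	uint64_t		ex_hashval;
	struct example_dynvar	*ex_next;
};

static struct example_dynvar *
example_chain_lookup(struct example_dynvar *bucket, uint64_t hashval)
{
	struct example_dynvar *dv;

	for (dv = bucket; dv->ex_hashval != DTRACE_DYNHASH_SINK;
	    dv = dv->ex_next) {
		if (dv->ex_hashval == hashval)
			return (dv);	/* found a live entry */
	}

	return (NULL);	/* hit the sink: the chain is exhausted */
}
#endif
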
11919 #if defined(__APPLE__)
11920 static
11921 #endif /* __APPLE__ */
11922 void
11923 dtrace_dstate_fini(dtrace_dstate_t *dstate)
11924 {
11925 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
11926
11927 if (dstate->dtds_base == NULL)
11928 return;
11929
11930 kmem_free(dstate->dtds_base, dstate->dtds_size);
11931 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
11932 }
11933
11934 static void
11935 dtrace_vstate_fini(dtrace_vstate_t *vstate)
11936 {
11937 /*
11938 * Logical XOR, where are you?
11939 */
11940 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
11941
11942 if (vstate->dtvs_nglobals > 0) {
11943 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
11944 sizeof (dtrace_statvar_t *));
11945 }
11946
11947 if (vstate->dtvs_ntlocals > 0) {
11948 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
11949 sizeof (dtrace_difv_t));
11950 }
11951
11952 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
11953
11954 if (vstate->dtvs_nlocals > 0) {
11955 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
11956 sizeof (dtrace_statvar_t *));
11957 }
11958 }
11959
11960 static void
11961 dtrace_state_clean(dtrace_state_t *state)
11962 {
11963 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
11964 return;
11965
11966 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
11967 dtrace_speculation_clean(state);
11968 }
11969
11970 static void
11971 dtrace_state_deadman(dtrace_state_t *state)
11972 {
11973 hrtime_t now;
11974
11975 dtrace_sync();
11976
11977 now = dtrace_gethrtime();
11978
11979 if (state != dtrace_anon.dta_state &&
11980 now - state->dts_laststatus >= dtrace_deadman_user)
11981 return;
11982
11983 /*
11984 * We must be sure that dts_alive never appears to be less than the
11985 * value upon entry to dtrace_state_deadman(), and because we lack a
11986 * dtrace_cas64(), we cannot store to it atomically. We thus instead
11987 * store INT64_MAX to it, followed by a memory barrier, followed by
11988 * the new value. This assures that dts_alive never appears to be
11989 * less than its true value, regardless of the order in which the
11990 * stores to the underlying storage are issued.
11991 */
11992 state->dts_alive = INT64_MAX;
11993 dtrace_membar_producer();
11994 state->dts_alive = now;
11995 }
11996
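/*
 * Illustrative sketch (not part of the original source): the dts_alive
 * update above is a general idiom for a 64-bit value that must never
 * appear to move backwards on hardware without a 64-bit compare-and-swap.
 * The variable and function names below are hypothetical.
 */
#if 0	/* example only; never compiled */
static volatile hrtime_t example_alive;

static void
example_touch(hrtime_t now)
{
	example_alive = INT64_MAX;	/* readers may only see "too new" */
	dtrace_membar_producer();	/* order the two stores */
	example_alive = now;		/* publish the real timestamp */
}
#endif
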
11997 #if defined(__APPLE__)
11998 static
11999 #endif /* __APPLE__ */
12000 dtrace_state_t *
12001 dtrace_state_create(dev_t *devp, cred_t *cr)
12002 {
12003 minor_t minor;
12004 major_t major;
12005 char c[30];
12006 dtrace_state_t *state;
12007 dtrace_optval_t *opt;
12008 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12009
12010 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12011 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12012
12013 #if !defined(__APPLE__)
12014 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12015 VM_BESTFIT | VM_SLEEP);
12016 #else
12017 /*
12018 * Darwin's DEVFS layer acquired the minor number for this "device" when it called
12019 * dtrace_devfs_clone_func(). At that time, dtrace_devfs_clone_func() proposed a minor number
12020 * (next unused according to vmem_alloc()) and then immediately put the number back in play
12021 * (by calling vmem_free()). That minor number is now being used for an open, so commit it
12022 * to use. The following vmem_alloc() must deliver that same minor number.
12023 */
12024
12025 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12026 VM_BESTFIT | VM_SLEEP);
12027
12028 if (NULL != devp) {
12029 ASSERT(getminor(*devp) == minor);
12030 if (getminor(*devp) != minor) {
12031 printf("dtrace_open: couldn't re-acquire vended minor number %d. Instead got %d\n",
12032 getminor(*devp), minor);
12033 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12034 return NULL;
12035 }
12036 } else {
12037 /* NULL==devp iff "Anonymous state" (see dtrace_anon_property),
12038 * so just vend the minor device number here de novo since no "open" has occurred. */
12039 }
12040
12041 #endif /* __APPLE__ */
12042
12043 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12044 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12045 return (NULL);
12046 }
12047
12048 state = ddi_get_soft_state(dtrace_softstate, minor);
12049 state->dts_epid = DTRACE_EPIDNONE + 1;
12050
12051 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
12052 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12053 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12054
12055 if (devp != NULL) {
12056 major = getemajor(*devp);
12057 } else {
12058 major = ddi_driver_major(dtrace_devi);
12059 }
12060
12061 state->dts_dev = makedevice(major, minor);
12062
12063 if (devp != NULL)
12064 *devp = state->dts_dev;
12065
12066 /*
12067 * We allocate NCPU buffers. On the one hand, this can be quite
12068 * a bit of memory per instance (nearly 36K on a Starcat). On the
12069 * other hand, it saves an additional memory reference in the probe
12070 * path.
12071 */
12072 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12073 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12074 state->dts_cleaner = CYCLIC_NONE;
12075 state->dts_deadman = CYCLIC_NONE;
12076 state->dts_vstate.dtvs_state = state;
12077
12078 for (i = 0; i < DTRACEOPT_MAX; i++)
12079 state->dts_options[i] = DTRACEOPT_UNSET;
12080
12081 /*
12082 * Set the default options.
12083 */
12084 opt = state->dts_options;
12085 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12086 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12087 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12088 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12089 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12090 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12091 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12092 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12093 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12094 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12095 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12096 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12097 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12098 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12099
12100 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12101
12102 /*
12103 * Depending on the user credentials, we set flag bits which alter probe
12104 * visibility or the amount of destructiveness allowed. In the case of
12105 * actual anonymous tracing, or the possession of all privileges, all of
12106 * the normal checks are bypassed.
12107 */
12108 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12109 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12110 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12111 } else {
12112 /*
12113 * Set up the credentials for this instantiation. We take a
12114 * hold on the credential to prevent it from disappearing on
12115 * us; this in turn prevents the zone_t referenced by this
12116 * credential from disappearing. This means that we can
12117 * examine the credential and the zone from probe context.
12118 */
12119 crhold(cr);
12120 state->dts_cred.dcr_cred = cr;
12121
12122 /*
12123 * CRA_PROC means "we have *some* privilege for dtrace" and
12124 * unlocks the use of variables like pid, zonename, etc.
12125 */
12126 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12127 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12128 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12129 }
12130
12131 /*
12132 * dtrace_user allows use of syscall and profile providers.
12133 * If the user also has proc_owner and/or proc_zone, we
12134 * extend the scope to include additional visibility and
12135 * destructive power.
12136 */
12137 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12138 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12139 state->dts_cred.dcr_visible |=
12140 DTRACE_CRV_ALLPROC;
12141
12142 state->dts_cred.dcr_action |=
12143 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12144 }
12145
12146 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12147 state->dts_cred.dcr_visible |=
12148 DTRACE_CRV_ALLZONE;
12149
12150 state->dts_cred.dcr_action |=
12151 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12152 }
12153
12154 /*
12155 * If we have all privs in whatever zone this is,
12156 * we can do destructive things to processes which
12157 * have altered credentials.
12158 */
12159 #if !defined(__APPLE__)
12160 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12161 cr->cr_zone->zone_privset)) {
12162 state->dts_cred.dcr_action |=
12163 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12164 }
12165 #else
12166 /* Darwin doesn't do zones. */
12167 state->dts_cred.dcr_action |=
12168 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12169 #endif /* __APPLE__ */
12170 }
12171
12172 /*
12173 * Holding the dtrace_kernel privilege also implies that
12174 * the user has the dtrace_user privilege from a visibility
12175 * perspective. But without further privileges, some
12176 * destructive actions are not available.
12177 */
12178 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12179 /*
12180 * Make all probes in all zones visible. However,
12181 * this doesn't mean that all actions become available
12182 * to all zones.
12183 */
12184 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12185 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12186
12187 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12188 DTRACE_CRA_PROC;
12189 /*
12190 * Holding proc_owner means that destructive actions
12191 * for *this* zone are allowed.
12192 */
12193 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12194 state->dts_cred.dcr_action |=
12195 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12196
12197 /*
12198 * Holding proc_zone means that destructive actions
12199 * for this user/group ID in all zones are allowed.
12200 */
12201 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12202 state->dts_cred.dcr_action |=
12203 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12204
12205 /*
12206 * If we have all privs in whatever zone this is,
12207 * we can do destructive things to processes which
12208 * have altered credentials.
12209 */
12210 #if !defined(__APPLE__)
12211 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12212 cr->cr_zone->zone_privset)) {
12213 state->dts_cred.dcr_action |=
12214 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12215 }
12216 #else
12217 /* Darwin doesn't do zones. */
12218 state->dts_cred.dcr_action |=
12219 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12220 #endif /* __APPLE__ */
12221 }
12222
12223 /*
12224 * Holding the dtrace_proc privilege gives control over fasttrap
12225 * and pid providers. We need to grant wider destructive
12226 * privileges in the event that the user has proc_owner and/or
12227 * proc_zone.
12228 */
12229 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12230 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12231 state->dts_cred.dcr_action |=
12232 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12233
12234 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12235 state->dts_cred.dcr_action |=
12236 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12237 }
12238 }
12239
12240 return (state);
12241 }
12242
12243 static int
12244 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
12245 {
12246 dtrace_optval_t *opt = state->dts_options, size;
12247 processorid_t cpu;
12248 int flags = 0, rval;
12249
12250 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12251 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12252 ASSERT(which < DTRACEOPT_MAX);
12253 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
12254 (state == dtrace_anon.dta_state &&
12255 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
12256
12257 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
12258 return (0);
12259
12260 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
12261 cpu = opt[DTRACEOPT_CPU];
12262
12263 if (which == DTRACEOPT_SPECSIZE)
12264 flags |= DTRACEBUF_NOSWITCH;
12265
12266 if (which == DTRACEOPT_BUFSIZE) {
12267 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
12268 flags |= DTRACEBUF_RING;
12269
12270 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
12271 flags |= DTRACEBUF_FILL;
12272
12273 if (state != dtrace_anon.dta_state ||
12274 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
12275 flags |= DTRACEBUF_INACTIVE;
12276 }
12277
12278 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
12279 /*
12280 * The size must be 8-byte aligned. If the size is not 8-byte
12281 * aligned, drop it down by the difference.
12282 */
12283 if (size & (sizeof (uint64_t) - 1))
12284 size -= size & (sizeof (uint64_t) - 1);
12285
12286 if (size < state->dts_reserve) {
12287 /*
12288 * Buffers must always be large enough to accommodate
12289 * their prereserved space. We return E2BIG instead
12290 * of ENOMEM in this case to allow for user-level
12291 * software to differentiate the cases.
12292 */
12293 return (E2BIG);
12294 }
12295
12296 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
12297
12298 if (rval != ENOMEM) {
12299 opt[which] = size;
12300 return (rval);
12301 }
12302
12303 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12304 return (rval);
12305 }
12306
12307 return (ENOMEM);
12308 }
12309
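/*
 * Illustrative sketch (not part of the original source): the sizing loop
 * above follows a general pattern -- round the request down to an 8-byte
 * multiple, fail with E2BIG once it can no longer cover the prereserved
 * space, and otherwise halve and retry while the allocator keeps
 * returning ENOMEM. The allocator callback below is a hypothetical
 * stand-in for dtrace_buffer_alloc(), and the early return on a manual
 * buffer-resize policy is omitted for brevity.
 */
#if 0	/* example only; never compiled */
static int
example_alloc_halving(size_t want, size_t reserve,
    int (*try_alloc)(size_t), size_t *chosen)
{
	size_t size;

	for (size = want; size >= sizeof (uint64_t); size >>= 1) {
		size &= ~(sizeof (uint64_t) - 1);	/* align down to 8 bytes */

		if (size < reserve)
			return (E2BIG);	/* can't honor the prereserved space */

		if (try_alloc(size) == 0) {
			*chosen = size;
			return (0);
		}
	}

	return (ENOMEM);
}
#endif
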
12310 static int
12311 dtrace_state_buffers(dtrace_state_t *state)
12312 {
12313 dtrace_speculation_t *spec = state->dts_speculations;
12314 int rval, i;
12315
12316 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
12317 DTRACEOPT_BUFSIZE)) != 0)
12318 return (rval);
12319
12320 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
12321 DTRACEOPT_AGGSIZE)) != 0)
12322 return (rval);
12323
12324 for (i = 0; i < state->dts_nspeculations; i++) {
12325 if ((rval = dtrace_state_buffer(state,
12326 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
12327 return (rval);
12328 }
12329
12330 return (0);
12331 }
12332
12333 static void
12334 dtrace_state_prereserve(dtrace_state_t *state)
12335 {
12336 dtrace_ecb_t *ecb;
12337 dtrace_probe_t *probe;
12338
12339 state->dts_reserve = 0;
12340
12341 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
12342 return;
12343
12344 /*
12345 * If our buffer policy is a "fill" buffer policy, we need to set the
12346 * prereserved space to be the space required by the END probes.
12347 */
12348 probe = dtrace_probes[dtrace_probeid_end - 1];
12349 ASSERT(probe != NULL);
12350
12351 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
12352 if (ecb->dte_state != state)
12353 continue;
12354
12355 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
12356 }
12357 }
12358
12359 static int
12360 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
12361 {
12362 dtrace_optval_t *opt = state->dts_options, sz, nspec;
12363 dtrace_speculation_t *spec;
12364 dtrace_buffer_t *buf;
12365 cyc_handler_t hdlr;
12366 cyc_time_t when;
12367 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12368 dtrace_icookie_t cookie;
12369
12370 lck_mtx_lock(&cpu_lock);
12371 lck_mtx_lock(&dtrace_lock);
12372
12373 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
12374 rval = EBUSY;
12375 goto out;
12376 }
12377
12378 /*
12379 * Before we can perform any checks, we must prime all of the
12380 * retained enablings that correspond to this state.
12381 */
12382 dtrace_enabling_prime(state);
12383
12384 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
12385 rval = EACCES;
12386 goto out;
12387 }
12388
12389 dtrace_state_prereserve(state);
12390
12391 /*
12392 * Now what we want to do is try to allocate our speculations.
12393 * We do not automatically resize the number of speculations; if
12394 * this fails, we will fail the operation.
12395 */
12396 nspec = opt[DTRACEOPT_NSPEC];
12397 ASSERT(nspec != DTRACEOPT_UNSET);
12398
12399 if (nspec > INT_MAX) {
12400 rval = ENOMEM;
12401 goto out;
12402 }
12403
12404 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
12405
12406 if (spec == NULL) {
12407 rval = ENOMEM;
12408 goto out;
12409 }
12410
12411 state->dts_speculations = spec;
12412 state->dts_nspeculations = (int)nspec;
12413
12414 for (i = 0; i < nspec; i++) {
12415 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
12416 rval = ENOMEM;
12417 goto err;
12418 }
12419
12420 spec[i].dtsp_buffer = buf;
12421 }
12422
12423 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
12424 if (dtrace_anon.dta_state == NULL) {
12425 rval = ENOENT;
12426 goto out;
12427 }
12428
12429 if (state->dts_necbs != 0) {
12430 rval = EALREADY;
12431 goto out;
12432 }
12433
12434 state->dts_anon = dtrace_anon_grab();
12435 ASSERT(state->dts_anon != NULL);
12436 state = state->dts_anon;
12437
12438 /*
12439 * We want "grabanon" to be set in the grabbed state, so we'll
12440 * copy that option value from the grabbing state into the
12441 * grabbed state.
12442 */
12443 state->dts_options[DTRACEOPT_GRABANON] =
12444 opt[DTRACEOPT_GRABANON];
12445
12446 *cpu = dtrace_anon.dta_beganon;
12447
12448 /*
12449 * If the anonymous state is active (as it almost certainly
12450 * is if the anonymous enabling ultimately matched anything),
12451 * we don't allow any further option processing -- but we
12452 * don't return failure.
12453 */
12454 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12455 goto out;
12456 }
12457
12458 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
12459 opt[DTRACEOPT_AGGSIZE] != 0) {
12460 if (state->dts_aggregations == NULL) {
12461 /*
12462 * We're not going to create an aggregation buffer
12463 * because we don't have any ECBs that contain
12464 * aggregations -- set this option to 0.
12465 */
12466 opt[DTRACEOPT_AGGSIZE] = 0;
12467 } else {
12468 /*
12469 * If we have an aggregation buffer, we must also have
12470 * a buffer to use as scratch.
12471 */
12472 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
12473 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
12474 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
12475 }
12476 }
12477 }
12478
12479 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
12480 opt[DTRACEOPT_SPECSIZE] != 0) {
12481 if (!state->dts_speculates) {
12482 /*
12483 * We're not going to create speculation buffers
12484 * because we don't have any ECBs that actually
12485 * speculate -- set the speculation size to 0.
12486 */
12487 opt[DTRACEOPT_SPECSIZE] = 0;
12488 }
12489 }
12490
12491 /*
12492 * The bare minimum size for any buffer that we're actually going to
12493 * do anything to is sizeof (uint64_t).
12494 */
12495 sz = sizeof (uint64_t);
12496
12497 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
12498 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
12499 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
12500 /*
12501 * A buffer size has been explicitly set to 0 (or to a size
12502 * that will be adjusted to 0) and we need the space -- we
12503 * need to return failure. We return ENOSPC to differentiate
12504 * it from failing to allocate a buffer due to failure to meet
12505 * the reserve (for which we return E2BIG).
12506 */
12507 rval = ENOSPC;
12508 goto out;
12509 }
12510
12511 if ((rval = dtrace_state_buffers(state)) != 0)
12512 goto err;
12513
12514 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
12515 sz = dtrace_dstate_defsize;
12516
12517 do {
12518 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
12519
12520 if (rval == 0)
12521 break;
12522
12523 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12524 goto err;
12525 } while (sz >>= 1);
12526
12527 opt[DTRACEOPT_DYNVARSIZE] = sz;
12528
12529 if (rval != 0)
12530 goto err;
12531
12532 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
12533 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
12534
12535 if (opt[DTRACEOPT_CLEANRATE] == 0)
12536 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12537
12538 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
12539 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
12540
12541 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
12542 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12543
12544 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
12545 hdlr.cyh_arg = state;
12546 hdlr.cyh_level = CY_LOW_LEVEL;
12547
12548 when.cyt_when = 0;
12549 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
12550
12551 state->dts_cleaner = cyclic_add(&hdlr, &when);
12552
12553 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
12554 hdlr.cyh_arg = state;
12555 hdlr.cyh_level = CY_LOW_LEVEL;
12556
12557 when.cyt_when = 0;
12558 when.cyt_interval = dtrace_deadman_interval;
12559
12560 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
12561 state->dts_deadman = cyclic_add(&hdlr, &when);
12562
12563 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
12564
12565 /*
12566 * Now it's time to actually fire the BEGIN probe. We need to disable
12567 * interrupts here both to record the CPU on which we fired the BEGIN
12568 * probe (the data from this CPU will be processed first at user
12569 * level) and to manually activate the buffer for this CPU.
12570 */
12571 cookie = dtrace_interrupt_disable();
12572 *cpu = CPU->cpu_id;
12573 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
12574 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
12575
12576 dtrace_probe(dtrace_probeid_begin,
12577 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12578 dtrace_interrupt_enable(cookie);
12579 /*
12580 * We may have had an exit action from a BEGIN probe; only change our
12581 * state to ACTIVE if we're still in WARMUP.
12582 */
12583 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
12584 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
12585
12586 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
12587 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
12588
12589 /*
12590 * Regardless of whether we're now in ACTIVE or DRAINING, we
12591 * want each CPU to transition its principal buffer out of the
12592 * INACTIVE state. Doing this assures that no CPU will suddenly begin
12593 * processing an ECB halfway down a probe's ECB chain; all CPUs will
12594 * atomically transition from processing none of a state's ECBs to
12595 * processing all of them.
12596 */
12597 dtrace_xcall(DTRACE_CPUALL,
12598 (dtrace_xcall_t)dtrace_buffer_activate, state);
12599 goto out;
12600
12601 err:
12602 dtrace_buffer_free(state->dts_buffer);
12603 dtrace_buffer_free(state->dts_aggbuffer);
12604
12605 if ((nspec = state->dts_nspeculations) == 0) {
12606 ASSERT(state->dts_speculations == NULL);
12607 goto out;
12608 }
12609
12610 spec = state->dts_speculations;
12611 ASSERT(spec != NULL);
12612
12613 for (i = 0; i < state->dts_nspeculations; i++) {
12614 if ((buf = spec[i].dtsp_buffer) == NULL)
12615 break;
12616
12617 dtrace_buffer_free(buf);
12618 kmem_free(buf, bufsize);
12619 }
12620
12621 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12622 state->dts_nspeculations = 0;
12623 state->dts_speculations = NULL;
12624
12625 out:
12626 lck_mtx_unlock(&dtrace_lock);
12627 lck_mtx_unlock(&cpu_lock);
12628
12629 return (rval);
12630 }
12631
12632 static int
12633 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
12634 {
12635 dtrace_icookie_t cookie;
12636
12637 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12638
12639 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
12640 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
12641 return (EINVAL);
12642
12643 /*
12644 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
12645 * to be sure that every CPU has seen it. See below for the details
12646 * on why this is done.
12647 */
12648 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
12649 dtrace_sync();
12650
12651 /*
12652 * By this point, it is impossible for any CPU to be still processing
12653 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
12654 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
12655 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
12656 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
12657 * iff we're in the END probe.
12658 */
12659 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
12660 dtrace_sync();
12661 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
12662
12663 /*
12664 * Finally, we can release the reserve and call the END probe. We
12665 * disable interrupts across calling the END probe to allow us to
12666 * return the CPU on which we actually called the END probe. This
12667 * allows user-land to be sure that this CPU's principal buffer is
12668 * processed last.
12669 */
12670 state->dts_reserve = 0;
12671
12672 cookie = dtrace_interrupt_disable();
12673 *cpu = CPU->cpu_id;
12674 dtrace_probe(dtrace_probeid_end,
12675 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12676 dtrace_interrupt_enable(cookie);
12677
12678 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
12679 dtrace_sync();
12680
12681 return (0);
12682 }
12683
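/*
 * Illustrative sketch (not part of the original source): both activity
 * transitions above use the same quiesce step -- publish the new value,
 * then dtrace_sync() so that no CPU can still be executing probe context
 * that observed the old value before the next stage proceeds.
 */
#if 0	/* example only; never compiled */
static void
example_quiesce_step(dtrace_state_t *state, dtrace_activity_t next)
{
	state->dts_activity = next;	/* publish the transition */
	dtrace_sync();			/* wait out every CPU in probe context */
}
#endif
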
12684 static int
12685 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
12686 dtrace_optval_t val)
12687 {
12688 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12689
12690 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12691 return (EBUSY);
12692
12693 if (option >= DTRACEOPT_MAX)
12694 return (EINVAL);
12695
12696 if (option != DTRACEOPT_CPU && val < 0)
12697 return (EINVAL);
12698
12699 switch (option) {
12700 case DTRACEOPT_DESTRUCTIVE:
12701 if (dtrace_destructive_disallow)
12702 return (EACCES);
12703
12704 state->dts_cred.dcr_destructive = 1;
12705 break;
12706
12707 case DTRACEOPT_BUFSIZE:
12708 case DTRACEOPT_DYNVARSIZE:
12709 case DTRACEOPT_AGGSIZE:
12710 case DTRACEOPT_SPECSIZE:
12711 case DTRACEOPT_STRSIZE:
12712 if (val < 0)
12713 return (EINVAL);
12714
12715 if (val >= LONG_MAX) {
12716 /*
12717 * If this is an otherwise negative value, set it to
12718 * the highest multiple of 128m less than LONG_MAX.
12719 * Technically, we're adjusting the size without
12720 * regard to the buffer resizing policy, but in fact,
12721 * this has no effect -- if we set the buffer size to
12722 * ~LONG_MAX and the buffer policy is ultimately set to
12723 * be "manual", the buffer allocation is guaranteed to
12724 * fail, if only because the allocation requires two
12725 * buffers. (We set the size to the highest
12726 * multiple of 128m because it ensures that the size
12727 * will remain a multiple of a megabyte when
12728 * repeatedly halved -- all the way down to 15m.)
12729 */
12730 val = LONG_MAX - (1 << 27) + 1;
12731 }
12732 }
12733
12734 state->dts_options[option] = val;
12735
12736 return (0);
12737 }
12738
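/*
 * Worked example (not part of the original source) for the LONG_MAX clamp
 * above, assuming an ILP32 kernel where LONG_MAX is 2^31 - 1:
 *
 *	LONG_MAX - (1 << 27) + 1 = 2^31 - 2^27 = 15 * 2^27 = 1920m
 *
 * Repeated halving then yields 960m, 480m, 240m, 120m, 60m, 30m and
 * finally 15m -- a whole number of megabytes at every step, which is what
 * choosing the highest multiple of 128m buys.
 */
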
12739 static void
12740 dtrace_state_destroy(dtrace_state_t *state)
12741 {
12742 dtrace_ecb_t *ecb;
12743 dtrace_vstate_t *vstate = &state->dts_vstate;
12744 minor_t minor = getminor(state->dts_dev);
12745 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12746 dtrace_speculation_t *spec = state->dts_speculations;
12747 int nspec = state->dts_nspeculations;
12748 uint32_t match;
12749
12750 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12751 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12752
12753 /*
12754 * First, retract any retained enablings for this state.
12755 */
12756 dtrace_enabling_retract(state);
12757 ASSERT(state->dts_nretained == 0);
12758
12759 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
12760 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
12761 /*
12762 * We have managed to come into dtrace_state_destroy() on a
12763 * hot enabling -- almost certainly because of a disorderly
12764 * shutdown of a consumer. (That is, a consumer that is
12765 * exiting without having called dtrace_stop().) In this case,
12766 * we're going to set our activity to be KILLED, and then
12767 * issue a sync to be sure that everyone is out of probe
12768 * context before we start blowing away ECBs.
12769 */
12770 state->dts_activity = DTRACE_ACTIVITY_KILLED;
12771 dtrace_sync();
12772 }
12773
12774 /*
12775 * Release the credential hold we took in dtrace_state_create().
12776 */
12777 if (state->dts_cred.dcr_cred != NULL)
12778 crfree(state->dts_cred.dcr_cred);
12779
12780 /*
12781 * Now we can safely disable and destroy any enabled probes. Because
12782 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
12783 * (especially if they're all enabled), we take two passes through the
12784 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
12785 * in the second we disable whatever is left over.
12786 */
12787 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
12788 for (i = 0; i < state->dts_necbs; i++) {
12789 if ((ecb = state->dts_ecbs[i]) == NULL)
12790 continue;
12791
12792 if (match && ecb->dte_probe != NULL) {
12793 dtrace_probe_t *probe = ecb->dte_probe;
12794 dtrace_provider_t *prov = probe->dtpr_provider;
12795
12796 if (!(prov->dtpv_priv.dtpp_flags & match))
12797 continue;
12798 }
12799
12800 dtrace_ecb_disable(ecb);
12801 dtrace_ecb_destroy(ecb);
12802 }
12803
12804 if (!match)
12805 break;
12806 }
12807
12808 /*
12809 * Before we free the buffers, perform one more sync to assure that
12810 * every CPU is out of probe context.
12811 */
12812 dtrace_sync();
12813
12814 dtrace_buffer_free(state->dts_buffer);
12815 dtrace_buffer_free(state->dts_aggbuffer);
12816
12817 for (i = 0; i < nspec; i++)
12818 dtrace_buffer_free(spec[i].dtsp_buffer);
12819
12820 if (state->dts_cleaner != CYCLIC_NONE)
12821 cyclic_remove(state->dts_cleaner);
12822
12823 if (state->dts_deadman != CYCLIC_NONE)
12824 cyclic_remove(state->dts_deadman);
12825
12826 dtrace_dstate_fini(&vstate->dtvs_dynvars);
12827 dtrace_vstate_fini(vstate);
12828 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
12829
12830 if (state->dts_aggregations != NULL) {
12831 #ifdef DEBUG
12832 for (i = 0; i < state->dts_naggregations; i++)
12833 ASSERT(state->dts_aggregations[i] == NULL);
12834 #endif
12835 ASSERT(state->dts_naggregations > 0);
12836 kmem_free(state->dts_aggregations,
12837 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
12838 }
12839
12840 kmem_free(state->dts_buffer, bufsize);
12841 kmem_free(state->dts_aggbuffer, bufsize);
12842
12843 for (i = 0; i < nspec; i++)
12844 kmem_free(spec[i].dtsp_buffer, bufsize);
12845
12846 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12847
12848 dtrace_format_destroy(state);
12849
12850 vmem_destroy(state->dts_aggid_arena);
12851 ddi_soft_state_free(dtrace_softstate, minor);
12852 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12853 }
12854
12855 /*
12856 * DTrace Anonymous Enabling Functions
12857 */
12858 static dtrace_state_t *
12859 dtrace_anon_grab(void)
12860 {
12861 dtrace_state_t *state;
12862
12863 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12864
12865 if ((state = dtrace_anon.dta_state) == NULL) {
12866 ASSERT(dtrace_anon.dta_enabling == NULL);
12867 return (NULL);
12868 }
12869
12870 ASSERT(dtrace_anon.dta_enabling != NULL);
12871 ASSERT(dtrace_retained != NULL);
12872
12873 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
12874 dtrace_anon.dta_enabling = NULL;
12875 dtrace_anon.dta_state = NULL;
12876
12877 return (state);
12878 }
12879
12880 static void
12881 dtrace_anon_property(void)
12882 {
12883 int i, rv;
12884 dtrace_state_t *state;
12885 dof_hdr_t *dof;
12886 char c[32]; /* enough for "dof-data-" + digits */
12887
12888 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12889 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12890
12891 for (i = 0; ; i++) {
12892 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
12893
12894 dtrace_err_verbose = 1;
12895
12896 if ((dof = dtrace_dof_property(c)) == NULL) {
12897 dtrace_err_verbose = 0;
12898 break;
12899 }
12900
12901 /*
12902 * We want to create anonymous state, so we need to transition
12903 * the kernel debugger to indicate that DTrace is active. If
12904 * this fails (e.g. because the debugger has modified text in
12905 * some way), we won't continue with the processing.
12906 */
12907 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
12908 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
12909 "enabling ignored.");
12910 dtrace_dof_destroy(dof);
12911 break;
12912 }
12913
12914 /*
12915 * If we haven't allocated an anonymous state, we'll do so now.
12916 */
12917 if ((state = dtrace_anon.dta_state) == NULL) {
12918 state = dtrace_state_create(NULL, NULL);
12919 dtrace_anon.dta_state = state;
12920
12921 if (state == NULL) {
12922 /*
12923 * This basically shouldn't happen: the only
12924 * failure mode from dtrace_state_create() is a
12925 * failure of ddi_soft_state_zalloc() that
12926 * itself should never happen. Still, the
12927 * interface allows for a failure mode, and
12928 * we want to fail as gracefully as possible:
12929 * we'll emit an error message and cease
12930 * processing anonymous state in this case.
12931 */
12932 cmn_err(CE_WARN, "failed to create "
12933 "anonymous state");
12934 dtrace_dof_destroy(dof);
12935 break;
12936 }
12937 }
12938
12939 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
12940 &dtrace_anon.dta_enabling, 0, B_TRUE);
12941
12942 if (rv == 0)
12943 rv = dtrace_dof_options(dof, state);
12944
12945 dtrace_err_verbose = 0;
12946 dtrace_dof_destroy(dof);
12947
12948 if (rv != 0) {
12949 /*
12950 * This is malformed DOF; chuck any anonymous state
12951 * that we created.
12952 */
12953 ASSERT(dtrace_anon.dta_enabling == NULL);
12954 dtrace_state_destroy(state);
12955 dtrace_anon.dta_state = NULL;
12956 break;
12957 }
12958
12959 ASSERT(dtrace_anon.dta_enabling != NULL);
12960 }
12961
12962 if (dtrace_anon.dta_enabling != NULL) {
12963 int rval;
12964
12965 /*
12966 * dtrace_enabling_retain() can only fail because we are
12967 * trying to retain more enablings than are allowed -- but
12968 * we only have one anonymous enabling, and we are guaranteed
12969 * to be allowed at least one retained enabling; we assert
12970 * that dtrace_enabling_retain() returns success.
12971 */
12972 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
12973 ASSERT(rval == 0);
12974
12975 dtrace_enabling_dump(dtrace_anon.dta_enabling);
12976 }
12977 }
12978
12979 /*
12980 * DTrace Helper Functions
12981 */
12982 static void
12983 dtrace_helper_trace(dtrace_helper_action_t *helper,
12984 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
12985 {
12986 uint32_t size, next, nnext, i;
12987 dtrace_helptrace_t *ent;
12988 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
12989
12990 if (!dtrace_helptrace_enabled)
12991 return;
12992
12993 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
12994
12995 /*
12996 * What would a tracing framework be without its own tracing
12997 * framework? (Well, a hell of a lot simpler, for starters...)
12998 */
12999 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13000 sizeof (uint64_t) - sizeof (uint64_t);
13001
13002 /*
13003 * Iterate until we can allocate a slot in the trace buffer.
13004 */
13005 do {
13006 next = dtrace_helptrace_next;
13007
13008 if (next + size < dtrace_helptrace_bufsize) {
13009 nnext = next + size;
13010 } else {
13011 nnext = size;
13012 }
13013 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
13014
13015 /*
13016 * We have our slot; fill it in.
13017 */
13018 if (nnext == size)
13019 next = 0;
13020
13021 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13022 ent->dtht_helper = helper;
13023 ent->dtht_where = where;
13024 ent->dtht_nlocals = vstate->dtvs_nlocals;
13025
13026 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13027 mstate->dtms_fltoffs : -1;
13028 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13029 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
13030
13031 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13032 dtrace_statvar_t *svar;
13033
13034 if ((svar = vstate->dtvs_locals[i]) == NULL)
13035 continue;
13036
13037 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13038 ent->dtht_locals[i] =
13039 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
13040 }
13041 }
13042
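/*
 * Illustrative sketch (not part of the original source): the slot
 * reservation above is a lock-free ring-buffer claim built on
 * dtrace_cas32() -- compute the would-be next cursor (wrapping back to
 * `size' when the record would spill past the end of the buffer) and
 * retry until the compare-and-swap installs it. The parameter names are
 * hypothetical.
 */
#if 0	/* example only; never compiled */
static uint32_t
example_reserve_slot(uint32_t *cursor, uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = *cursor;
		nnext = (next + size < bufsize) ? next + size : size;
	} while (dtrace_cas32(cursor, next, nnext) != next);

	/* a wrap (nnext == size) places the record at offset 0 */
	return (nnext == size ? 0 : next);
}
#endif
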
13043 static uint64_t
13044 dtrace_helper(int which, dtrace_mstate_t *mstate,
13045 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13046 {
13047 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
13048 uint64_t sarg0 = mstate->dtms_arg[0];
13049 uint64_t sarg1 = mstate->dtms_arg[1];
13050 uint64_t rval;
13051 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13052 dtrace_helper_action_t *helper;
13053 dtrace_vstate_t *vstate;
13054 dtrace_difo_t *pred;
13055 int i, trace = dtrace_helptrace_enabled;
13056
13057 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13058
13059 if (helpers == NULL)
13060 return (0);
13061
13062 if ((helper = helpers->dthps_actions[which]) == NULL)
13063 return (0);
13064
13065 vstate = &helpers->dthps_vstate;
13066 mstate->dtms_arg[0] = arg0;
13067 mstate->dtms_arg[1] = arg1;
13068
13069 /*
13070 * Now iterate over each helper. If its predicate evaluates to 'true',
13071 * we'll call the corresponding actions. Note that the below calls
13072 * to dtrace_dif_emulate() may set faults in machine state. This is
13073 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13074 * the stored DIF offset with its own (which is the desired behavior).
13075 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13076 * from machine state; this is okay, too.
13077 */
13078 for (; helper != NULL; helper = helper->dtha_next) {
13079 if ((pred = helper->dtha_predicate) != NULL) {
13080 if (trace)
13081 dtrace_helper_trace(helper, mstate, vstate, 0);
13082
13083 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13084 goto next;
13085
13086 if (*flags & CPU_DTRACE_FAULT)
13087 goto err;
13088 }
13089
13090 for (i = 0; i < helper->dtha_nactions; i++) {
13091 if (trace)
13092 dtrace_helper_trace(helper,
13093 mstate, vstate, i + 1);
13094
13095 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13096 mstate, vstate, state);
13097
13098 if (*flags & CPU_DTRACE_FAULT)
13099 goto err;
13100 }
13101
13102 next:
13103 if (trace)
13104 dtrace_helper_trace(helper, mstate, vstate,
13105 DTRACE_HELPTRACE_NEXT);
13106 }
13107
13108 if (trace)
13109 dtrace_helper_trace(helper, mstate, vstate,
13110 DTRACE_HELPTRACE_DONE);
13111
13112 /*
13113 * Restore the arg0 that we saved upon entry.
13114 */
13115 mstate->dtms_arg[0] = sarg0;
13116 mstate->dtms_arg[1] = sarg1;
13117
13118 return (rval);
13119
13120 err:
13121 if (trace)
13122 dtrace_helper_trace(helper, mstate, vstate,
13123 DTRACE_HELPTRACE_ERR);
13124
13125 /*
13126 * Restore the arg0 that we saved upon entry.
13127 */
13128 mstate->dtms_arg[0] = sarg0;
13129 mstate->dtms_arg[1] = sarg1;
13130
13131 return (NULL);
13132 }
13133
13134 static void
13135 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13136 dtrace_vstate_t *vstate)
13137 {
13138 int i;
13139
13140 if (helper->dtha_predicate != NULL)
13141 dtrace_difo_release(helper->dtha_predicate, vstate);
13142
13143 for (i = 0; i < helper->dtha_nactions; i++) {
13144 ASSERT(helper->dtha_actions[i] != NULL);
13145 dtrace_difo_release(helper->dtha_actions[i], vstate);
13146 }
13147
13148 kmem_free(helper->dtha_actions,
13149 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13150 kmem_free(helper, sizeof (dtrace_helper_action_t));
13151 }
13152
13153 #if !defined(__APPLE__)
13154 static int
13155 dtrace_helper_destroygen(int gen)
13156 {
13157 proc_t *p = curproc;
13158 #else
13159 static int
13160 dtrace_helper_destroygen(proc_t* p, int gen)
13161 {
13162 #endif
13163 dtrace_helpers_t *help = p->p_dtrace_helpers;
13164 dtrace_vstate_t *vstate;
13165 int i;
13166
13167 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
13168
13169 if (help == NULL || gen > help->dthps_generation)
13170 return (EINVAL);
13171
13172 vstate = &help->dthps_vstate;
13173
13174 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13175 dtrace_helper_action_t *last = NULL, *h, *next;
13176
13177 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13178 next = h->dtha_next;
13179
13180 if (h->dtha_generation == gen) {
13181 if (last != NULL) {
13182 last->dtha_next = next;
13183 } else {
13184 help->dthps_actions[i] = next;
13185 }
13186
13187 dtrace_helper_action_destroy(h, vstate);
13188 } else {
13189 last = h;
13190 }
13191 }
13192 }
13193
13194 /*
13195 * Iterate until we've cleared out all helper providers with the
13196 * given generation number.
13197 */
13198 for (;;) {
13199 dtrace_helper_provider_t *prov;
13200
13201 /*
13202 * Look for a helper provider with the right generation. We
13203 * have to start back at the beginning of the list each time
13204 * because we drop dtrace_lock. It's unlikely that we'll make
13205 * more than two passes.
13206 */
13207 for (i = 0; i < help->dthps_nprovs; i++) {
13208 prov = help->dthps_provs[i];
13209
13210 if (prov->dthp_generation == gen)
13211 break;
13212 }
13213
13214 /*
13215 * If there were no matches, we're done.
13216 */
13217 if (i == help->dthps_nprovs)
13218 break;
13219
13220 /*
13221 * Move the last helper provider into this slot.
13222 */
13223 help->dthps_nprovs--;
13224 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
13225 help->dthps_provs[help->dthps_nprovs] = NULL;
13226
13227 lck_mtx_unlock(&dtrace_lock);
13228
13229 /*
13230 * If we have a meta provider, remove this helper provider.
13231 */
13232 lck_mtx_lock(&dtrace_meta_lock);
13233 if (dtrace_meta_pid != NULL) {
13234 ASSERT(dtrace_deferred_pid == NULL);
13235 dtrace_helper_provider_remove(&prov->dthp_prov,
13236 p->p_pid);
13237 }
13238 lck_mtx_unlock(&dtrace_meta_lock);
13239
13240 dtrace_helper_provider_destroy(prov);
13241
13242 lck_mtx_lock(&dtrace_lock);
13243 }
13244
13245 return (0);
13246 }
13247
13248 static int
13249 dtrace_helper_validate(dtrace_helper_action_t *helper)
13250 {
13251 int err = 0, i;
13252 dtrace_difo_t *dp;
13253
13254 if ((dp = helper->dtha_predicate) != NULL)
13255 err += dtrace_difo_validate_helper(dp);
13256
13257 for (i = 0; i < helper->dtha_nactions; i++)
13258 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
13259
13260 return (err == 0);
13261 }
13262
13263 #if !defined(__APPLE__)
13264 static int
13265 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
13266 #else
13267 static int
13268 dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep)
13269 #endif
13270 {
13271 dtrace_helpers_t *help;
13272 dtrace_helper_action_t *helper, *last;
13273 dtrace_actdesc_t *act;
13274 dtrace_vstate_t *vstate;
13275 dtrace_predicate_t *pred;
13276 int count = 0, nactions = 0, i;
13277
13278 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
13279 return (EINVAL);
13280
13281 #if !defined(__APPLE__)
13282 help = curproc->p_dtrace_helpers;
13283 #else
13284 help = p->p_dtrace_helpers;
13285 #endif
13286 last = help->dthps_actions[which];
13287 vstate = &help->dthps_vstate;
13288
13289 for (count = 0; last != NULL; last = last->dtha_next) {
13290 count++;
13291 if (last->dtha_next == NULL)
13292 break;
13293 }
13294
13295 /*
13296 * If we already have dtrace_helper_actions_max helper actions for this
13297 * helper action type, we'll refuse to add a new one.
13298 */
13299 if (count >= dtrace_helper_actions_max)
13300 return (ENOSPC);
13301
13302 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
13303 helper->dtha_generation = help->dthps_generation;
13304
13305 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
13306 ASSERT(pred->dtp_difo != NULL);
13307 dtrace_difo_hold(pred->dtp_difo);
13308 helper->dtha_predicate = pred->dtp_difo;
13309 }
13310
13311 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
13312 if (act->dtad_kind != DTRACEACT_DIFEXPR)
13313 goto err;
13314
13315 if (act->dtad_difo == NULL)
13316 goto err;
13317
13318 nactions++;
13319 }
13320
13321 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
13322 (helper->dtha_nactions = nactions), KM_SLEEP);
13323
13324 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
13325 dtrace_difo_hold(act->dtad_difo);
13326 helper->dtha_actions[i++] = act->dtad_difo;
13327 }
13328
13329 if (!dtrace_helper_validate(helper))
13330 goto err;
13331
13332 if (last == NULL) {
13333 help->dthps_actions[which] = helper;
13334 } else {
13335 last->dtha_next = helper;
13336 }
13337
13338 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
13339 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
13340 dtrace_helptrace_next = 0;
13341 }
13342
13343 return (0);
13344 err:
13345 dtrace_helper_action_destroy(helper, vstate);
13346 return (EINVAL);
13347 }
13348
13349 static void
13350 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
13351 dof_helper_t *dofhp)
13352 {
13353 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
13354
13355 lck_mtx_lock(&dtrace_meta_lock);
13356 lck_mtx_lock(&dtrace_lock);
13357
13358 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
13359 /*
13360 * If the dtrace module is loaded but not attached, or if
13361 * there isn't a meta provider registered to deal with
13362 * these provider descriptions, we need to postpone creating
13363 * the actual providers until later.
13364 */
13365
13366 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
13367 dtrace_deferred_pid != help) {
13368 help->dthps_deferred = 1;
13369 help->dthps_pid = p->p_pid;
13370 help->dthps_next = dtrace_deferred_pid;
13371 help->dthps_prev = NULL;
13372 if (dtrace_deferred_pid != NULL)
13373 dtrace_deferred_pid->dthps_prev = help;
13374 dtrace_deferred_pid = help;
13375 }
13376
13377 lck_mtx_unlock(&dtrace_lock);
13378
13379 } else if (dofhp != NULL) {
13380 /*
13381 * If the dtrace module is loaded and we have a particular
13382 * helper provider description, pass that off to the
13383 * meta provider.
13384 */
13385
13386 lck_mtx_unlock(&dtrace_lock);
13387
13388 dtrace_helper_provide(dofhp, p->p_pid);
13389
13390 } else {
13391 /*
13392 * Otherwise, just pass all the helper provider descriptions
13393 * off to the meta provider.
13394 */
13395
13396 int i;
13397 lck_mtx_unlock(&dtrace_lock);
13398
13399 for (i = 0; i < help->dthps_nprovs; i++) {
13400 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
13401 p->p_pid);
13402 }
13403 }
13404
13405 lck_mtx_unlock(&dtrace_meta_lock);
13406 }
13407
13408 #if !defined(__APPLE__)
13409 static int
13410 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
13411 #else
13412 static int
13413 dtrace_helper_provider_add(proc_t* p, dof_helper_t *dofhp, int gen)
13414 #endif
13415 {
13416 dtrace_helpers_t *help;
13417 dtrace_helper_provider_t *hprov, **tmp_provs;
13418 uint_t tmp_maxprovs, i;
13419
13420 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
13421
13422 #if !defined(__APPLE__)
13423 help = curproc->p_dtrace_helpers;
13424 #else
13425 help = p->p_dtrace_helpers;
13426 #endif
13427 ASSERT(help != NULL);
13428
13429 /*
13430 * If we already have dtrace_helper_providers_max helper providers,
13431 * we refuse to add a new one.
13432 */
13433 if (help->dthps_nprovs >= dtrace_helper_providers_max)
13434 return (ENOSPC);
13435
13436 /*
13437 * Check to make sure this isn't a duplicate.
13438 */
13439 for (i = 0; i < help->dthps_nprovs; i++) {
13440 if (dofhp->dofhp_addr ==
13441 help->dthps_provs[i]->dthp_prov.dofhp_addr)
13442 return (EALREADY);
13443 }
13444
13445 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
13446 hprov->dthp_prov = *dofhp;
13447 hprov->dthp_ref = 1;
13448 hprov->dthp_generation = gen;
13449
13450 /*
13451 * Allocate a bigger table for helper providers if it's already full.
13452 */
13453 if (help->dthps_maxprovs == help->dthps_nprovs) {
13454 tmp_maxprovs = help->dthps_maxprovs;
13455 tmp_provs = help->dthps_provs;
13456
13457 if (help->dthps_maxprovs == 0)
13458 help->dthps_maxprovs = 2;
13459 else
13460 help->dthps_maxprovs *= 2;
13461 if (help->dthps_maxprovs > dtrace_helper_providers_max)
13462 help->dthps_maxprovs = dtrace_helper_providers_max;
13463
13464 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
13465
13466 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
13467 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
13468
13469 if (tmp_provs != NULL) {
13470 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
13471 sizeof (dtrace_helper_provider_t *));
13472 kmem_free(tmp_provs, tmp_maxprovs *
13473 sizeof (dtrace_helper_provider_t *));
13474 }
13475 }
13476
13477 help->dthps_provs[help->dthps_nprovs] = hprov;
13478 help->dthps_nprovs++;
13479
13480 return (0);
13481 }
13482
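/*
 * Illustrative sketch (not part of the original source): the provider
 * table growth above is a capped doubling scheme -- start at two slots,
 * double on each overflow, and never exceed dtrace_helper_providers_max.
 * The helper below is a hypothetical, generic version of that resize
 * step.
 */
#if 0	/* example only; never compiled */
static void **
example_grow_table(void **old, uint_t oldmax, uint_t *newmax, uint_t cap)
{
	uint_t max = (oldmax == 0) ? 2 : oldmax * 2;
	void **new;

	if (max > cap)
		max = cap;

	new = kmem_zalloc(max * sizeof (void *), KM_SLEEP);

	if (old != NULL) {
		bcopy(old, new, oldmax * sizeof (void *));
		kmem_free(old, oldmax * sizeof (void *));
	}

	*newmax = max;
	return (new);
}
#endif
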
13483 static void
13484 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
13485 {
13486 lck_mtx_lock(&dtrace_lock);
13487
13488 if (--hprov->dthp_ref == 0) {
13489 dof_hdr_t *dof;
13490 lck_mtx_unlock(&dtrace_lock);
13491 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
13492 dtrace_dof_destroy(dof);
13493 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
13494 } else {
13495 lck_mtx_unlock(&dtrace_lock);
13496 }
13497 }
13498
13499 static int
13500 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
13501 {
13502 uintptr_t daddr = (uintptr_t)dof;
13503 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
13504 dof_provider_t *provider;
13505 dof_probe_t *probe;
13506 uint8_t *arg;
13507 char *strtab, *typestr;
13508 dof_stridx_t typeidx;
13509 size_t typesz;
13510 uint_t nprobes, j, k;
13511
13512 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
13513
13514 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
13515 dtrace_dof_error(dof, "misaligned section offset");
13516 return (-1);
13517 }
13518
13519 /*
13520 * The section needs to be large enough to contain the DOF provider
13521 * structure appropriate for the given version.
13522 */
13523 if (sec->dofs_size <
13524 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
13525 offsetof(dof_provider_t, dofpv_prenoffs) :
13526 sizeof (dof_provider_t))) {
13527 dtrace_dof_error(dof, "provider section too small");
13528 return (-1);
13529 }
13530
13531 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
13532 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
13533 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
13534 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
13535 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
13536
13537 if (str_sec == NULL || prb_sec == NULL ||
13538 arg_sec == NULL || off_sec == NULL)
13539 return (-1);
13540
13541 enoff_sec = NULL;
13542
13543 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13544 provider->dofpv_prenoffs != DOF_SECT_NONE &&
13545 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
13546 provider->dofpv_prenoffs)) == NULL)
13547 return (-1);
13548
13549 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
13550
13551 if (provider->dofpv_name >= str_sec->dofs_size ||
13552 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
13553 dtrace_dof_error(dof, "invalid provider name");
13554 return (-1);
13555 }
13556
13557 if (prb_sec->dofs_entsize == 0 ||
13558 prb_sec->dofs_entsize > prb_sec->dofs_size) {
13559 dtrace_dof_error(dof, "invalid entry size");
13560 return (-1);
13561 }
13562
13563 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
13564 dtrace_dof_error(dof, "misaligned entry size");
13565 return (-1);
13566 }
13567
13568 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
13569 dtrace_dof_error(dof, "invalid entry size");
13570 return (-1);
13571 }
13572
13573 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
13574 dtrace_dof_error(dof, "misaligned section offset");
13575 return (-1);
13576 }
13577
13578 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
13579 dtrace_dof_error(dof, "invalid entry size");
13580 return (-1);
13581 }
13582
13583 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
13584
13585 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
13586
13587 /*
13588 * Take a pass through the probes to check for errors.
13589 */
13590 for (j = 0; j < nprobes; j++) {
13591 probe = (dof_probe_t *)(uintptr_t)(daddr +
13592 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
13593
13594 if (probe->dofpr_func >= str_sec->dofs_size) {
13595 dtrace_dof_error(dof, "invalid function name");
13596 return (-1);
13597 }
13598
13599 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
13600 dtrace_dof_error(dof, "function name too long");
13601 return (-1);
13602 }
13603
13604 if (probe->dofpr_name >= str_sec->dofs_size ||
13605 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
13606 dtrace_dof_error(dof, "invalid probe name");
13607 return (-1);
13608 }
13609
13610 /*
13611 * The offset count must not wrap the index, and the offsets
13612 * must also not overflow the section's data.
13613 */
13614 if (probe->dofpr_offidx + probe->dofpr_noffs <
13615 probe->dofpr_offidx ||
13616 (probe->dofpr_offidx + probe->dofpr_noffs) *
13617 off_sec->dofs_entsize > off_sec->dofs_size) {
13618 dtrace_dof_error(dof, "invalid probe offset");
13619 return (-1);
13620 }
13621
13622 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
13623 /*
13624 * If there's no is-enabled offset section, make sure
13625 * there aren't any is-enabled offsets. Otherwise
13626 * perform the same checks as for probe offsets
13627 * (immediately above).
13628 */
13629 if (enoff_sec == NULL) {
13630 if (probe->dofpr_enoffidx != 0 ||
13631 probe->dofpr_nenoffs != 0) {
13632 dtrace_dof_error(dof, "is-enabled "
13633 "offsets with null section");
13634 return (-1);
13635 }
13636 } else if (probe->dofpr_enoffidx +
13637 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
13638 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
13639 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
13640 dtrace_dof_error(dof, "invalid is-enabled "
13641 "offset");
13642 return (-1);
13643 }
13644
13645 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
13646 dtrace_dof_error(dof, "zero probe and "
13647 "is-enabled offsets");
13648 return (-1);
13649 }
13650 } else if (probe->dofpr_noffs == 0) {
13651 dtrace_dof_error(dof, "zero probe offsets");
13652 return (-1);
13653 }
13654
13655 if (probe->dofpr_argidx + probe->dofpr_xargc <
13656 probe->dofpr_argidx ||
13657 (probe->dofpr_argidx + probe->dofpr_xargc) *
13658 arg_sec->dofs_entsize > arg_sec->dofs_size) {
13659 dtrace_dof_error(dof, "invalid args");
13660 return (-1);
13661 }
13662
13663 typeidx = probe->dofpr_nargv;
13664 typestr = strtab + probe->dofpr_nargv;
13665 for (k = 0; k < probe->dofpr_nargc; k++) {
13666 if (typeidx >= str_sec->dofs_size) {
13667 dtrace_dof_error(dof, "bad "
13668 "native argument type");
13669 return (-1);
13670 }
13671
13672 typesz = strlen(typestr) + 1;
13673 if (typesz > DTRACE_ARGTYPELEN) {
13674 dtrace_dof_error(dof, "native "
13675 "argument type too long");
13676 return (-1);
13677 }
13678 typeidx += typesz;
13679 typestr += typesz;
13680 }
13681
13682 typeidx = probe->dofpr_xargv;
13683 typestr = strtab + probe->dofpr_xargv;
13684 for (k = 0; k < probe->dofpr_xargc; k++) {
13685 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
13686 dtrace_dof_error(dof, "bad "
13687 "native argument index");
13688 return (-1);
13689 }
13690
13691 if (typeidx >= str_sec->dofs_size) {
13692 dtrace_dof_error(dof, "bad "
13693 "translated argument type");
13694 return (-1);
13695 }
13696
13697 typesz = strlen(typestr) + 1;
13698 if (typesz > DTRACE_ARGTYPELEN) {
13699 dtrace_dof_error(dof, "translated argument "
13700 "type too long");
13701 return (-1);
13702 }
13703
13704 typeidx += typesz;
13705 typestr += typesz;
13706 }
13707 }
13708
13709 return (0);
13710 }
13711
13712 #if !defined(__APPLE__)
13713 static int
13714 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
13715 #else
13716 static int
13717 dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp)
13718 #endif
13719 {
13720 dtrace_helpers_t *help;
13721 dtrace_vstate_t *vstate;
13722 dtrace_enabling_t *enab = NULL;
13723 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
13724 uintptr_t daddr = (uintptr_t)dof;
13725
13726 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
13727
13728 #if !defined(__APPLE__)
13729 if ((help = curproc->p_dtrace_helpers) == NULL)
13730 help = dtrace_helpers_create(curproc);
13731 #else
13732 if ((help = p->p_dtrace_helpers) == NULL)
13733 help = dtrace_helpers_create(p);
13734 #endif
13735
13736 vstate = &help->dthps_vstate;
13737
13738 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
13739 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
13740 dtrace_dof_destroy(dof);
13741 return (rv);
13742 }
13743
13744 /*
13745 * Look for helper providers and validate their descriptions.
13746 */
13747 if (dhp != NULL) {
13748 for (i = 0; i < dof->dofh_secnum; i++) {
13749 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
13750 dof->dofh_secoff + i * dof->dofh_secsize);
13751
13752 if (sec->dofs_type != DOF_SECT_PROVIDER)
13753 continue;
13754
13755 if (dtrace_helper_provider_validate(dof, sec) != 0) {
13756 dtrace_enabling_destroy(enab);
13757 dtrace_dof_destroy(dof);
13758 return (-1);
13759 }
13760
13761 nprovs++;
13762 }
13763 }
13764
13765 /*
13766 * Now we need to walk through the ECB descriptions in the enabling.
13767 */
13768 for (i = 0; i < enab->dten_ndesc; i++) {
13769 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
13770 dtrace_probedesc_t *desc = &ep->dted_probe;
13771
13772 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
13773 continue;
13774
13775 if (strcmp(desc->dtpd_mod, "helper") != 0)
13776 continue;
13777
13778 if (strcmp(desc->dtpd_func, "ustack") != 0)
13779 continue;
13780
13781 #if !defined(__APPLE__)
13782 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, ep)) != 0)
13783 #else
13784 if ((rv = dtrace_helper_action_add(p, DTRACE_HELPER_ACTION_USTACK, ep)) != 0)
13785 #endif
13786 {
13787 /*
13788 * Adding this helper action failed -- we are now going
13789 * to rip out the entire generation and return failure.
13790 */
13791 #if !defined(__APPLE__)
13792 (void) dtrace_helper_destroygen(help->dthps_generation);
13793 #else
13794 (void) dtrace_helper_destroygen(p, help->dthps_generation);
13795 #endif
13796 dtrace_enabling_destroy(enab);
13797 dtrace_dof_destroy(dof);
13798 return (-1);
13799 }
13800
13801 nhelpers++;
13802 }
13803
13804 if (nhelpers < enab->dten_ndesc)
13805 dtrace_dof_error(dof, "unmatched helpers");
13806
13807 gen = help->dthps_generation++;
13808 dtrace_enabling_destroy(enab);
13809
13810 if (dhp != NULL && nprovs > 0) {
13811 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
13812 #if !defined(__APPLE__)
13813 if (dtrace_helper_provider_add(dhp, gen) == 0) {
13814 #else
13815 if (dtrace_helper_provider_add(p, dhp, gen) == 0) {
13816 #endif
13817 lck_mtx_unlock(&dtrace_lock);
13818 #if !defined(__APPLE__)
13819 dtrace_helper_provider_register(curproc, help, dhp);
13820 #else
13821 dtrace_helper_provider_register(p, help, dhp);
13822 #endif
13823 lck_mtx_lock(&dtrace_lock);
13824
13825 destroy = 0;
13826 }
13827 }
13828
13829 if (destroy)
13830 dtrace_dof_destroy(dof);
13831
13832 return (gen);
13833 }
13834
13835 #if defined(__APPLE__)
13836
13837 /*
13838 * DTrace lazy dof
13839 *
13840 * DTrace user static probes (USDT probes) and helper actions are loaded
13841 * in a process by processing dof sections. The dof sections are passed
13842 * into the kernel by dyld, in a dof_ioctl_data_t block. It is rather
13843 * expensive to process dof for a process that will never use it. There
13844 * is a memory cost (allocating the providers/probes), and a cpu cost
13845 * (creating the providers/probes).
13846 *
13847 * To reduce this cost, we use "lazy dof". The normal procedure for
13848 * dof processing is to copyin the dof(s) pointed to by the dof_ioctl_data_t
13849 * block, and invoke dtrace_helper_slurp() on them. When "lazy dof" is
13850 * used, each process retains the dof_ioctl_data_t block, instead of
13851 * copying in the data it points to.
13852 *
13853 * The dof_ioctl_data_t blocks are managed as if they were the actual
13854 * processed dof; on fork the block is copied to the child, on exec and
13855 * exit the block is freed.
13856 *
13857 * If the process loads library(s) containing additional dof, the
13858 * new dof_ioctl_data_t is merged with the existing block.
13859 *
13860 * There are a few catches that make this slightly more difficult.
13861 * When dyld registers dof_ioctl_data_t blocks, it expects a unique
13862 * identifier value for each dof in the block. In non-lazy dof terms,
13863 * this is the generation that dof was loaded in. If we hand back
13864 * a UID for a lazy dof, that same UID must be able to unload the
13865 * dof once it has become non-lazy. To meet this requirement, the
13866 * code that loads lazy dof requires that the UIDs for dof(s) in
13867 * the lazy dof be sorted in ascending order. It is okay to skip
13868 * UIDs, e.g., 1 -> 5 -> 6 is legal.
13869 *
13870 * Once a process has become non-lazy, it will stay non-lazy. All
13871 * future dof operations for that process will be non-lazy, even
13872 * if the dof mode transitions back to lazy.
13873 *
13874 * Always do lazy dof checks before non-lazy (i.e., in fork, exit, exec).
13875 * That way if the lazy check fails due to transitioning to non-lazy, the
13876 * right thing is done with the newly faulted in dof.
13877 */
13878
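/*
 * A rough sketch of the lazy path, using the structures referenced above
 * (field names as used later in this file):
 *
 *	dof_ioctl_data_t passed in by dyld:
 *		dofiod_count      = 2
 *		dofiod_helpers[0] = { dofhp_addr = A0, dofhp_dof = A0 }
 *		dofiod_helpers[1] = { dofhp_addr = A1, dofhp_dof = A1 }
 *
 *	After dtrace_lazy_dofs_add(), dofhp_dof holds the assigned generation:
 *		dofiod_helpers[0].dofhp_dof = N
 *		dofiod_helpers[1].dofhp_dof = N + 1
 *
 *	The dof bytes at A0/A1 are only copied in and slurped when a consumer
 *	opens /dev/dtrace and the mode drops to LAZY_OFF (see
 *	dtrace_lazy_dofs_proc_iterate_doit(), below).
 */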
13879 /*
13880 * This method is a bit squicky. It must handle:
13881 *
13882 * dof should not be lazy.
13883 * dof should have been handled lazily, but there was an error.
13884 * dof was handled lazily, and needs to be freed.
13885 * dof was handled lazily, and must not be freed.
13886 *
13887 *
13888 * Returns EACCES if the dof should be handled non-lazily.
13889 *
13890 * KERN_SUCCESS and all other return codes indicate lazy handling of the dof.
13891 *
13892 * If the dofs data is claimed by this method, dofs_claimed will be set.
13893 * Callers should not free claimed dofs.
13894 */
13895 int
13896 dtrace_lazy_dofs_add(proc_t *p, dof_ioctl_data_t* incoming_dofs, int *dofs_claimed)
13897 {
13898 ASSERT(p);
13899 ASSERT(incoming_dofs && incoming_dofs->dofiod_count > 0);
13900
13901 int rval = 0;
13902 *dofs_claimed = 0;
13903
13904 lck_rw_lock_shared(&dtrace_dof_mode_lock);
13905
13906 /*
13907 * If we have lazy dof, dof mode better be LAZY_ON.
13908 */
13909 ASSERT(p->p_dtrace_lazy_dofs == NULL || dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON);
13910 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
13911 ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
13912
13913 /*
13914 * Any existing helpers force non-lazy behavior.
13915 */
13916 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
13917 lck_mtx_lock(&p->p_dtrace_sprlock);
13918
13919 dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
13920 unsigned int existing_dofs_count = (existing_dofs) ? existing_dofs->dofiod_count : 0;
13921 unsigned int i, merged_dofs_count = incoming_dofs->dofiod_count + existing_dofs_count;
13922
13923 /*
13924 * Range check...
13925 */
13926 if (merged_dofs_count == 0 || merged_dofs_count > 1024) {
13927 dtrace_dof_error(NULL, "lazy_dofs_add merged_dofs_count out of range");
13928 rval = EINVAL;
13929 goto unlock;
13930 }
13931
13932 /*
13933 * Each dof being added must be assigned a unique generation.
13934 */
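/*
 * For example, if the process already holds lazy dofs with generations
 * 1, 5 and 6, two incoming dofs are assigned generations 7 and 8
 * (last existing generation + 1, ascending), preserving the ordering
 * that the lazy-load path depends on.
 */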
13935 uint64_t generation = (existing_dofs) ? existing_dofs->dofiod_helpers[existing_dofs_count - 1].dofhp_dof + 1 : 1;
13936 for (i=0; i<incoming_dofs->dofiod_count; i++) {
13937 /*
13938 * We rely on these being the same so we can overwrite dofhp_dof and not lose info.
13939 */
13940 ASSERT(incoming_dofs->dofiod_helpers[i].dofhp_dof == incoming_dofs->dofiod_helpers[i].dofhp_addr);
13941 incoming_dofs->dofiod_helpers[i].dofhp_dof = generation++;
13942 }
13943
13944
13945 if (existing_dofs) {
13946 /*
13947 * Merge the existing and incoming dofs
13948 */
13949 size_t merged_dofs_size = DOF_IOCTL_DATA_T_SIZE(merged_dofs_count);
13950 dof_ioctl_data_t* merged_dofs = kmem_alloc(merged_dofs_size, KM_SLEEP);
13951
13952 bcopy(&existing_dofs->dofiod_helpers[0],
13953 &merged_dofs->dofiod_helpers[0],
13954 sizeof(dof_helper_t) * existing_dofs_count);
13955 bcopy(&incoming_dofs->dofiod_helpers[0],
13956 &merged_dofs->dofiod_helpers[existing_dofs_count],
13957 sizeof(dof_helper_t) * incoming_dofs->dofiod_count);
13958
13959 merged_dofs->dofiod_count = merged_dofs_count;
13960
13961 kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
13962
13963 p->p_dtrace_lazy_dofs = merged_dofs;
13964 } else {
13965 /*
13966 * Claim the incoming dofs
13967 */
13968 *dofs_claimed = 1;
13969 p->p_dtrace_lazy_dofs = incoming_dofs;
13970 }
13971
13972 #if DEBUG
13973 dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
13974 for (i=0; i<all_dofs->dofiod_count-1; i++) {
13975 ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
13976 }
13977 #endif /* DEBUG */
13978
13979 unlock:
13980 lck_mtx_unlock(&p->p_dtrace_sprlock);
13981 } else {
13982 rval = EACCES;
13983 }
13984
13985 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
13986
13987 return rval;
13988 }
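/*
 * Typical caller pattern (a sketch; see DTRACEHIOC_ADDDOF in
 * dtrace_ioctl_helper(), below): try the lazy path first, fall back to
 * non-lazy slurping on EACCES, and free the block only if it was not
 * claimed:
 *
 *	rval = dtrace_lazy_dofs_add(p, multi_dof, &multi_dof_claimed);
 *	if (rval == EACCES) {
 *		rval = 0;
 *		... copyin each dof and dtrace_helper_slurp() it ...
 *	}
 *	if (!multi_dof_claimed)
 *		kmem_free(multi_dof, dof_ioctl_data_size);
 */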
13989
13990 /*
13991 * Returns:
13992 *
13993 * EINVAL: lazy dof is enabled, but the requested generation was not found.
13994 * EACCES: This removal needs to be handled non-lazily.
13995 */
13996 int
13997 dtrace_lazy_dofs_remove(proc_t *p, int generation)
13998 {
13999 int rval = EINVAL;
14000
14001 lck_rw_lock_shared(&dtrace_dof_mode_lock);
14002
14003 /*
14004 * If we have lazy dof, dof mode better be LAZY_ON.
14005 */
14006 ASSERT(p->p_dtrace_lazy_dofs == NULL || dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON);
14007 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
14008 ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
14009
14010 /*
14011 * Any existing helpers force non-lazy behavior.
14012 */
14013 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
14014 lck_mtx_lock(&p->p_dtrace_sprlock);
14015
14016 dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
14017
14018 if (existing_dofs) {
14019 int index, existing_dofs_count = existing_dofs->dofiod_count;
14020 for (index=0; index<existing_dofs_count; index++) {
14021 if ((int)existing_dofs->dofiod_helpers[index].dofhp_dof == generation) {
14022 dof_ioctl_data_t* removed_dofs = NULL;
14023
14024 /*
14025 * If there is only 1 dof, we'll delete it and swap in NULL.
14026 */
14027 if (existing_dofs_count > 1) {
14028 int removed_dofs_count = existing_dofs_count - 1;
14029 size_t removed_dofs_size = DOF_IOCTL_DATA_T_SIZE(removed_dofs_count);
14030
14031 removed_dofs = kmem_alloc(removed_dofs_size, KM_SLEEP);
14032 removed_dofs->dofiod_count = removed_dofs_count;
14033
14034 /*
14035 * copy the remaining data.
14036 */
14037 if (index > 0) {
14038 bcopy(&existing_dofs->dofiod_helpers[0],
14039 &removed_dofs->dofiod_helpers[0],
14040 index * sizeof(dof_helper_t));
14041 }
14042
14043 if (index < existing_dofs_count-1) {
14044 bcopy(&existing_dofs->dofiod_helpers[index+1],
14045 &removed_dofs->dofiod_helpers[index],
14046 (existing_dofs_count - index - 1) * sizeof(dof_helper_t));
14047 }
14048 }
14049
14050 kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
14051
14052 p->p_dtrace_lazy_dofs = removed_dofs;
14053
14054 rval = KERN_SUCCESS;
14055
14056 break;
14057 }
14058 }
14059
14060 #if DEBUG
14061 dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
14062 if (all_dofs) {
14063 unsigned int i;
14064 for (i=0; i<all_dofs->dofiod_count-1; i++) {
14065 ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
14066 }
14067 }
14068 #endif
14069
14070 }
14071
14072 lck_mtx_unlock(&p->p_dtrace_sprlock);
14073 } else {
14074 rval = EACCES;
14075 }
14076
14077 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
14078
14079 return rval;
14080 }
14081
14082 void
14083 dtrace_lazy_dofs_destroy(proc_t *p)
14084 {
14085 lck_rw_lock_shared(&dtrace_dof_mode_lock);
14086 lck_mtx_lock(&p->p_dtrace_sprlock);
14087
14088 /*
14089 * If we have lazy dof, dof mode better be LAZY_ON, or we must be exiting.
14090 * We cannot assert against DTRACE_DOF_MODE_NEVER here, because we are called from
14091 * kern_exit.c and kern_exec.c.
14092 */
14093 ASSERT(p->p_dtrace_lazy_dofs == NULL || dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON || p->p_lflag & P_LEXIT);
14094 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
14095
14096 dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
14097 p->p_dtrace_lazy_dofs = NULL;
14098
14099 lck_mtx_unlock(&p->p_dtrace_sprlock);
14100 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
14101
14102 if (lazy_dofs) {
14103 kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
14104 }
14105 }
14106
14107 void
14108 dtrace_lazy_dofs_duplicate(proc_t *parent, proc_t *child)
14109 {
14110 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
14111 lck_mtx_assert(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
14112 lck_mtx_assert(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
14113
14114 lck_rw_lock_shared(&dtrace_dof_mode_lock);
14115 lck_mtx_lock(&parent->p_dtrace_sprlock);
14116
14117 /*
14118 * If we have lazy dof, dof mode better be LAZY_ON, or we must be exiting.
14119 * We cannot assert against DTRACE_DOF_MODE_NEVER here, because we are called from
14120 * kern_fork.c
14121 */
14122 ASSERT(parent->p_dtrace_lazy_dofs == NULL || dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON);
14123 ASSERT(parent->p_dtrace_lazy_dofs == NULL || parent->p_dtrace_helpers == NULL);
14124 /*
14125 * In theory we should hold the child sprlock, but this is safe...
14126 */
14127 ASSERT(child->p_dtrace_lazy_dofs == NULL && child->p_dtrace_helpers == NULL);
14128
14129 dof_ioctl_data_t* parent_dofs = parent->p_dtrace_lazy_dofs;
14130 dof_ioctl_data_t* child_dofs = NULL;
14131 if (parent_dofs) {
14132 size_t parent_dofs_size = DOF_IOCTL_DATA_T_SIZE(parent_dofs->dofiod_count);
14133 child_dofs = kmem_alloc(parent_dofs_size, KM_SLEEP);
14134 bcopy(parent_dofs, child_dofs, parent_dofs_size);
14135 }
14136
14137 lck_mtx_unlock(&parent->p_dtrace_sprlock);
14138
14139 if (child_dofs) {
14140 lck_mtx_lock(&child->p_dtrace_sprlock);
14141 child->p_dtrace_lazy_dofs = child_dofs;
14142 lck_mtx_unlock(&child->p_dtrace_sprlock);
14143 }
14144
14145 lck_rw_unlock_shared(&dtrace_dof_mode_lock);
14146 }
14147
14148 static int
14149 dtrace_lazy_dofs_proc_iterate_filter(proc_t *p, void* ignored)
14150 {
14151 #pragma unused(ignored)
14152 /*
14153 * Okay to NULL test without taking the sprlock.
14154 */
14155 return p->p_dtrace_lazy_dofs != NULL;
14156 }
14157
14158 static int
14159 dtrace_lazy_dofs_proc_iterate_doit(proc_t *p, void* ignored)
14160 {
14161 #pragma unused(ignored)
14162 /*
14163 * It is possible this process may exit during our attempt to
14164 * fault in the dof. We could fix this by holding locks longer,
14165 * but the errors are benign.
14166 */
14167 lck_mtx_lock(&p->p_dtrace_sprlock);
14168
14169 /*
14170 * In this case only, it is okay to have lazy dof when dof mode is DTRACE_DOF_MODE_LAZY_OFF
14171 */
14172 ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
14173 ASSERT(dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF);
14174
14175
14176 dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
14177 p->p_dtrace_lazy_dofs = NULL;
14178
14179 lck_mtx_unlock(&p->p_dtrace_sprlock);
14180
14181 /*
14182 * Process each dof_helper_t
14183 */
14184 if (lazy_dofs != NULL) {
14185 unsigned int i;
14186 int rval;
14187
14188 for (i=0; i<lazy_dofs->dofiod_count; i++) {
14189 /*
14190 * When loading lazy dof, we depend on the generations being sorted in ascending order.
14191 */
14192 ASSERT(i >= (lazy_dofs->dofiod_count - 1) || lazy_dofs->dofiod_helpers[i].dofhp_dof < lazy_dofs->dofiod_helpers[i+1].dofhp_dof);
14193
14194 dof_helper_t *dhp = &lazy_dofs->dofiod_helpers[i];
14195
14196 /*
14197 * We stored the generation in dofhp_dof. Save it, and restore the original value.
14198 */
14199 int generation = dhp->dofhp_dof;
14200 dhp->dofhp_dof = dhp->dofhp_addr;
14201
14202 dof_hdr_t *dof = dtrace_dof_copyin_from_proc(p, dhp->dofhp_dof, &rval);
14203
14204 if (dof != NULL) {
14205 dtrace_helpers_t *help;
14206
14207 lck_mtx_lock(&dtrace_lock);
14208
14209 /*
14210 * This must be done with the dtrace_lock held
14211 */
14212 if ((help = p->p_dtrace_helpers) == NULL)
14213 help = dtrace_helpers_create(p);
14214
14215 /*
14216 * If the generation value has been bumped, someone snuck in
14217 * when we released the dtrace lock. We have to dump this generation;
14218 * there is no safe way to load it.
14219 */
14220 if (help->dthps_generation <= generation) {
14221 help->dthps_generation = generation;
14222
14223 /*
14224 * dtrace_helper_slurp() takes responsibility for the dof --
14225 * it may free it now or it may save it and free it later.
14226 */
14227 if ((rval = dtrace_helper_slurp(p, dof, dhp)) != generation) {
14228 dtrace_dof_error(NULL, "returned value did not match expected generation");
14229 }
14230 }
14231
14232 lck_mtx_unlock(&dtrace_lock);
14233 }
14234 }
14235
14236 kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
14237 }
14238
14239 return PROC_RETURNED;
14240 }
14241
14242 #endif /* __APPLE__ */
14243
14244 static dtrace_helpers_t *
14245 dtrace_helpers_create(proc_t *p)
14246 {
14247 dtrace_helpers_t *help;
14248
14249 lck_mtx_assert(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14250 ASSERT(p->p_dtrace_helpers == NULL);
14251
14252 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14253 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14254 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14255
14256 p->p_dtrace_helpers = help;
14257 dtrace_helpers++;
14258
14259 return (help);
14260 }
14261
14262 #if !defined(__APPLE__)
14263 static void
14264 dtrace_helpers_destroy(void)
14265 {
14266 proc_t *p = curproc;
14267 #else
14268 static void
14269 dtrace_helpers_destroy(proc_t* p)
14270 {
14271 #endif
14272 dtrace_helpers_t *help;
14273 dtrace_vstate_t *vstate;
14274 int i;
14275
14276 lck_mtx_lock(&dtrace_lock);
14277
14278 ASSERT(p->p_dtrace_helpers != NULL);
14279 ASSERT(dtrace_helpers > 0);
14280
14281 help = p->p_dtrace_helpers;
14282 vstate = &help->dthps_vstate;
14283
14284 /*
14285 * We're now going to lose the help from this process.
14286 */
14287 p->p_dtrace_helpers = NULL;
14288 dtrace_sync();
14289
14290 /*
14291 * Destroy the helper actions.
14292 */
14293 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14294 dtrace_helper_action_t *h, *next;
14295
14296 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14297 next = h->dtha_next;
14298 dtrace_helper_action_destroy(h, vstate);
14299 h = next;
14300 }
14301 }
14302
14303 lck_mtx_unlock(&dtrace_lock);
14304
14305 /*
14306 * Destroy the helper providers.
14307 */
14308 if (help->dthps_maxprovs > 0) {
14309 lck_mtx_lock(&dtrace_meta_lock);
14310 if (dtrace_meta_pid != NULL) {
14311 ASSERT(dtrace_deferred_pid == NULL);
14312
14313 for (i = 0; i < help->dthps_nprovs; i++) {
14314 dtrace_helper_provider_remove(
14315 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14316 }
14317 } else {
14318 lck_mtx_lock(&dtrace_lock);
14319 ASSERT(help->dthps_deferred == 0 ||
14320 help->dthps_next != NULL ||
14321 help->dthps_prev != NULL ||
14322 help == dtrace_deferred_pid);
14323
14324 /*
14325 * Remove the helper from the deferred list.
14326 */
14327 if (help->dthps_next != NULL)
14328 help->dthps_next->dthps_prev = help->dthps_prev;
14329 if (help->dthps_prev != NULL)
14330 help->dthps_prev->dthps_next = help->dthps_next;
14331 if (dtrace_deferred_pid == help) {
14332 dtrace_deferred_pid = help->dthps_next;
14333 ASSERT(help->dthps_prev == NULL);
14334 }
14335
14336 lck_mtx_unlock(&dtrace_lock);
14337 }
14338
14339 lck_mtx_unlock(&dtrace_meta_lock);
14340
14341 for (i = 0; i < help->dthps_nprovs; i++) {
14342 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14343 }
14344
14345 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14346 sizeof (dtrace_helper_provider_t *));
14347 }
14348
14349 lck_mtx_lock(&dtrace_lock);
14350
14351 dtrace_vstate_fini(&help->dthps_vstate);
14352 kmem_free(help->dthps_actions,
14353 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14354 kmem_free(help, sizeof (dtrace_helpers_t));
14355
14356 --dtrace_helpers;
14357 lck_mtx_unlock(&dtrace_lock);
14358 }
14359
14360 static void
14361 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14362 {
14363 dtrace_helpers_t *help, *newhelp;
14364 dtrace_helper_action_t *helper, *new, *last;
14365 dtrace_difo_t *dp;
14366 dtrace_vstate_t *vstate;
14367 int i, j, sz, hasprovs = 0;
14368
14369 lck_mtx_lock(&dtrace_lock);
14370 ASSERT(from->p_dtrace_helpers != NULL);
14371 ASSERT(dtrace_helpers > 0);
14372
14373 help = from->p_dtrace_helpers;
14374 newhelp = dtrace_helpers_create(to);
14375 ASSERT(to->p_dtrace_helpers != NULL);
14376
14377 newhelp->dthps_generation = help->dthps_generation;
14378 vstate = &newhelp->dthps_vstate;
14379
14380 /*
14381 * Duplicate the helper actions.
14382 */
14383 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14384 if ((helper = help->dthps_actions[i]) == NULL)
14385 continue;
14386
14387 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14388 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14389 KM_SLEEP);
14390 new->dtha_generation = helper->dtha_generation;
14391
14392 if ((dp = helper->dtha_predicate) != NULL) {
14393 dp = dtrace_difo_duplicate(dp, vstate);
14394 new->dtha_predicate = dp;
14395 }
14396
14397 new->dtha_nactions = helper->dtha_nactions;
14398 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14399 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14400
14401 for (j = 0; j < new->dtha_nactions; j++) {
14402 dtrace_difo_t *dp = helper->dtha_actions[j];
14403
14404 ASSERT(dp != NULL);
14405 dp = dtrace_difo_duplicate(dp, vstate);
14406 new->dtha_actions[j] = dp;
14407 }
14408
14409 if (last != NULL) {
14410 last->dtha_next = new;
14411 } else {
14412 newhelp->dthps_actions[i] = new;
14413 }
14414
14415 last = new;
14416 }
14417 }
14418
14419 /*
14420 * Duplicate the helper providers and register them with the
14421 * DTrace framework.
14422 */
14423 if (help->dthps_nprovs > 0) {
14424 newhelp->dthps_nprovs = help->dthps_nprovs;
14425 newhelp->dthps_maxprovs = help->dthps_nprovs;
14426 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14427 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14428 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14429 newhelp->dthps_provs[i] = help->dthps_provs[i];
14430 newhelp->dthps_provs[i]->dthp_ref++;
14431 }
14432
14433 hasprovs = 1;
14434 }
14435
14436 lck_mtx_unlock(&dtrace_lock);
14437
14438 if (hasprovs)
14439 dtrace_helper_provider_register(to, newhelp, NULL);
14440 }
14441
14442 /*
14443 * DTrace Hook Functions
14444 */
14445 static void
14446 dtrace_module_loaded(struct modctl *ctl)
14447 {
14448 dtrace_provider_t *prv;
14449
14450 lck_mtx_lock(&dtrace_provider_lock);
14451 lck_mtx_lock(&mod_lock);
14452
14453 // ASSERT(ctl->mod_busy);
14454
14455 /*
14456 * We're going to call each provider's per-module provide operation
14457 * specifying only this module.
14458 */
14459 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14460 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14461
14462 lck_mtx_unlock(&mod_lock);
14463 lck_mtx_unlock(&dtrace_provider_lock);
14464
14465 /*
14466 * If we have any retained enablings, we need to match against them.
14467 * Enabling probes requires that cpu_lock be held, and we cannot hold
14468 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14469 * module. (In particular, this happens when loading scheduling
14470 * classes.) So if we have any retained enablings, we need to dispatch
14471 * our task queue to do the match for us.
14472 */
14473 lck_mtx_lock(&dtrace_lock);
14474
14475 if (dtrace_retained == NULL) {
14476 lck_mtx_unlock(&dtrace_lock);
14477 return;
14478 }
14479
14480 (void) taskq_dispatch(dtrace_taskq,
14481 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14482
14483 lck_mtx_unlock(&dtrace_lock);
14484
14485 /*
14486 * And now, for a little heuristic sleaze: in general, we want to
14487 * match modules as soon as they load. However, we cannot guarantee
14488 * this, because it would lead us to the lock ordering violation
14489 * outlined above. The common case, of course, is that cpu_lock is
14490 * _not_ held -- so we delay here for a clock tick, hoping that that's
14491 * long enough for the task queue to do its work. If it's not, it's
14492 * not a serious problem -- it just means that the module that we
14493 * just loaded may not be immediately instrumentable.
14494 */
14495 delay(1);
14496 }
14497
14498 static void
14499 dtrace_module_unloaded(struct modctl *ctl)
14500 {
14501 dtrace_probe_t template, *probe, *first, *next;
14502 dtrace_provider_t *prov;
14503
14504 template.dtpr_mod = ctl->mod_modname;
14505
14506 lck_mtx_lock(&dtrace_provider_lock);
14507 lck_mtx_lock(&mod_lock);
14508 lck_mtx_lock(&dtrace_lock);
14509
14510 if (dtrace_bymod == NULL) {
14511 /*
14512 * The DTrace module is loaded (obviously) but not attached;
14513 * we don't have any work to do.
14514 */
14515 lck_mtx_unlock(&dtrace_provider_lock);
14516 lck_mtx_unlock(&mod_lock);
14517 lck_mtx_unlock(&dtrace_lock);
14518 return;
14519 }
14520
14521 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14522 probe != NULL; probe = probe->dtpr_nextmod) {
14523 if (probe->dtpr_ecb != NULL) {
14524 lck_mtx_unlock(&dtrace_provider_lock);
14525 lck_mtx_unlock(&mod_lock);
14526 lck_mtx_unlock(&dtrace_lock);
14527
14528 /*
14529 * This shouldn't _actually_ be possible -- we're
14530 * unloading a module that has an enabled probe in it.
14531 * (It's normally up to the provider to make sure that
14532 * this can't happen.) However, because dtps_enable()
14533 * doesn't have a failure mode, there can be an
14534 * enable/unload race. Upshot: we don't want to
14535 * assert, but we're not going to disable the
14536 * probe, either.
14537 */
14538 if (dtrace_err_verbose) {
14539 cmn_err(CE_WARN, "unloaded module '%s' had "
14540 "enabled probes", ctl->mod_modname);
14541 }
14542
14543 return;
14544 }
14545 }
14546
14547 probe = first;
14548
14549 for (first = NULL; probe != NULL; probe = next) {
14550 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14551
14552 dtrace_probes[probe->dtpr_id - 1] = NULL;
14553
14554 next = probe->dtpr_nextmod;
14555 dtrace_hash_remove(dtrace_bymod, probe);
14556 dtrace_hash_remove(dtrace_byfunc, probe);
14557 dtrace_hash_remove(dtrace_byname, probe);
14558
14559 if (first == NULL) {
14560 first = probe;
14561 probe->dtpr_nextmod = NULL;
14562 } else {
14563 probe->dtpr_nextmod = first;
14564 first = probe;
14565 }
14566 }
14567
14568 /*
14569 * We've removed all of the module's probes from the hash chains and
14570 * from the probe array. Now issue a dtrace_sync() to be sure that
14571 * everyone has cleared out from any probe array processing.
14572 */
14573 dtrace_sync();
14574
14575 for (probe = first; probe != NULL; probe = first) {
14576 first = probe->dtpr_nextmod;
14577 prov = probe->dtpr_provider;
14578 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14579 probe->dtpr_arg);
14580 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14581 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14582 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14583 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14584 #if !defined(__APPLE__)
14585 kmem_free(probe, sizeof (dtrace_probe_t));
14586 #else
14587 zfree(dtrace_probe_t_zone, probe);
14588 #endif
14589 }
14590
14591 lck_mtx_unlock(&dtrace_lock);
14592 lck_mtx_unlock(&mod_lock);
14593 lck_mtx_unlock(&dtrace_provider_lock);
14594 }
14595
14596 void
14597 dtrace_suspend(void)
14598 {
14599 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14600 }
14601
14602 void
14603 dtrace_resume(void)
14604 {
14605 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14606 }
14607
14608 static int
14609 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14610 {
14611 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14612 lck_mtx_lock(&dtrace_lock);
14613
14614 switch (what) {
14615 case CPU_CONFIG: {
14616 dtrace_state_t *state;
14617 dtrace_optval_t *opt, rs, c;
14618
14619 /*
14620 * For now, we only allocate a new buffer for anonymous state.
14621 */
14622 if ((state = dtrace_anon.dta_state) == NULL)
14623 break;
14624
14625 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14626 break;
14627
14628 opt = state->dts_options;
14629 c = opt[DTRACEOPT_CPU];
14630
14631 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14632 break;
14633
14634 /*
14635 * Regardless of what the actual policy is, we're going to
14636 * temporarily set our resize policy to be manual. We're
14637 * also going to temporarily set our CPU option to denote
14638 * the newly configured CPU.
14639 */
14640 rs = opt[DTRACEOPT_BUFRESIZE];
14641 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14642 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14643
14644 (void) dtrace_state_buffers(state);
14645
14646 opt[DTRACEOPT_BUFRESIZE] = rs;
14647 opt[DTRACEOPT_CPU] = c;
14648
14649 break;
14650 }
14651
14652 case CPU_UNCONFIG:
14653 /*
14654 * We don't free the buffer in the CPU_UNCONFIG case. (The
14655 * buffer will be freed when the consumer exits.)
14656 */
14657 break;
14658
14659 default:
14660 break;
14661 }
14662
14663 lck_mtx_unlock(&dtrace_lock);
14664 return (0);
14665 }
14666
14667 static void
14668 dtrace_cpu_setup_initial(processorid_t cpu)
14669 {
14670 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
14671 }
14672
14673 static void
14674 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
14675 {
14676 if (dtrace_toxranges >= dtrace_toxranges_max) {
14677 int osize, nsize;
14678 dtrace_toxrange_t *range;
14679
14680 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14681
14682 if (osize == 0) {
14683 ASSERT(dtrace_toxrange == NULL);
14684 ASSERT(dtrace_toxranges_max == 0);
14685 dtrace_toxranges_max = 1;
14686 } else {
14687 dtrace_toxranges_max <<= 1;
14688 }
14689
14690 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14691 range = kmem_zalloc(nsize, KM_SLEEP);
14692
14693 if (dtrace_toxrange != NULL) {
14694 ASSERT(osize != 0);
14695 bcopy(dtrace_toxrange, range, osize);
14696 kmem_free(dtrace_toxrange, osize);
14697 }
14698
14699 dtrace_toxrange = range;
14700 }
14701
14702 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
14703 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
14704
14705 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
14706 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
14707 dtrace_toxranges++;
14708 }
14709
14710 /*
14711 * DTrace Driver Cookbook Functions
14712 */
14713 /*ARGSUSED*/
14714 static int
14715 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
14716 {
14717 dtrace_provider_id_t id;
14718 dtrace_state_t *state = NULL;
14719 dtrace_enabling_t *enab;
14720
14721 lck_mtx_lock(&cpu_lock);
14722 lck_mtx_lock(&dtrace_provider_lock);
14723 lck_mtx_lock(&dtrace_lock);
14724
14725 if (ddi_soft_state_init(&dtrace_softstate,
14726 sizeof (dtrace_state_t), 0) != 0) {
14727 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
14728 lck_mtx_unlock(&cpu_lock);
14729 lck_mtx_unlock(&dtrace_provider_lock);
14730 lck_mtx_unlock(&dtrace_lock);
14731 return (DDI_FAILURE);
14732 }
14733
14734 #if !defined(__APPLE__)
14735 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
14736 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
14737 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
14738 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
14739 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
14740 ddi_remove_minor_node(devi, NULL);
14741 ddi_soft_state_fini(&dtrace_softstate);
14742 lck_mtx_unlock(&cpu_lock);
14743 lck_mtx_unlock(&dtrace_provider_lock);
14744 lck_mtx_unlock(&dtrace_lock);
14745 return (DDI_FAILURE);
14746 }
14747 #endif /* __APPLE__ */
14748
14749 ddi_report_dev(devi);
14750 dtrace_devi = devi;
14751
14752 dtrace_modload = dtrace_module_loaded;
14753 dtrace_modunload = dtrace_module_unloaded;
14754 dtrace_cpu_init = dtrace_cpu_setup_initial;
14755 dtrace_helpers_cleanup = dtrace_helpers_destroy;
14756 dtrace_helpers_fork = dtrace_helpers_duplicate;
14757 dtrace_cpustart_init = dtrace_suspend;
14758 dtrace_cpustart_fini = dtrace_resume;
14759 dtrace_debugger_init = dtrace_suspend;
14760 dtrace_debugger_fini = dtrace_resume;
14761 dtrace_kreloc_init = dtrace_suspend;
14762 dtrace_kreloc_fini = dtrace_resume;
14763
14764 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
14765
14766 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14767
14768 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
14769 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14770 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
14771 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
14772 VM_SLEEP | VMC_IDENTIFIER);
14773 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
14774 1, INT_MAX, 0);
14775
14776 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
14777 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
14778 NULL, NULL, NULL, NULL, NULL, 0);
14779
14780 lck_mtx_assert(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14781
14782 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
14783 offsetof(dtrace_probe_t, dtpr_nextmod),
14784 offsetof(dtrace_probe_t, dtpr_prevmod));
14785
14786 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
14787 offsetof(dtrace_probe_t, dtpr_nextfunc),
14788 offsetof(dtrace_probe_t, dtpr_prevfunc));
14789
14790 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
14791 offsetof(dtrace_probe_t, dtpr_nextname),
14792 offsetof(dtrace_probe_t, dtpr_prevname));
14793
14794 if (dtrace_retain_max < 1) {
14795 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
14796 "setting to 1", dtrace_retain_max);
14797 dtrace_retain_max = 1;
14798 }
14799
14800 /*
14801 * Now discover our toxic ranges.
14802 */
14803 dtrace_toxic_ranges(dtrace_toxrange_add);
14804
14805 /*
14806 * Before we register ourselves as a provider to our own framework,
14807 * we would like to assert that dtrace_provider is NULL -- but that's
14808 * not true if we were loaded as a dependency of a DTrace provider.
14809 * Once we've registered, we can assert that dtrace_provider is our
14810 * pseudo provider.
14811 */
14812 (void) dtrace_register("dtrace", &dtrace_provider_attr,
14813 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
14814
14815 ASSERT(dtrace_provider != NULL);
14816 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
14817
14818 #if !defined(__APPLE__)
14819 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14820 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
14821 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14822 dtrace_provider, NULL, NULL, "END", 0, NULL);
14823 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14824 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
14825 #elif defined(__ppc__) || defined(__ppc64__)
14826 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14827 dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
14828 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14829 dtrace_provider, NULL, NULL, "END", 1, NULL);
14830 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14831 dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
14832 #elif (defined(__i386__) || defined (__x86_64__))
14833 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14834 dtrace_provider, NULL, NULL, "BEGIN", 1, NULL);
14835 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14836 dtrace_provider, NULL, NULL, "END", 0, NULL);
14837 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14838 dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
14839 #elif defined(__arm__)
14840 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14841 dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
14842 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14843 dtrace_provider, NULL, NULL, "END", 1, NULL);
14844 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14845 dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
14846 #else
14847 #error Unknown Architecture
14848 #endif /* __APPLE__ */
14849
14850 dtrace_anon_property();
14851 lck_mtx_unlock(&cpu_lock);
14852
14853 /*
14854 * If DTrace helper tracing is enabled, we need to allocate the
14855 * trace buffer and initialize the values.
14856 */
14857 if (dtrace_helptrace_enabled) {
14858 ASSERT(dtrace_helptrace_buffer == NULL);
14859 dtrace_helptrace_buffer =
14860 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
14861 dtrace_helptrace_next = 0;
14862 }
14863
14864 /*
14865 * If there are already providers, we must ask them to provide their
14866 * probes, and then match any anonymous enabling against them. Note
14867 * that there should be no other retained enablings at this time:
14868 * the only retained enabling should be the anonymous
14869 * enabling.
14870 */
14871 if (dtrace_anon.dta_enabling != NULL) {
14872 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
14873
14874 dtrace_enabling_provide(NULL);
14875 state = dtrace_anon.dta_state;
14876
14877 /*
14878 * We couldn't hold cpu_lock across the above call to
14879 * dtrace_enabling_provide(), but we must hold it to actually
14880 * enable the probes. We have to drop all of our locks, pick
14881 * up cpu_lock, and regain our locks before matching the
14882 * retained anonymous enabling.
14883 */
14884 lck_mtx_unlock(&dtrace_lock);
14885 lck_mtx_unlock(&dtrace_provider_lock);
14886
14887 lck_mtx_lock(&cpu_lock);
14888 lck_mtx_lock(&dtrace_provider_lock);
14889 lck_mtx_lock(&dtrace_lock);
14890
14891 if ((enab = dtrace_anon.dta_enabling) != NULL)
14892 (void) dtrace_enabling_match(enab, NULL);
14893
14894 lck_mtx_unlock(&cpu_lock);
14895 }
14896
14897 lck_mtx_unlock(&dtrace_lock);
14898 lck_mtx_unlock(&dtrace_provider_lock);
14899
14900 if (state != NULL) {
14901 /*
14902 * If we created any anonymous state, set it going now.
14903 */
14904 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
14905 }
14906
14907 return (DDI_SUCCESS);
14908 }
14909
14910 extern void fasttrap_init(void);
14911
14912 /*ARGSUSED*/
14913 static int
14914 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
14915 {
14916 #pragma unused(flag, otyp)
14917 dtrace_state_t *state;
14918 uint32_t priv;
14919 uid_t uid;
14920 zoneid_t zoneid;
14921
14922 #if !defined(__APPLE__)
14923 if (getminor(*devp) == DTRACEMNRN_HELPER)
14924 return (0);
14925
14926 /*
14927 * If this wasn't an open with the "helper" minor, then it must be
14928 * the "dtrace" minor.
14929 */
14930 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
14931 #else
14932 /* Darwin puts Helper on its own major device. */
14933 #endif /* __APPLE__ */
14934
14935 /*
14936 * If no DTRACE_PRIV_* bits are set in the credential, then the
14937 * caller lacks sufficient permission to do anything with DTrace.
14938 */
14939 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
14940 if (priv == DTRACE_PRIV_NONE)
14941 return (EACCES);
14942
14943 #if defined(__APPLE__)
14944 /*
14945 * We delay the initialization of fasttrap as late as possible.
14946 * It certainly can't be later than now!
14947 */
14948 fasttrap_init();
14949 #endif /* __APPLE__ */
14950
14951 /*
14952 * Ask all providers to provide all their probes.
14953 */
14954 lck_mtx_lock(&dtrace_provider_lock);
14955 dtrace_probe_provide(NULL, NULL);
14956 lck_mtx_unlock(&dtrace_provider_lock);
14957
14958 lck_mtx_lock(&cpu_lock);
14959 lck_mtx_lock(&dtrace_lock);
14960 dtrace_opens++;
14961 dtrace_membar_producer();
14962
14963 /*
14964 * If the kernel debugger is active (that is, if the kernel debugger
14965 * modified text in some way), we won't allow the open.
14966 */
14967 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14968 dtrace_opens--;
14969 lck_mtx_unlock(&cpu_lock);
14970 lck_mtx_unlock(&dtrace_lock);
14971 return (EBUSY);
14972 }
14973
14974 state = dtrace_state_create(devp, cred_p);
14975 lck_mtx_unlock(&cpu_lock);
14976
14977 if (state == NULL) {
14978 if (--dtrace_opens == 0)
14979 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
14980 lck_mtx_unlock(&dtrace_lock);
14981 return (EAGAIN);
14982 }
14983
14984 lck_mtx_unlock(&dtrace_lock);
14985
14986 #if defined(__APPLE__)
14987 lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
14988
14989 /*
14990 * If we are currently lazy, transition states.
14991 *
14992 * Unlike dtrace_close, we do not need to check the
14993 * value of dtrace_opens, as any positive value (and
14994 * we count as 1) means we transition states.
14995 */
14996 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON) {
14997 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_OFF;
14998
14999 /*
15000 * Iterate all existing processes and load lazy dofs.
15001 */
15002 proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
15003 dtrace_lazy_dofs_proc_iterate_doit,
15004 NULL,
15005 dtrace_lazy_dofs_proc_iterate_filter,
15006 NULL);
15007 }
15008
15009 lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
15010 #endif
15011
15012 return (0);
15013 }
15014
15015 /*ARGSUSED*/
15016 static int
15017 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15018 {
15019 minor_t minor = getminor(dev);
15020 dtrace_state_t *state;
15021
15022 #if !defined(__APPLE__)
15023 if (minor == DTRACEMNRN_HELPER)
15024 return (0);
15025 #else
15026 /* Darwin puts Helper on its own major device. */
15027 #endif /* __APPLE__ */
15028
15029 state = ddi_get_soft_state(dtrace_softstate, minor);
15030
15031 lck_mtx_lock(&cpu_lock);
15032 lck_mtx_lock(&dtrace_lock);
15033
15034 if (state->dts_anon) {
15035 /*
15036 * There is anonymous state. Destroy that first.
15037 */
15038 ASSERT(dtrace_anon.dta_state == NULL);
15039 dtrace_state_destroy(state->dts_anon);
15040 }
15041
15042 dtrace_state_destroy(state);
15043 ASSERT(dtrace_opens > 0);
15044 if (--dtrace_opens == 0)
15045 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15046
15047 lck_mtx_unlock(&dtrace_lock);
15048 lck_mtx_unlock(&cpu_lock);
15049
15050 #if defined(__APPLE__)
15051
15052 /*
15053 * Lock ordering requires the dof mode lock be taken before
15054 * the dtrace_lock.
15055 */
15056 lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
15057 lck_mtx_lock(&dtrace_lock);
15058
15059 /*
15060 * If we are currently lazy-off, and this is the last close, transition to
15061 * lazy state.
15062 */
15063 if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF && dtrace_opens == 0) {
15064 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
15065 }
15066
15067 lck_mtx_unlock(&dtrace_lock);
15068 lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
15069 #endif
15070
15071 return (0);
15072 }
15073
15074 #if defined(__APPLE__)
15075 /*
15076 * Introduce cast to quiet warnings.
15077 * XXX: This hides a lot of brokenness.
15078 */
15079 #define copyin(src, dst, len) copyin( (user_addr_t)(src), (dst), (len) )
15080 #define copyout(src, dst, len) copyout( (src), (user_addr_t)(dst), (len) )
15081 #endif /* __APPLE__ */
15082
15083 #if defined(__APPLE__)
15084 /*ARGSUSED*/
15085 static int
15086 dtrace_ioctl_helper(int cmd, caddr_t arg, int *rv)
15087 {
15088 #pragma unused(rv)
15089 /*
15090 * Safe to check this outside the dof mode lock
15091 */
15092 if (dtrace_dof_mode == DTRACE_DOF_MODE_NEVER)
15093 return KERN_SUCCESS;
15094
15095 switch (cmd) {
15096 case DTRACEHIOC_ADDDOF: {
15097 dof_helper_t *dhp = NULL;
15098 size_t dof_ioctl_data_size;
15099 dof_ioctl_data_t* multi_dof;
15100 unsigned int i;
15101 int rval = 0;
15102 user_addr_t user_address = *(user_addr_t*)arg;
15103 uint64_t dof_count;
15104 int multi_dof_claimed = 0;
15105 proc_t* p = current_proc();
15106
15107 /*
15108 * Read the number of DOFs being passed in.
15109 */
15110 if (copyin(user_address + offsetof(dof_ioctl_data_t, dofiod_count),
15111 &dof_count,
15112 sizeof(dof_count))) {
15113 dtrace_dof_error(NULL, "failed to copyin dofiod_count");
15114 return (EFAULT);
15115 }
15116
15117 /*
15118 * Range check the count.
15119 */
15120 if (dof_count == 0 || dof_count > 1024) {
15121 dtrace_dof_error(NULL, "dofiod_count is not valid");
15122 return (EINVAL);
15123 }
15124
15125 /*
15126 * Allocate a correctly sized structure and copyin the data.
15127 */
15128 dof_ioctl_data_size = DOF_IOCTL_DATA_T_SIZE(dof_count);
15129 if ((multi_dof = kmem_alloc(dof_ioctl_data_size, KM_SLEEP)) == NULL)
15130 return (ENOMEM);
15131
15132 /* NOTE! We can no longer exit this method via return */
15133 if (copyin(user_address, multi_dof, dof_ioctl_data_size) != 0) {
15134 dtrace_dof_error(NULL, "failed copyin of dof_ioctl_data_t");
15135 rval = EFAULT;
15136 goto cleanup;
15137 }
15138
15139 /*
15140 * Check that the count didn't change between the first copyin and the second.
15141 */
15142 if (multi_dof->dofiod_count != dof_count) {
15143 rval = EINVAL;
15144 goto cleanup;
15145 }
15146
15147 /*
15148 * Try to process lazily first.
15149 */
15150 rval = dtrace_lazy_dofs_add(p, multi_dof, &multi_dof_claimed);
15151
15152 /*
15153 * If rval is EACCES, we must be non-lazy.
15154 */
15155 if (rval == EACCES) {
15156 rval = 0;
15157 /*
15158 * Process each dof_helper_t
15159 */
15160 i = 0;
15161 do {
15162 dhp = &multi_dof->dofiod_helpers[i];
15163
15164 dof_hdr_t *dof = dtrace_dof_copyin(dhp->dofhp_dof, &rval);
15165
15166 if (dof != NULL) {
15167 lck_mtx_lock(&dtrace_lock);
15168
15169 /*
15170 * dtrace_helper_slurp() takes responsibility for the dof --
15171 * it may free it now or it may save it and free it later.
15172 */
15173 if ((dhp->dofhp_dof = (uint64_t)dtrace_helper_slurp(p, dof, dhp)) == -1ULL) {
15174 rval = EINVAL;
15175 }
15176
15177 lck_mtx_unlock(&dtrace_lock);
15178 }
15179 } while (++i < multi_dof->dofiod_count && rval == 0);
15180 }
15181
15182 /*
15183 * We need to copyout the multi_dof struct, because it contains
15184 * the generation (unique id) values needed to call DTRACEHIOC_REMOVE
15185 *
15186 * This could certainly be better optimized.
15187 */
15188 if (copyout(multi_dof, user_address, dof_ioctl_data_size) != 0) {
15189 dtrace_dof_error(NULL, "failed copyout of dof_ioctl_data_t");
15190 /* Don't overwrite pre-existing error code */
15191 if (rval == 0) rval = EFAULT;
15192 }
15193
15194 cleanup:
15195 /*
15196 * If we had to allocate struct memory, free it unless the lazy path claimed it.
15197 */
15198 if (multi_dof != NULL && !multi_dof_claimed) {
15199 kmem_free(multi_dof, dof_ioctl_data_size);
15200 }
15201
15202 return rval;
15203 }
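	/*
	 * A sketch of the lifecycle this implies for the registering process
	 * (dyld): the generations written back into dofhp_dof by
	 * DTRACEHIOC_ADDDOF above are the values later handed, one at a time,
	 * to DTRACEHIOC_REMOVE below to unload the corresponding dof.
	 */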
15204
15205 case DTRACEHIOC_REMOVE: {
15206 int generation = *(int*)arg;
15207 proc_t* p = current_proc();
15208
15209 /*
15210 * Try lazy first.
15211 */
15212 int rval = dtrace_lazy_dofs_remove(p, generation);
15213
15214 /*
15215 * EACCES means non-lazy
15216 */
15217 if (rval == EACCES) {
15218 lck_mtx_lock(&dtrace_lock);
15219 rval = dtrace_helper_destroygen(p, generation);
15220 lck_mtx_unlock(&dtrace_lock);
15221 }
15222
15223 return (rval);
15224 }
15225
15226 default:
15227 break;
15228 }
15229
15230 return ENOTTY;
15231 }
15232 #endif /* __APPLE__ */
15233
15234 /*ARGSUSED*/
15235 static int
15236 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15237 {
15238 minor_t minor = getminor(dev);
15239 dtrace_state_t *state;
15240 int rval;
15241
15242 #if !defined(__APPLE__)
15243 if (minor == DTRACEMNRN_HELPER)
15244 return (dtrace_ioctl_helper(cmd, arg, rv));
15245 #else
15246 /* Darwin puts Helper on its own major device. */
15247 #endif /* __APPLE__ */
15248
15249 state = ddi_get_soft_state(dtrace_softstate, minor);
15250
15251 if (state->dts_anon) {
15252 ASSERT(dtrace_anon.dta_state == NULL);
15253 state = state->dts_anon;
15254 }
15255
15256 switch (cmd) {
15257 case DTRACEIOC_PROVIDER: {
15258 dtrace_providerdesc_t pvd;
15259 dtrace_provider_t *pvp;
15260
15261 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15262 return (EFAULT);
15263
15264 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15265 lck_mtx_lock(&dtrace_provider_lock);
15266
15267 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15268 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15269 break;
15270 }
15271
15272 lck_mtx_unlock(&dtrace_provider_lock);
15273
15274 if (pvp == NULL)
15275 return (ESRCH);
15276
15277 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15278 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15279 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15280 return (EFAULT);
15281
15282 return (0);
15283 }
15284
15285 case DTRACEIOC_EPROBE: {
15286 dtrace_eprobedesc_t epdesc;
15287 dtrace_ecb_t *ecb;
15288 dtrace_action_t *act;
15289 void *buf;
15290 size_t size;
15291 uintptr_t dest;
15292 int nrecs;
15293
15294 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15295 return (EFAULT);
15296
15297 lck_mtx_lock(&dtrace_lock);
15298
15299 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15300 lck_mtx_unlock(&dtrace_lock);
15301 return (EINVAL);
15302 }
15303
15304 if (ecb->dte_probe == NULL) {
15305 lck_mtx_unlock(&dtrace_lock);
15306 return (EINVAL);
15307 }
15308
15309 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15310 epdesc.dtepd_uarg = ecb->dte_uarg;
15311 epdesc.dtepd_size = ecb->dte_size;
15312
15313 nrecs = epdesc.dtepd_nrecs;
15314 epdesc.dtepd_nrecs = 0;
15315 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15316 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15317 continue;
15318
15319 epdesc.dtepd_nrecs++;
15320 }
15321
15322 /*
15323 * Now that we have the size, we need to allocate a temporary
15324 * buffer in which to store the complete description. We need
15325 * the temporary buffer to be able to drop dtrace_lock()
15326 * across the copyout(), below.
15327 */
15328 size = sizeof (dtrace_eprobedesc_t) +
15329 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15330
15331 buf = kmem_alloc(size, KM_SLEEP);
15332 dest = (uintptr_t)buf;
15333
15334 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15335 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15336
15337 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15338 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15339 continue;
15340
15341 if (nrecs-- == 0)
15342 break;
15343
15344 bcopy(&act->dta_rec, (void *)dest,
15345 sizeof (dtrace_recdesc_t));
15346 dest += sizeof (dtrace_recdesc_t);
15347 }
15348
15349 lck_mtx_unlock(&dtrace_lock);
15350
15351 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15352 kmem_free(buf, size);
15353 return (EFAULT);
15354 }
15355
15356 kmem_free(buf, size);
15357 return (0);
15358 }
15359
15360 case DTRACEIOC_AGGDESC: {
15361 dtrace_aggdesc_t aggdesc;
15362 dtrace_action_t *act;
15363 dtrace_aggregation_t *agg;
15364 int nrecs;
15365 uint32_t offs;
15366 dtrace_recdesc_t *lrec;
15367 void *buf;
15368 size_t size;
15369 uintptr_t dest;
15370
15371 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15372 return (EFAULT);
15373
15374 lck_mtx_lock(&dtrace_lock);
15375
15376 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15377 lck_mtx_unlock(&dtrace_lock);
15378 return (EINVAL);
15379 }
15380
15381 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15382
15383 nrecs = aggdesc.dtagd_nrecs;
15384 aggdesc.dtagd_nrecs = 0;
15385
15386 offs = agg->dtag_base;
15387 lrec = &agg->dtag_action.dta_rec;
15388 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15389
15390 for (act = agg->dtag_first; ; act = act->dta_next) {
15391 ASSERT(act->dta_intuple ||
15392 DTRACEACT_ISAGG(act->dta_kind));
15393
15394 /*
15395 * If this action has a record size of zero, it
15396 * denotes an argument to the aggregating action.
15397 * Because the presence of this record doesn't (or
15398 * shouldn't) affect the way the data is interpreted,
15399 * we don't copy it out to save user-level the
15400 * confusion of dealing with a zero-length record.
15401 */
15402 if (act->dta_rec.dtrd_size == 0) {
15403 ASSERT(agg->dtag_hasarg);
15404 continue;
15405 }
15406
15407 aggdesc.dtagd_nrecs++;
15408
15409 if (act == &agg->dtag_action)
15410 break;
15411 }
15412
15413 /*
15414 * Now that we have the size, we need to allocate a temporary
15415 * buffer in which to store the complete description. We need
15416 * the temporary buffer to be able to drop dtrace_lock()
15417 * across the copyout(), below.
15418 */
15419 size = sizeof (dtrace_aggdesc_t) +
15420 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15421
15422 buf = kmem_alloc(size, KM_SLEEP);
15423 dest = (uintptr_t)buf;
15424
15425 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15426 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15427
15428 for (act = agg->dtag_first; ; act = act->dta_next) {
15429 dtrace_recdesc_t rec = act->dta_rec;
15430
15431 /*
15432 * See the comment in the above loop for why we pass
15433 * over zero-length records.
15434 */
15435 if (rec.dtrd_size == 0) {
15436 ASSERT(agg->dtag_hasarg);
15437 continue;
15438 }
15439
15440 if (nrecs-- == 0)
15441 break;
15442
15443 rec.dtrd_offset -= offs;
15444 bcopy(&rec, (void *)dest, sizeof (rec));
15445 dest += sizeof (dtrace_recdesc_t);
15446
15447 if (act == &agg->dtag_action)
15448 break;
15449 }
15450
15451 lck_mtx_unlock(&dtrace_lock);
15452
15453 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15454 kmem_free(buf, size);
15455 return (EFAULT);
15456 }
15457
15458 kmem_free(buf, size);
15459 return (0);
15460 }
15461
15462 case DTRACEIOC_ENABLE: {
15463 dof_hdr_t *dof;
15464 dtrace_enabling_t *enab = NULL;
15465 dtrace_vstate_t *vstate;
15466 int err = 0;
15467
15468 *rv = 0;
15469
15470 /*
15471 * If a NULL argument has been passed, we take this as our
15472 * cue to reevaluate our enablings.
15473 */
15474 if (arg == NULL) {
15475 lck_mtx_lock(&cpu_lock);
15476 lck_mtx_lock(&dtrace_lock);
15477 err = dtrace_enabling_matchstate(state, rv);
15478 lck_mtx_unlock(&dtrace_lock);
15479 lck_mtx_unlock(&cpu_lock);
15480
15481 return (err);
15482 }
15483
15484 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15485 return (rval);
15486
15487 lck_mtx_lock(&cpu_lock);
15488 lck_mtx_lock(&dtrace_lock);
15489 vstate = &state->dts_vstate;
15490
15491 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15492 lck_mtx_unlock(&dtrace_lock);
15493 lck_mtx_unlock(&cpu_lock);
15494 dtrace_dof_destroy(dof);
15495 return (EBUSY);
15496 }
15497
15498 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15499 lck_mtx_unlock(&dtrace_lock);
15500 lck_mtx_unlock(&cpu_lock);
15501 dtrace_dof_destroy(dof);
15502 return (EINVAL);
15503 }
15504
15505 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15506 dtrace_enabling_destroy(enab);
15507 lck_mtx_unlock(&dtrace_lock);
15508 lck_mtx_unlock(&cpu_lock);
15509 dtrace_dof_destroy(dof);
15510 return (rval);
15511 }
15512
15513 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15514 err = dtrace_enabling_retain(enab);
15515 } else {
15516 dtrace_enabling_destroy(enab);
15517 }
15518
15519 lck_mtx_unlock(&cpu_lock);
15520 lck_mtx_unlock(&dtrace_lock);
15521 dtrace_dof_destroy(dof);
15522
15523 return (err);
15524 }
15525
15526 case DTRACEIOC_REPLICATE: {
15527 dtrace_repldesc_t desc;
15528 dtrace_probedesc_t *match = &desc.dtrpd_match;
15529 dtrace_probedesc_t *create = &desc.dtrpd_create;
15530 int err;
15531
15532 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15533 return (EFAULT);
15534
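/*
 * Ensure that the probe description strings copied in from user
 * space are NUL-terminated before they are used for matching.
 */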
15535 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15536 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15537 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15538 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15539
15540 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15541 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15542 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15543 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15544
15545 lck_mtx_lock(&dtrace_lock);
15546 err = dtrace_enabling_replicate(state, match, create);
15547 lck_mtx_unlock(&dtrace_lock);
15548
15549 return (err);
15550 }
15551
15552 case DTRACEIOC_PROBEMATCH:
15553 case DTRACEIOC_PROBES: {
15554 dtrace_probe_t *probe = NULL;
15555 dtrace_probedesc_t desc;
15556 dtrace_probekey_t pkey;
15557 dtrace_id_t i;
15558 int m = 0;
15559 uint32_t priv;
15560 uid_t uid;
15561 zoneid_t zoneid;
15562
15563 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15564 return (EFAULT);
15565
15566 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15567 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15568 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15569 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15570
15571 /*
15572 * Before we attempt to match this probe, we want to give
15573 * all providers the opportunity to provide it.
15574 */
15575 if (desc.dtpd_id == DTRACE_IDNONE) {
15576 lck_mtx_lock(&dtrace_provider_lock);
15577 dtrace_probe_provide(&desc, NULL);
15578 lck_mtx_unlock(&dtrace_provider_lock);
15579 desc.dtpd_id++;
15580 }
15581
15582 if (cmd == DTRACEIOC_PROBEMATCH) {
15583 dtrace_probekey(&desc, &pkey);
15584 pkey.dtpk_id = DTRACE_IDNONE;
15585 }
15586
15587 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
15588
15589 lck_mtx_lock(&dtrace_lock);
15590
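/*
 * Walk the probe table starting at the requested ID: PROBEMATCH
 * returns the first probe matching both the key and the caller's
 * privileges, while PROBES returns the first probe visible to the
 * caller.
 */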
15591 if (cmd == DTRACEIOC_PROBEMATCH) {
15592 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15593 if ((probe = dtrace_probes[i - 1]) != NULL &&
15594 (m = dtrace_match_probe(probe, &pkey,
15595 priv, uid, zoneid)) != 0)
15596 break;
15597 }
15598
15599 if (m < 0) {
15600 lck_mtx_unlock(&dtrace_lock);
15601 return (EINVAL);
15602 }
15603
15604 } else {
15605 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15606 if ((probe = dtrace_probes[i - 1]) != NULL &&
15607 dtrace_match_priv(probe, priv, uid, zoneid))
15608 break;
15609 }
15610 }
15611
15612 if (probe == NULL) {
15613 lck_mtx_unlock(&dtrace_lock);
15614 return (ESRCH);
15615 }
15616
15617 dtrace_probe_description(probe, &desc);
15618 lck_mtx_unlock(&dtrace_lock);
15619
15620 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15621 return (EFAULT);
15622
15623 return (0);
15624 }
15625
15626 case DTRACEIOC_PROBEARG: {
15627 dtrace_argdesc_t desc;
15628 dtrace_probe_t *probe;
15629 dtrace_provider_t *prov;
15630
15631 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15632 return (EFAULT);
15633
15634 if (desc.dtargd_id == DTRACE_IDNONE)
15635 return (EINVAL);
15636
15637 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15638 return (EINVAL);
15639
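/*
 * Take the provider, module, and dtrace locks so that neither the
 * probe nor its provider can disappear while we query the argument
 * description.
 */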
15640 lck_mtx_lock(&dtrace_provider_lock);
15641 lck_mtx_lock(&mod_lock);
15642 lck_mtx_lock(&dtrace_lock);
15643
15644 if (desc.dtargd_id > dtrace_nprobes) {
15645 lck_mtx_unlock(&dtrace_lock);
15646 lck_mtx_unlock(&mod_lock);
15647 lck_mtx_unlock(&dtrace_provider_lock);
15648 return (EINVAL);
15649 }
15650
15651 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15652 lck_mtx_unlock(&dtrace_lock);
15653 lck_mtx_unlock(&mod_lock);
15654 lck_mtx_unlock(&dtrace_provider_lock);
15655 return (EINVAL);
15656 }
15657
15658 lck_mtx_unlock(&dtrace_lock);
15659
15660 prov = probe->dtpr_provider;
15661
15662 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15663 /*
15664 * There isn't any typed information for this probe.
15665 * Set the argument number to DTRACE_ARGNONE.
15666 */
15667 desc.dtargd_ndx = DTRACE_ARGNONE;
15668 } else {
15669 desc.dtargd_native[0] = '\0';
15670 desc.dtargd_xlate[0] = '\0';
15671 desc.dtargd_mapping = desc.dtargd_ndx;
15672
15673 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15674 probe->dtpr_id, probe->dtpr_arg, &desc);
15675 }
15676
15677 lck_mtx_unlock(&mod_lock);
15678 lck_mtx_unlock(&dtrace_provider_lock);
15679
15680 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15681 return (EFAULT);
15682
15683 return (0);
15684 }
15685
15686 case DTRACEIOC_GO: {
15687 processorid_t cpuid;
15688 rval = dtrace_state_go(state, &cpuid);
15689
15690 if (rval != 0)
15691 return (rval);
15692
15693 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15694 return (EFAULT);
15695
15696 return (0);
15697 }
15698
15699 case DTRACEIOC_STOP: {
15700 processorid_t cpuid;
15701
15702 lck_mtx_lock(&dtrace_lock);
15703 rval = dtrace_state_stop(state, &cpuid);
15704 lck_mtx_unlock(&dtrace_lock);
15705
15706 if (rval != 0)
15707 return (rval);
15708
15709 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15710 return (EFAULT);
15711
15712 return (0);
15713 }
15714
15715 case DTRACEIOC_DOFGET: {
15716 dof_hdr_t hdr, *dof;
15717 uint64_t len;
15718
15719 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15720 return (EFAULT);
15721
15722 lck_mtx_lock(&dtrace_lock);
15723 dof = dtrace_dof_create(state);
15724 lck_mtx_unlock(&dtrace_lock);
15725
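/*
 * Copy out no more than the caller's stated buffer size
 * (hdr.dofh_loadsz) or the size of the DOF we just created,
 * whichever is smaller.
 */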
15726 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15727 rval = copyout(dof, (void *)arg, len);
15728 dtrace_dof_destroy(dof);
15729
15730 return (rval == 0 ? 0 : EFAULT);
15731 }
15732
15733 case DTRACEIOC_AGGSNAP:
15734 case DTRACEIOC_BUFSNAP: {
15735 dtrace_bufdesc_t desc;
15736 caddr_t cached;
15737 dtrace_buffer_t *buf;
15738
15739 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15740 return (EFAULT);
15741
15742 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15743 return (EINVAL);
15744
15745 lck_mtx_lock(&dtrace_lock);
15746
15747 if (cmd == DTRACEIOC_BUFSNAP) {
15748 buf = &state->dts_buffer[desc.dtbd_cpu];
15749 } else {
15750 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
15751 }
15752
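/*
 * Ring and fill buffers are only copied out once tracing has
 * stopped; they are consumed in place rather than switched out
 * with a cross call.
 */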
15753 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
15754 size_t sz = buf->dtb_offset;
15755
15756 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
15757 lck_mtx_unlock(&dtrace_lock);
15758 return (EBUSY);
15759 }
15760
15761 /*
15762 * If this buffer has already been consumed, we're
15763 * going to indicate that there's nothing left here
15764 * to consume.
15765 */
15766 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
15767 lck_mtx_unlock(&dtrace_lock);
15768
15769 desc.dtbd_size = 0;
15770 desc.dtbd_drops = 0;
15771 desc.dtbd_errors = 0;
15772 desc.dtbd_oldest = 0;
15773 sz = sizeof (desc);
15774
15775 if (copyout(&desc, (void *)arg, sz) != 0)
15776 return (EFAULT);
15777
15778 return (0);
15779 }
15780
15781 /*
15782 * If this is a ring buffer that has wrapped, we want
15783 * to copy the whole thing out.
15784 */
15785 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
15786 dtrace_buffer_polish(buf);
15787 sz = buf->dtb_size;
15788 }
15789
15790 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
15791 lck_mtx_unlock(&dtrace_lock);
15792 return (EFAULT);
15793 }
15794
15795 desc.dtbd_size = sz;
15796 desc.dtbd_drops = buf->dtb_drops;
15797 desc.dtbd_errors = buf->dtb_errors;
15798 desc.dtbd_oldest = buf->dtb_xamot_offset;
15799
15800 lck_mtx_unlock(&dtrace_lock);
15801
15802 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15803 return (EFAULT);
15804
15805 buf->dtb_flags |= DTRACEBUF_CONSUMED;
15806
15807 return (0);
15808 }
15809
15810 if (buf->dtb_tomax == NULL) {
15811 ASSERT(buf->dtb_xamot == NULL);
15812 lck_mtx_unlock(&dtrace_lock);
15813 return (ENOENT);
15814 }
15815
15816 cached = buf->dtb_tomax;
15817 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
15818
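/*
 * Cross call to the target CPU to switch the active (tomax) and
 * inactive (xamot) buffers; the now-inactive snapshot can then be
 * safely copied out to user space.
 */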
15819 dtrace_xcall(desc.dtbd_cpu,
15820 (dtrace_xcall_t)dtrace_buffer_switch, buf);
15821
15822 state->dts_errors += buf->dtb_xamot_errors;
15823
15824 /*
15825 * If the buffers did not actually switch, then the cross call
15826 * did not take place -- presumably because the given CPU is
15827 * not in the ready set. If this is the case, we'll return
15828 * ENOENT.
15829 */
15830 if (buf->dtb_tomax == cached) {
15831 ASSERT(buf->dtb_xamot != cached);
15832 lck_mtx_unlock(&dtrace_lock);
15833 return (ENOENT);
15834 }
15835
15836 ASSERT(cached == buf->dtb_xamot);
15837
15838 /*
15839 * We have our snapshot; now copy it out.
15840 */
15841 if (copyout(buf->dtb_xamot, desc.dtbd_data,
15842 buf->dtb_xamot_offset) != 0) {
15843 lck_mtx_unlock(&dtrace_lock);
15844 return (EFAULT);
15845 }
15846
15847 desc.dtbd_size = buf->dtb_xamot_offset;
15848 desc.dtbd_drops = buf->dtb_xamot_drops;
15849 desc.dtbd_errors = buf->dtb_xamot_errors;
15850 desc.dtbd_oldest = 0;
15851
15852 lck_mtx_unlock(&dtrace_lock);
15853
15854 /*
15855 * Finally, copy out the buffer description.
15856 */
15857 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15858 return (EFAULT);
15859
15860 return (0);
15861 }
15862
15863 case DTRACEIOC_CONF: {
15864 dtrace_conf_t conf;
15865
15866 bzero(&conf, sizeof (conf));
15867 conf.dtc_difversion = DIF_VERSION;
15868 conf.dtc_difintregs = DIF_DIR_NREGS;
15869 conf.dtc_diftupregs = DIF_DTR_NREGS;
15870 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
15871
15872 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
15873 return (EFAULT);
15874
15875 return (0);
15876 }
15877
15878 case DTRACEIOC_STATUS: {
15879 dtrace_status_t stat;
15880 dtrace_dstate_t *dstate;
15881 int i, j;
15882 uint64_t nerrs;
15883
15884 /*
15885 * See the comment in dtrace_state_deadman() for the reason
15886 * for setting dts_laststatus to INT64_MAX before setting
15887 * it to the correct value.
15888 */
15889 state->dts_laststatus = INT64_MAX;
15890 dtrace_membar_producer();
15891 state->dts_laststatus = dtrace_gethrtime();
15892
15893 bzero(&stat, sizeof (stat));
15894
15895 lck_mtx_lock(&dtrace_lock);
15896
15897 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
15898 lck_mtx_unlock(&dtrace_lock);
15899 return (ENOENT);
15900 }
15901
15902 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
15903 stat.dtst_exiting = 1;
15904
15905 nerrs = state->dts_errors;
15906 dstate = &state->dts_vstate.dtvs_dynvars;
15907
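/*
 * Accumulate the per-CPU dynamic variable drops, filled-buffer
 * counts, error counts and speculation drops into the single
 * status structure that we copy out.
 */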
15908 for (i = 0; i < NCPU; i++) {
15909 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
15910
15911 stat.dtst_dyndrops += dcpu->dtdsc_drops;
15912 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
15913 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
15914
15915 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
15916 stat.dtst_filled++;
15917
15918 nerrs += state->dts_buffer[i].dtb_errors;
15919
15920 for (j = 0; j < state->dts_nspeculations; j++) {
15921 dtrace_speculation_t *spec;
15922 dtrace_buffer_t *buf;
15923
15924 spec = &state->dts_speculations[j];
15925 buf = &spec->dtsp_buffer[i];
15926 stat.dtst_specdrops += buf->dtb_xamot_drops;
15927 }
15928 }
15929
15930 stat.dtst_specdrops_busy = state->dts_speculations_busy;
15931 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
15932 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
15933 stat.dtst_dblerrors = state->dts_dblerrors;
15934 stat.dtst_killed =
15935 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
15936 stat.dtst_errors = nerrs;
15937
15938 lck_mtx_unlock(&dtrace_lock);
15939
15940 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
15941 return (EFAULT);
15942
15943 return (0);
15944 }
15945
15946 case DTRACEIOC_FORMAT: {
15947 dtrace_fmtdesc_t fmt;
15948 char *str;
15949 int len;
15950
15951 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
15952 return (EFAULT);
15953
15954 lck_mtx_lock(&dtrace_lock);
15955
15956 if (fmt.dtfd_format == 0 ||
15957 fmt.dtfd_format > state->dts_nformats) {
15958 lck_mtx_unlock(&dtrace_lock);
15959 return (EINVAL);
15960 }
15961
15962 /*
15963 * Format strings are allocated contiguously and they are
15964 * never freed; if a format index is less than the number
15965 * of formats, we can assert that the format map is non-NULL
15966 * and that the format for the specified index is non-NULL.
15967 */
15968 ASSERT(state->dts_formats != NULL);
15969 str = state->dts_formats[fmt.dtfd_format - 1];
15970 ASSERT(str != NULL);
15971
15972 len = strlen(str) + 1;
15973
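/*
 * If the user's buffer is too small, report the required length in
 * dtfd_length rather than copying out a truncated format string.
 */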
15974 if (len > fmt.dtfd_length) {
15975 fmt.dtfd_length = len;
15976
15977 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
15978 lck_mtx_unlock(&dtrace_lock);
15979 return (EINVAL);
15980 }
15981 } else {
15982 if (copyout(str, fmt.dtfd_string, len) != 0) {
15983 lck_mtx_unlock(&dtrace_lock);
15984 return (EINVAL);
15985 }
15986 }
15987
15988 lck_mtx_unlock(&dtrace_lock);
15989 return (0);
15990 }
15991
15992 default:
15993 break;
15994 }
15995
15996 return (ENOTTY);
15997 }
15998
15999 #if defined(__APPLE__)
16000 #undef copyin
16001 #undef copyout
16002 #endif /* __APPLE__ */
16003
16004 #if !defined(__APPLE__)
16005 /*ARGSUSED*/
16006 static int
16007 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16008 {
16009 dtrace_state_t *state;
16010
16011 switch (cmd) {
16012 case DDI_DETACH:
16013 break;
16014
16015 case DDI_SUSPEND:
16016 return (DDI_SUCCESS);
16017
16018 default:
16019 return (DDI_FAILURE);
16020 }
16021
16022 lck_mtx_lock(&cpu_lock);
16023 lck_mtx_lock(&dtrace_provider_lock);
16024 lck_mtx_lock(&dtrace_lock);
16025
16026 ASSERT(dtrace_opens == 0);
16027
16028 if (dtrace_helpers > 0) {
16029 lck_mtx_unlock(&dtrace_provider_lock);
16030 lck_mtx_unlock(&dtrace_lock);
16031 lck_mtx_unlock(&cpu_lock);
16032 return (DDI_FAILURE);
16033 }
16034
16035 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16036 lck_mtx_unlock(&dtrace_provider_lock);
16037 lck_mtx_unlock(&dtrace_lock);
16038 lck_mtx_unlock(&cpu_lock);
16039 return (DDI_FAILURE);
16040 }
16041
16042 dtrace_provider = NULL;
16043
16044 if ((state = dtrace_anon_grab()) != NULL) {
16045 /*
16046 * If there were ECBs on this state, the provider should not
16047 * have been allowed to detach; assert that there are
16048 * none.
16049 */
16050 ASSERT(state->dts_necbs == 0);
16051 dtrace_state_destroy(state);
16052
16053 /*
16054 * If we're being detached with anonymous state, we need to
16055 * indicate to the kernel debugger that DTrace is now inactive.
16056 */
16057 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16058 }
16059
16060 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16061 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16062 dtrace_cpu_init = NULL;
16063 dtrace_helpers_cleanup = NULL;
16064 dtrace_helpers_fork = NULL;
16065 dtrace_cpustart_init = NULL;
16066 dtrace_cpustart_fini = NULL;
16067 dtrace_debugger_init = NULL;
16068 dtrace_debugger_fini = NULL;
16069 dtrace_kreloc_init = NULL;
16070 dtrace_kreloc_fini = NULL;
16071 dtrace_modload = NULL;
16072 dtrace_modunload = NULL;
16073
16074 lck_mtx_unlock(&cpu_lock);
16075
16076 if (dtrace_helptrace_enabled) {
16077 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16078 dtrace_helptrace_buffer = NULL;
16079 }
16080
16081 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16082 dtrace_probes = NULL;
16083 dtrace_nprobes = 0;
16084
16085 dtrace_hash_destroy(dtrace_bymod);
16086 dtrace_hash_destroy(dtrace_byfunc);
16087 dtrace_hash_destroy(dtrace_byname);
16088 dtrace_bymod = NULL;
16089 dtrace_byfunc = NULL;
16090 dtrace_byname = NULL;
16091
16092 kmem_cache_destroy(dtrace_state_cache);
16093 vmem_destroy(dtrace_minor);
16094 vmem_destroy(dtrace_arena);
16095
16096 if (dtrace_toxrange != NULL) {
16097 kmem_free(dtrace_toxrange,
16098 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16099 dtrace_toxrange = NULL;
16100 dtrace_toxranges = 0;
16101 dtrace_toxranges_max = 0;
16102 }
16103
16104 ddi_remove_minor_node(dtrace_devi, NULL);
16105 dtrace_devi = NULL;
16106
16107 ddi_soft_state_fini(&dtrace_softstate);
16108
16109 ASSERT(dtrace_vtime_references == 0);
16110 ASSERT(dtrace_opens == 0);
16111 ASSERT(dtrace_retained == NULL);
16112
16113 lck_mtx_unlock(&dtrace_lock);
16114 lck_mtx_unlock(&dtrace_provider_lock);
16115
16116 /*
16117 * We don't destroy the task queue until after we have dropped our
16118 * locks (taskq_destroy() may block on running tasks). To prevent
16119 * attempting to do work after we have effectively detached but before
16120 * the task queue has been destroyed, all tasks dispatched via the
16121 * task queue must check that DTrace is still attached before
16122 * performing any operation.
16123 */
16124 taskq_destroy(dtrace_taskq);
16125 dtrace_taskq = NULL;
16126
16127 return (DDI_SUCCESS);
16128 }
16129
16130 /*ARGSUSED*/
16131 static int
16132 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16133 {
16134 int error;
16135
16136 switch (infocmd) {
16137 case DDI_INFO_DEVT2DEVINFO:
16138 *result = (void *)dtrace_devi;
16139 error = DDI_SUCCESS;
16140 break;
16141 case DDI_INFO_DEVT2INSTANCE:
16142 *result = (void *)0;
16143 error = DDI_SUCCESS;
16144 break;
16145 default:
16146 error = DDI_FAILURE;
16147 }
16148 return (error);
16149 }
16150
16151 static struct cb_ops dtrace_cb_ops = {
16152 dtrace_open, /* open */
16153 dtrace_close, /* close */
16154 nulldev, /* strategy */
16155 nulldev, /* print */
16156 nodev, /* dump */
16157 nodev, /* read */
16158 nodev, /* write */
16159 dtrace_ioctl, /* ioctl */
16160 nodev, /* devmap */
16161 nodev, /* mmap */
16162 nodev, /* segmap */
16163 nochpoll, /* poll */
16164 ddi_prop_op, /* cb_prop_op */
16165 0, /* streamtab */
16166 D_NEW | D_MP /* Driver compatibility flag */
16167 };
16168
16169 static struct dev_ops dtrace_ops = {
16170 DEVO_REV, /* devo_rev */
16171 0, /* refcnt */
16172 dtrace_info, /* get_dev_info */
16173 nulldev, /* identify */
16174 nulldev, /* probe */
16175 dtrace_attach, /* attach */
16176 dtrace_detach, /* detach */
16177 nodev, /* reset */
16178 &dtrace_cb_ops, /* driver operations */
16179 NULL, /* bus operations */
16180 nodev /* dev power */
16181 };
16182
16183 static struct modldrv modldrv = {
16184 &mod_driverops, /* module type (this is a pseudo driver) */
16185 "Dynamic Tracing", /* name of module */
16186 &dtrace_ops, /* driver ops */
16187 };
16188
16189 static struct modlinkage modlinkage = {
16190 MODREV_1,
16191 (void *)&modldrv,
16192 NULL
16193 };
16194
16195 int
16196 _init(void)
16197 {
16198 return (mod_install(&modlinkage));
16199 }
16200
16201 int
16202 _info(struct modinfo *modinfop)
16203 {
16204 return (mod_info(&modlinkage, modinfop));
16205 }
16206
16207 int
16208 _fini(void)
16209 {
16210 return (mod_remove(&modlinkage));
16211 }
16212 #else
16213
16214 d_open_t _dtrace_open, helper_open;
16215 d_close_t _dtrace_close, helper_close;
16216 d_ioctl_t _dtrace_ioctl, helper_ioctl;
16217
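/*
 * Darwin's cdevsw entry points have BSD signatures; these thin
 * wrappers adapt them to the Solaris-style dtrace_open(),
 * dtrace_close() and dtrace_ioctl() implementations above.
 */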
16218 int
16219 _dtrace_open(dev_t dev, int flags, int devtype, struct proc *p)
16220 {
16221 #pragma unused(p)
16222 dev_t locdev = dev;
16223
16224 return dtrace_open( &locdev, flags, devtype, CRED());
16225 }
16226
16227 int
16228 helper_open(dev_t dev, int flags, int devtype, struct proc *p)
16229 {
16230 #pragma unused(dev,flags,devtype,p)
16231 return 0;
16232 }
16233
16234 int
16235 _dtrace_close(dev_t dev, int flags, int devtype, struct proc *p)
16236 {
16237 #pragma unused(p)
16238 return dtrace_close( dev, flags, devtype, CRED());
16239 }
16240
16241 int
16242 helper_close(dev_t dev, int flags, int devtype, struct proc *p)
16243 {
16244 #pragma unused(dev,flags,devtype,p)
16245 return 0;
16246 }
16247
16248 int
16249 _dtrace_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
16250 {
16251 #pragma unused(p)
16252 int err, rv = 0;
16253
16254 err = dtrace_ioctl(dev, (int)cmd, *(intptr_t *)data, fflag, CRED(), &rv);
16255
16256 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris: genuine error codes stay below 4096 (12 bits), while return values are shifted into the upper 20 bits. */
16257 if (err != 0) {
16258 ASSERT( (err & 0xfffff000) == 0 );
16259 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
16260 } else if (rv != 0) {
16261 ASSERT( (rv & 0xfff00000) == 0 );
16262 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
16263 } else
16264 return 0;
16265 }
16266
16267 int
16268 helper_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
16269 {
16270 #pragma unused(dev,fflag,p)
16271 int err, rv = 0;
16272
16273 err = dtrace_ioctl_helper((int)cmd, data, &rv);
16274 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris: genuine error codes stay below 4096 (12 bits), while return values are shifted into the upper 20 bits. */
16275 if (err != 0) {
16276 ASSERT( (err & 0xfffff000) == 0 );
16277 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
16278 } else if (rv != 0) {
16279 ASSERT( (rv & 0xfff00000) == 0 );
16280 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
16281 } else
16282 return 0;
16283 }
16284
16285 #define HELPER_MAJOR -24 /* let the kernel pick the device number */
16286
16287 /*
16288 * A struct describing which functions will get invoked for certain
16289 * actions.
16290 */
16291 static struct cdevsw helper_cdevsw =
16292 {
16293 helper_open, /* open */
16294 helper_close, /* close */
16295 eno_rdwrt, /* read */
16296 eno_rdwrt, /* write */
16297 helper_ioctl, /* ioctl */
16298 (stop_fcn_t *)nulldev, /* stop */
16299 (reset_fcn_t *)nulldev, /* reset */
16300 NULL, /* tty's */
16301 eno_select, /* select */
16302 eno_mmap, /* mmap */
16303 eno_strat, /* strategy */
16304 eno_getc, /* getc */
16305 eno_putc, /* putc */
16306 0 /* type */
16307 };
16308
16309 static int helper_majdevno = 0;
16310
16311 static int gDTraceInited = 0;
16312
16313 void
16314 helper_init( void )
16315 {
16316 /*
16317 * Once the "helper" is initialized, it can take ioctl calls that use locks
16318 * and zones initialized in dtrace_init. Make certain dtrace_init was called
16319 * before us.
16320 */
16321
16322 if (!gDTraceInited) {
16323 panic("helper_init before dtrace_init\n");
16324 }
16325
16326 if (0 >= helper_majdevno)
16327 {
16328 helper_majdevno = cdevsw_add(HELPER_MAJOR, &helper_cdevsw);
16329
16330 if (helper_majdevno < 0) {
16331 printf("helper_init: failed to allocate a major number!\n");
16332 return;
16333 }
16334
16335 if (NULL == devfs_make_node( makedev(helper_majdevno, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
16336 DTRACEMNR_HELPER, 0 )) {
16337 printf("dtrace_init: failed to devfs_make_node for helper!\n");
16338 return;
16339 }
16340 } else
16341 panic("helper_init: called twice!\n");
16342 }
16343
16344 #undef HELPER_MAJOR
16345
16346 /*
16347 * Called with DEVFS_LOCK held, so vmem_alloc's underlying blist structures are protected.
16348 */
16349 static int
16350 dtrace_clone_func(dev_t dev, int action)
16351 {
16352 #pragma unused(dev)
16353
16354 if (action == DEVFS_CLONE_ALLOC) {
16355 if (NULL == dtrace_minor) /* Arena not created yet!?! */
16356 return 0;
16357 else {
16358 /*
16359 * Propose a minor number, namely the next number that vmem_alloc() will return.
16360 * Immediately put it back in play by calling vmem_free().
16361 */
16362 int ret = (int)(uintptr_t)vmem_alloc(dtrace_minor, 1, VM_BESTFIT | VM_SLEEP);
16363
16364 vmem_free(dtrace_minor, (void *)(uintptr_t)ret, 1);
16365
16366 return ret;
16367 }
16368 }
16369 else if (action == DEVFS_CLONE_FREE) {
16370 return 0;
16371 }
16372 else return -1;
16373 }
16374
16375 #define DTRACE_MAJOR -24 /* let the kernel pick the device number */
16376
16377 static struct cdevsw dtrace_cdevsw =
16378 {
16379 _dtrace_open, /* open */
16380 _dtrace_close, /* close */
16381 eno_rdwrt, /* read */
16382 eno_rdwrt, /* write */
16383 _dtrace_ioctl, /* ioctl */
16384 (stop_fcn_t *)nulldev, /* stop */
16385 (reset_fcn_t *)nulldev, /* reset */
16386 NULL, /* tty's */
16387 eno_select, /* select */
16388 eno_mmap, /* mmap */
16389 eno_strat, /* strategy */
16390 eno_getc, /* getc */
16391 eno_putc, /* putc */
16392 0 /* type */
16393 };
16394
16395 lck_attr_t* dtrace_lck_attr;
16396 lck_grp_attr_t* dtrace_lck_grp_attr;
16397 lck_grp_t* dtrace_lck_grp;
16398
16399 static int gMajDevNo;
16400
16401 void
16402 dtrace_init( void )
16403 {
16404 if (0 == gDTraceInited) {
16405 int i, ncpu = NCPU;
16406
16407 gMajDevNo = cdevsw_add(DTRACE_MAJOR, &dtrace_cdevsw);
16408
16409 if (gMajDevNo < 0) {
16410 printf("dtrace_init: failed to allocate a major number!\n");
16411 gDTraceInited = 0;
16412 return;
16413 }
16414
16415 if (NULL == devfs_make_node_clone( makedev(gMajDevNo, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
16416 dtrace_clone_func, DTRACEMNR_DTRACE, 0 )) {
16417 printf("dtrace_init: failed to devfs_make_node_clone for dtrace!\n");
16418 gDTraceInited = 0;
16419 return;
16420 }
16421
16422 #if defined(DTRACE_MEMORY_ZONES)
16423
16424 /*
16425 * Initialize the dtrace kalloc-emulation zones.
16426 */
16427 dtrace_alloc_init();
16428
16429 #endif /* DTRACE_MEMORY_ZONES */
16430
16431 /*
16432 * Allocate the dtrace_probe_t zone
16433 */
16434 dtrace_probe_t_zone = zinit(sizeof(dtrace_probe_t),
16435 1024 * sizeof(dtrace_probe_t),
16436 sizeof(dtrace_probe_t),
16437 "dtrace.dtrace_probe_t");
16438
16439 /*
16440 * Create the dtrace lock group and attrs.
16441 */
16442 dtrace_lck_attr = lck_attr_alloc_init();
16443 dtrace_lck_grp_attr = lck_grp_attr_alloc_init();
16444 dtrace_lck_grp = lck_grp_alloc_init("dtrace", dtrace_lck_grp_attr);
16445
16446 /*
16447 * We have to initialize all locks explicitly
16448 */
16449 lck_mtx_init(&dtrace_lock, dtrace_lck_grp, dtrace_lck_attr);
16450 lck_mtx_init(&dtrace_provider_lock, dtrace_lck_grp, dtrace_lck_attr);
16451 lck_mtx_init(&dtrace_meta_lock, dtrace_lck_grp, dtrace_lck_attr);
16452 #ifdef DEBUG
16453 lck_mtx_init(&dtrace_errlock, dtrace_lck_grp, dtrace_lck_attr);
16454 #endif
16455 lck_rw_init(&dtrace_dof_mode_lock, dtrace_lck_grp, dtrace_lck_attr);
16456
16457 /*
16458 * The cpu_core structure consists of per-CPU state available in any context.
16459 * On some architectures, this may mean that the page(s) containing the
16460 * NCPU-sized array of cpu_core structures must be locked in the TLB -- it
16461 * is up to the platform to assure that this is performed properly. Note that
16462 * the structure is sized to avoid false sharing.
16463 */
16464 lck_mtx_init(&cpu_lock, dtrace_lck_grp, dtrace_lck_attr);
16465 lck_mtx_init(&mod_lock, dtrace_lck_grp, dtrace_lck_attr);
16466
16467 cpu_core = (cpu_core_t *)kmem_zalloc( ncpu * sizeof(cpu_core_t), KM_SLEEP );
16468 for (i = 0; i < ncpu; ++i) {
16469 lck_mtx_init(&cpu_core[i].cpuc_pid_lock, dtrace_lck_grp, dtrace_lck_attr);
16470 }
16471
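/*
 * Build the shadow cpu_t list as a circular list: cpu_next of the
 * last entry points back to the first.
 */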
16472 cpu_list = (cpu_t *)kmem_zalloc( ncpu * sizeof(cpu_t), KM_SLEEP );
16473 for (i = 0; i < ncpu; ++i) {
16474 cpu_list[i].cpu_id = (processorid_t)i;
16475 cpu_list[i].cpu_next = &(cpu_list[(i+1) % ncpu]);
16476 lck_rw_init(&cpu_list[i].cpu_ft_lock, dtrace_lck_grp, dtrace_lck_attr);
16477 }
16478
16479 lck_mtx_lock(&cpu_lock);
16480 for (i = 0; i < ncpu; ++i)
16481 dtrace_cpu_setup_initial( (processorid_t)i ); /* In lieu of register_cpu_setup_func() callback */
16482 lck_mtx_unlock(&cpu_lock);
16483
16484 (void)dtrace_abs_to_nano(0LL); /* Force once only call to clock_timebase_info (which can take a lock) */
16485
16486 /*
16487 * See dtrace_impl.h for a description of dof modes.
16488 * The default is lazy dof.
16489 *
16490 * XXX Warn if state is LAZY_OFF? It won't break anything, but
16491 * makes no sense...
16492 */
16493 if (!PE_parse_boot_arg("dtrace_dof_mode", &dtrace_dof_mode)) {
16494 dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
16495 }
16496
16497 /*
16498 * Sanity check of dof mode value.
16499 */
16500 switch (dtrace_dof_mode) {
16501 case DTRACE_DOF_MODE_NEVER:
16502 case DTRACE_DOF_MODE_LAZY_ON:
16503 /* valid modes, but nothing else we need to do */
16504 break;
16505
16506 case DTRACE_DOF_MODE_LAZY_OFF:
16507 case DTRACE_DOF_MODE_NON_LAZY:
16508 /* Cannot wait for a dtrace_open to init fasttrap */
16509 fasttrap_init();
16510 break;
16511
16512 default:
16513 /* Invalid, clamp to non-lazy */
16514 dtrace_dof_mode = DTRACE_DOF_MODE_NON_LAZY;
16515 fasttrap_init();
16516 break;
16517 }
16518
16519 gDTraceInited = 1;
16520
16521 } else
16522 panic("dtrace_init: called twice!\n");
16523 }
16524
16525 void
16526 dtrace_postinit(void)
16527 {
16528 dtrace_attach( (dev_info_t *)makedev(gMajDevNo, 0), 0 );
16529 }
16530 #undef DTRACE_MAJOR
16531
16532 /*
16533 * Routines used to register interest in cpu's being added to or removed
16534 * from the system.
16535 */
16536 void
16537 register_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
16538 {
16539 #pragma unused(ignore1,ignore2)
16540 }
16541
16542 void
16543 unregister_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
16544 {
16545 #pragma unused(ignore1,ignore2)
16546 }
16547 #endif /* __APPLE__ */