4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * #pragma ident "@(#)fasttrap.c 1.21 06/06/12 SMI"
31 #include <sys/types.h>
34 #include <sys/errno.h>
37 #include <sys/systm.h>
38 #include <sys/kauth.h>
40 #include <sys/fasttrap.h>
41 #include <sys/fasttrap_impl.h>
42 #include <sys/fasttrap_isa.h>
43 #include <sys/dtrace.h>
44 #include <sys/dtrace_impl.h>
47 #include <miscfs/devfs/devfs.h>
48 #include <sys/proc_internal.h>
49 #include <sys/dtrace_glue.h>
50 #include <sys/dtrace_ptss.h>
52 #include <kern/zalloc.h>
54 #define proc_t struct proc
57 * User-Land Trap-Based Tracing
58 * ----------------------------
60 * The fasttrap provider allows DTrace consumers to instrument any user-level
61 * instruction to gather data; this includes probes with semantic
62 * significance like entry and return as well as simple offsets into the
63 * function. While the specific techniques used are very ISA specific, the
64 * methodology is generalizable to any architecture.
67 * The General Methodology
68 * -----------------------
70 * With the primary goal of tracing every user-land instruction and the
71 * limitation that we can't trust user space, so we don't want to rely on much
72 * information there, we begin by replacing the instructions we want to trace
73 * with trap instructions. Each instruction we overwrite is saved into a hash
74 * table keyed by process ID and pc address. When we enter the kernel due to
75 * this trap instruction, we need the effects of the replaced instruction to
76 * appear to have occurred before we proceed with the user thread's execution.
79 * Each user level thread is represented by a ulwp_t structure which is
80 * always easily accessible through a register. The most basic way to produce
81 * the effects of the instruction we replaced is to copy that instruction out
82 * to a bit of scratch space reserved in the user thread's ulwp_t structure
83 * (a sort of kernel-private thread local storage), set the PC to that
84 * scratch space and single step. When we reenter the kernel after single
85 * stepping the instruction we must then adjust the PC to point to what would
86 * normally be the next instruction. Of course, special care must be taken
87 * for branches and jumps, but these represent such a small fraction of any
88 * instruction set that writing the code to emulate these in the kernel is not too difficult.
91 * Return probes may require several tracepoints to trace every return site,
92 * and, conversely, each tracepoint may activate several probes (the entry
93 * and offset 0 probes, for example). To solve this multiplexing problem,
94 * tracepoints contain lists of probes to activate and probes contain lists
95 * of tracepoints to enable. If a probe is activated, it adds its ID to
96 * existing tracepoints or creates new ones as necessary.
98 * Most probes are activated _before_ the instruction is executed, but return
99 * probes are activated _after_ the effects of the last instruction of the
100 * function are visible. Return probes must be fired _after_ we have
101 * single-stepped the instruction whereas all other probes are fired beforehand.
108 * The lock ordering below -- both internally and with respect to the DTrace
109 * framework -- is a little tricky and bears some explanation. Each provider
110 * has a lock (ftp_mtx) that protects its members including reference counts
111 * for enabled probes (ftp_rcount), consumers actively creating probes
112 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
113 * from being freed. A provider is looked up by taking the bucket lock for the
114 * provider hash table, and is returned with its lock held. The provider lock
115 * may be taken in functions invoked by the DTrace framework, but may not be
116 * held while calling functions in the DTrace framework.
118 * To ensure consistency over multiple calls to the DTrace framework, the
119 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
120 * not be taken when holding the provider lock as that would create a cyclic
121 * lock ordering. In situations where one would naturally take the provider
122 * lock and then the creation lock, we instead up a reference count to prevent
123 * the provider from disappearing, drop the provider lock, and acquire the creation lock.
127 * bucket lock before provider lock
128 * DTrace before provider lock
129 * creation lock before DTrace
130 * never hold the provider lock and creation lock simultaneously
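/*
 * A minimal sketch of the reference-count dance described above. The helper
 * name is illustrative; the real code performs these steps inline (see
 * fasttrap_add_probe() below), but the shape is the same: pin the provider
 * with ftp_ccount, drop ftp_mtx, and only then take the creation lock.
 */
#if 0	/* example only, never compiled */
static void
example_pin_provider_then_create(fasttrap_provider_t *fp)
{
	/* provider was looked up with ftp_mtx held */
	fp->ftp_ccount++;		/* keeps the provider from being freed */
	lck_mtx_unlock(&fp->ftp_mtx);	/* never hold ftp_mtx across DTrace framework calls */

	lck_mtx_lock(&fp->ftp_cmtx);	/* creation lock, taken without ftp_mtx held */
	/* ... dtrace_probe_lookup()/dtrace_probe_create() calls go here ... */
	lck_mtx_unlock(&fp->ftp_cmtx);

	lck_mtx_lock(&fp->ftp_mtx);	/* drop our pin and check for a pending retire */
	fp->ftp_ccount--;
	lck_mtx_unlock(&fp->ftp_mtx);
}
#endif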
133 static dev_info_t *fasttrap_devi;
134 static dtrace_meta_provider_id_t fasttrap_meta_id;
136 static thread_call_t fasttrap_timeout;
137 static lck_mtx_t fasttrap_cleanup_mtx;
138 static uint_t fasttrap_cleanup_work;
141 * Generation count on modifications to the global tracepoint lookup table.
143 static volatile uint64_t fasttrap_mod_gen;
145 #if !defined(__APPLE__)
147 * When the fasttrap provider is loaded, fasttrap_max is set to either
148 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
149 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
150 * incremented by the number of tracepoints that may be associated with that
151 * probe; fasttrap_total is capped at fasttrap_max.
153 #define FASTTRAP_MAX_DEFAULT 2500000
156 static uint32_t fasttrap_max;
157 static uint32_t fasttrap_total;
160 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
161 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
162 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
164 fasttrap_hash_t fasttrap_tpoints;
165 static fasttrap_hash_t fasttrap_provs;
166 static fasttrap_hash_t fasttrap_procs;
168 static uint64_t fasttrap_pid_count;    /* pid ref count */
169 static lck_mtx_t fasttrap_count_mtx;    /* lock on ref count */
171 #define FASTTRAP_ENABLE_FAIL 1
172 #define FASTTRAP_ENABLE_PARTIAL 2
174 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
175 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
177 #if defined(__APPLE__)
178 static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
179     const dtrace_pattr_t *);
181 static void fasttrap_provider_retire(pid_t, const char *, int);
182 static void fasttrap_provider_free(fasttrap_provider_t *);
184 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
185 static void fasttrap_proc_release(fasttrap_proc_t *);
187 #define FASTTRAP_PROVS_INDEX(pid, name) \
188 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
190 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
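/*
 * A minimal sketch of how these index macros are used: hash to a bucket,
 * take the bucket lock, and walk the chain. This mirrors the lookups in
 * fasttrap_proc_lookup()/fasttrap_provider_lookup() below; the function name
 * is illustrative only, and the real lookup additionally takes the entry's
 * own lock and bumps its reference count before dropping the bucket lock.
 */
#if 0	/* example only, never compiled */
static fasttrap_proc_t *
example_proc_find(pid_t pid)
{
	fasttrap_bucket_t *bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	fasttrap_proc_t *fprc;

	lck_mtx_lock(&bucket->ftb_mtx);
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct)
			break;		/* found a live entry for this pid */
	}
	lck_mtx_unlock(&bucket->ftb_mtx);
	return fprc;
}
#endif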
192 #if defined(__APPLE__)
195 * To save memory, some common memory allocations are given a
196 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
197 * which means it would fall into the kalloc.128 bucket. With
198 * 20k elements allocated, the space saved is substantial.
201 struct zone *fasttrap_tracepoint_t_zone;
204 * fasttrap_probe_t's are variable in size. Some quick profiling has shown
205 * that the sweet spot for reducing memory footprint is covering the first
206 * three sizes. Everything larger goes into the common pool.
208 #define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4
210 struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];
212 static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
214 "dtrace.fasttrap_probe_t[1]",
215 "dtrace.fasttrap_probe_t[2]",
216 "dtrace.fasttrap_probe_t[3]"
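/*
 * A minimal sketch of the allocation policy implied by the comment above:
 * probes small enough for one of the per-size zones come from that zone,
 * anything larger falls back to the general kernel allocator. The function
 * name is illustrative; the real allocations appear inline in
 * fasttrap_add_probe() and fasttrap_meta_create_probe().
 */
#if 0	/* example only, never compiled */
static fasttrap_probe_t *
example_probe_alloc(unsigned int ntps)
{
	fasttrap_probe_t *pp;
	size_t size = offsetof(fasttrap_probe_t, ftp_tps[ntps]);

	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc(fasttrap_probe_t_zones[ntps]);
		bzero(pp, size);
	} else {
		pp = kmem_zalloc(size, KM_SLEEP);
	}
	return pp;
}
#endif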
220 * We have to manage locks explicitly
222 lck_grp_t *fasttrap_lck_grp;
223 lck_grp_attr_t *fasttrap_lck_grp_attr;
224 lck_attr_t *fasttrap_lck_attr;
228 fasttrap_highbit(ulong_t i)
235     if (i & 0xffffffff00000000ul) {
239     if (i & 0xffff0000) {
258 fasttrap_hash_str(const char *p)
264         hval = (hval << 4) + *p++;
265         if ((g = (hval & 0xf0000000)) != 0)
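/*
 * For reference, the fragments above implement the classic ELF-style string
 * hash. A self-contained sketch of that algorithm (the surrounding loop and
 * return are filled in here as an assumption; the arithmetic matches the
 * lines shown above):
 */
#if 0	/* example only, never compiled */
static uint_t
example_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;	/* fold in the next character */
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;	/* mix the high nibble back in */
		hval &= ~g;			/* and clear it */
	}
	return (hval);
}
#endif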
273 * FIXME - needs implementation
276 fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
278 #pragma unused(p, t, pc)
281     sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
283     sqp->sq_info.si_signo = SIGTRAP;
284     sqp->sq_info.si_code = TRAP_DTRACE;
285     sqp->sq_info.si_addr = (caddr_t)pc;
287     mutex_enter(&p->p_lock);
289     mutex_exit(&p->p_lock);
295 printf("fasttrap_sigtrap called with no implementation.\n");
299 * This function ensures that no threads are actively using the memory
300 * associated with probes that were formerly live.
303 fasttrap_mod_barrier(uint64_t gen)
307     if (gen < fasttrap_mod_gen)
312     for (i = 0; i < NCPU; i++) {
313         lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
314         lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
319 * This is the timeout's callback for cleaning up the providers and their probes.
324 fasttrap_pid_cleanup_cb(void *ignored, void* ignored2)
326 #pragma unused(ignored, ignored2)
327     fasttrap_provider_t **fpp, *fp;
328     fasttrap_bucket_t *bucket;
329     dtrace_provider_id_t provid;
330     unsigned int i, later = 0;
332     static volatile int in = 0;
336     lck_mtx_lock(&fasttrap_cleanup_mtx);
337     while (fasttrap_cleanup_work) {
338         fasttrap_cleanup_work = 0;
339         lck_mtx_unlock(&fasttrap_cleanup_mtx);
344 * Iterate over all the providers trying to remove the marked
345 * ones. If a provider is marked but not retired, we just
346 * have to take a crack at removing it -- it's no big deal if
349         for (i = 0; i < fasttrap_provs.fth_nent; i++) {
350             bucket = &fasttrap_provs.fth_table[i];
351             lck_mtx_lock(&bucket->ftb_mtx);
352             fpp = (fasttrap_provider_t **)&bucket->ftb_data;
354             while ((fp = *fpp) != NULL) {
355                 if (!fp->ftp_marked) {
360                 lck_mtx_lock(&fp->ftp_mtx);
363 * If this provider has consumers actively
364 * creating probes (ftp_ccount) or is a USDT
365 * provider (ftp_mcount), we can't unregister
368                 if (fp->ftp_ccount != 0 ||
369                     fp->ftp_mcount != 0) {
371                     lck_mtx_unlock(&fp->ftp_mtx);
375                 if (!fp->ftp_retired || fp->ftp_rcount != 0)
378                     lck_mtx_unlock(&fp->ftp_mtx);
381 * If we successfully unregister this
382 * provider we can remove it from the hash
383 * chain and free the memory. If our attempt
384 * to unregister fails and this is a retired
385 * provider, increment our flag to try again
386 * pretty soon. If we've consumed more than
387 * half of our total permitted number of
388 * probes call dtrace_condense() to try to
389 * clean out the unenabled probes.
391                 provid = fp->ftp_provid;
392                 if (dtrace_unregister(provid) != 0) {
393                     if (fasttrap_total > fasttrap_max / 2)
394                         (void) dtrace_condense(provid);
395                     later += fp->ftp_marked;
399                     fasttrap_provider_free(fp);
402             lck_mtx_unlock(&bucket->ftb_mtx);
405         lck_mtx_lock(&fasttrap_cleanup_mtx);
408     ASSERT(fasttrap_timeout != 0);
411 * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
413     if (fasttrap_timeout != (thread_call_t)1)
414         thread_call_free(fasttrap_timeout);
417 * If we were unable to remove a retired provider, try again after
418 * a second. This situation can occur in certain circumstances where
419 * providers cannot be unregistered even though they have no probes
420 * enabled because of an execution of dtrace -l or something similar.
421 * If the timeout has been disabled (set to 1 because we're trying
422 * to detach), we set fasttrap_cleanup_work to ensure that we'll
423 * get a chance to do that work if and when the timeout is reenabled
426     if (later > 0 && fasttrap_timeout != (thread_call_t)1)
427         /* The time value passed to dtrace_timeout is in nanos */
428         fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
430         fasttrap_cleanup_work = 1;
432         fasttrap_timeout = 0;
434     lck_mtx_unlock(&fasttrap_cleanup_mtx);
439 * Activates the asynchronous cleanup mechanism.
442 fasttrap_pid_cleanup(void)
444     lck_mtx_lock(&fasttrap_cleanup_mtx);
445     fasttrap_cleanup_work = 1;
446     if (fasttrap_timeout == 0)
447         fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
448     lck_mtx_unlock(&fasttrap_cleanup_mtx);
452 * This is called from cfork() via dtrace_fasttrap_fork(). The child
453 * process's address space is (roughly) a copy of the parent process's, so
454 * we have to remove all the instrumentation we had previously enabled in the parent.
458 fasttrap_fork(proc_t *p, proc_t *cp)
460     pid_t ppid = p->p_pid;
463     ASSERT(current_proc() == p);
464     lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
465     ASSERT(p->p_dtrace_count > 0);
466     ASSERT(cp->p_dtrace_count == 0);
469 * This would be simpler and faster if we maintained per-process
470 * hash tables of enabled tracepoints. It could, however, potentially
471 * slow down execution of a tracepoint since we'd need to go
472 * through two levels of indirection. In the future, we should
473 * consider either maintaining per-process ancillary lists of
474 * enabled tracepoints or hanging a pointer to a per-process hash
475 * table of enabled tracepoints off the proc structure.
479 * We don't have to worry about the child process disappearing
480 * because we're in fork().
482     if (cp != sprlock(cp->p_pid)) {
483         printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
489 * Iterate over every tracepoint looking for ones that belong to the
490 * parent process, and remove each from the child process.
492     for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
493         fasttrap_tracepoint_t *tp;
494         fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
496         lck_mtx_lock(&bucket->ftb_mtx);
497         for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
498             if (tp->ftt_pid == ppid &&
499                 !tp->ftt_proc->ftpc_defunct) {
500                 fasttrap_tracepoint_remove(cp, tp);
503         lck_mtx_unlock(&bucket->ftb_mtx);
507 * Free any ptss pages/entries in the child.
509     dtrace_ptss_fork(p, cp);
516 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
517 * is set on the proc structure to indicate that there is a pid provider
518 * associated with this process.
521 fasttrap_exec_exit(proc_t *p)
523     ASSERT(p == current_proc());
524     lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
525     lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
528 /* APPLE NOTE: Okay, the locking here is really odd and needs some
529 * explaining. This method is always called with the proc_lock held.
530 * We must drop the proc_lock before calling fasttrap_provider_retire
531 * to avoid a deadlock when it takes the bucket lock.
533 * Next, the dtrace_ptss_exec_exit function requires the sprlock
534 * be held, but not the proc_lock.
536 * Finally, we must re-acquire the proc_lock
541 * We clean up the pid provider for this process here; user-land
542 * static probes are handled by the meta-provider remove entry point.
544     fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
545 #if defined(__APPLE__)
547 * We also need to remove any aliased providers.
548 * XXX optimization: track which provider types are instantiated
549 * and only retire as needed.
551     fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
552     fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
553 #endif /* __APPLE__ */
556 * This should be called after it is no longer possible for a user
557 * thread to execute (potentially dtrace instrumented) instructions.
559     lck_mtx_lock(&p->p_dtrace_sprlock);
560     dtrace_ptss_exec_exit(p);
561     lck_mtx_unlock(&p->p_dtrace_sprlock);
569 fasttrap_pid_provide(void *arg
, const dtrace_probedesc_t
*desc
)
571 #pragma unused(arg, desc)
573 * There are no "default" pid probes.
578 fasttrap_tracepoint_enable(proc_t
*p
, fasttrap_probe_t
*probe
, uint_t index
)
580 fasttrap_tracepoint_t
*tp
, *new_tp
= NULL
;
581 fasttrap_bucket_t
*bucket
;
586 ASSERT(index
< probe
->ftp_ntps
);
588 pid
= probe
->ftp_pid
;
589 pc
= probe
->ftp_tps
[index
].fit_tp
->ftt_pc
;
590 id
= &probe
->ftp_tps
[index
].fit_id
;
592 ASSERT(probe
->ftp_tps
[index
].fit_tp
->ftt_pid
== pid
);
594 //ASSERT(!(p->p_flag & SVFORK));
597 * Before we make any modifications, make sure we've imposed a barrier
598 * on the generation in which this probe was last modified.
600 fasttrap_mod_barrier(probe
->ftp_gen
);
602 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
605 * If the tracepoint has already been enabled, just add our id to the
606 * list of interested probes. This may be our second time through
607 * this path in which case we'll have constructed the tracepoint we'd
608 * like to install. If we can't find a match, and have an allocated
609 * tracepoint ready to go, enable that one now.
611 * A tracepoint whose process is defunct is also considered defunct.
614 lck_mtx_lock(&bucket
->ftb_mtx
);
615 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
616 if (tp
->ftt_pid
!= pid
|| tp
->ftt_pc
!= pc
||
617 tp
->ftt_proc
->ftpc_defunct
)
621 * Now that we've found a matching tracepoint, it would be
622 * a decent idea to confirm that the tracepoint is still
623 * enabled and the trap instruction hasn't been overwritten.
624 * Since this is a little hairy, we'll punt for now.
628 * This can't be the first interested probe. We don't have
629 * to worry about another thread being in the midst of
630 * deleting this tracepoint (which would be the only valid
631 * reason for a tracepoint to have no interested probes)
632 * since we're holding P_PR_LOCK for this process.
634 ASSERT(tp
->ftt_ids
!= NULL
|| tp
->ftt_retids
!= NULL
);
636 switch (id
->fti_ptype
) {
639 case DTFTP_IS_ENABLED
:
640 id
->fti_next
= tp
->ftt_ids
;
641 dtrace_membar_producer();
643 dtrace_membar_producer();
647 case DTFTP_POST_OFFSETS
:
648 id
->fti_next
= tp
->ftt_retids
;
649 dtrace_membar_producer();
651 dtrace_membar_producer();
658 lck_mtx_unlock(&bucket
->ftb_mtx
);
660 if (new_tp
!= NULL
) {
661 new_tp
->ftt_ids
= NULL
;
662 new_tp
->ftt_retids
= NULL
;
669 * If we have a good tracepoint ready to go, install it now while
670 * we have the lock held and no one can screw with us.
672 if (new_tp
!= NULL
) {
675 new_tp
->ftt_next
= bucket
->ftb_data
;
676 dtrace_membar_producer();
677 bucket
->ftb_data
= new_tp
;
678 dtrace_membar_producer();
679 lck_mtx_unlock(&bucket
->ftb_mtx
);
682 * Activate the tracepoint in the ISA-specific manner.
683 * If this fails, we need to report the failure, but
684 * indicate that this tracepoint must still be disabled
685 * by calling fasttrap_tracepoint_disable().
687 if (fasttrap_tracepoint_install(p
, new_tp
) != 0)
688 rc
= FASTTRAP_ENABLE_PARTIAL
;
691 * Increment the count of the number of tracepoints active in
692 * the victim process.
694 //ASSERT(p->p_proc_flag & P_PR_LOCK);
700 lck_mtx_unlock(&bucket
->ftb_mtx
);
703 * Initialize the tracepoint that's been preallocated with the probe.
705 new_tp
= probe
->ftp_tps
[index
].fit_tp
;
707 ASSERT(new_tp
->ftt_pid
== pid
);
708 ASSERT(new_tp
->ftt_pc
== pc
);
709 ASSERT(new_tp
->ftt_proc
== probe
->ftp_prov
->ftp_proc
);
710 ASSERT(new_tp
->ftt_ids
== NULL
);
711 ASSERT(new_tp
->ftt_retids
== NULL
);
713 switch (id
->fti_ptype
) {
716 case DTFTP_IS_ENABLED
:
718 new_tp
->ftt_ids
= id
;
722 case DTFTP_POST_OFFSETS
:
724 new_tp
->ftt_retids
= id
;
732 * If the ISA-dependent initialization goes to plan, go back to the
733 * beginning and try to install this freshly made tracepoint.
735 if (fasttrap_tracepoint_init(p
, new_tp
, pc
, id
->fti_ptype
) == 0)
738 new_tp
->ftt_ids
= NULL
;
739 new_tp
->ftt_retids
= NULL
;
741 return (FASTTRAP_ENABLE_FAIL
);
745 fasttrap_tracepoint_disable(proc_t
*p
, fasttrap_probe_t
*probe
, uint_t index
)
747 fasttrap_bucket_t
*bucket
;
748 fasttrap_provider_t
*provider
= probe
->ftp_prov
;
749 fasttrap_tracepoint_t
**pp
, *tp
;
750 fasttrap_id_t
*id
, **idp
;
754 ASSERT(index
< probe
->ftp_ntps
);
756 pid
= probe
->ftp_pid
;
757 pc
= probe
->ftp_tps
[index
].fit_tp
->ftt_pc
;
758 id
= &probe
->ftp_tps
[index
].fit_id
;
760 ASSERT(probe
->ftp_tps
[index
].fit_tp
->ftt_pid
== pid
);
763 * Find the tracepoint and make sure that our id is one of the
764 * ones registered with it.
766 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
767 lck_mtx_lock(&bucket
->ftb_mtx
);
768 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
769 if (tp
->ftt_pid
== pid
&& tp
->ftt_pc
== pc
&&
770 tp
->ftt_proc
== provider
->ftp_proc
)
775 * If we somehow lost this tracepoint, we're in a world of hurt.
779 switch (id
->fti_ptype
) {
782 case DTFTP_IS_ENABLED
:
783 ASSERT(tp
->ftt_ids
!= NULL
);
788 case DTFTP_POST_OFFSETS
:
789 ASSERT(tp
->ftt_retids
!= NULL
);
790 idp
= &tp
->ftt_retids
;
794 /* Fix compiler warning... */
799 while ((*idp
)->fti_probe
!= probe
) {
800 idp
= &(*idp
)->fti_next
;
801 ASSERT(*idp
!= NULL
);
806 dtrace_membar_producer();
808 ASSERT(id
->fti_probe
== probe
);
811 * If there are other registered enablings of this tracepoint, we're
812 * all done, but if this was the last probe associated with this
813 * tracepoint, we need to remove and free it.
815 if (tp
->ftt_ids
!= NULL
|| tp
->ftt_retids
!= NULL
) {
818 * If the current probe's tracepoint is in use, swap it
819 * for an unused tracepoint.
821 if (tp
== probe
->ftp_tps
[index
].fit_tp
) {
822 fasttrap_probe_t
*tmp_probe
;
823 fasttrap_tracepoint_t
**tmp_tp
;
826 if (tp
->ftt_ids
!= NULL
) {
827 tmp_probe
= tp
->ftt_ids
->fti_probe
;
828 tmp_index
= FASTTRAP_ID_INDEX(tp
->ftt_ids
);
829 tmp_tp
= &tmp_probe
->ftp_tps
[tmp_index
].fit_tp
;
831 tmp_probe
= tp
->ftt_retids
->fti_probe
;
832 tmp_index
= FASTTRAP_ID_INDEX(tp
->ftt_retids
);
833 tmp_tp
= &tmp_probe
->ftp_tps
[tmp_index
].fit_tp
;
836 ASSERT(*tmp_tp
!= NULL
);
837 ASSERT(*tmp_tp
!= probe
->ftp_tps
[index
].fit_tp
);
838 ASSERT((*tmp_tp
)->ftt_ids
== NULL
);
839 ASSERT((*tmp_tp
)->ftt_retids
== NULL
);
841 probe
->ftp_tps
[index
].fit_tp
= *tmp_tp
;
846 lck_mtx_unlock(&bucket
->ftb_mtx
);
849 * Tag the modified probe with the generation in which it was
852 probe
->ftp_gen
= fasttrap_mod_gen
;
856 lck_mtx_unlock(&bucket
->ftb_mtx
);
859 * We can't safely remove the tracepoint from the set of active
860 * tracepoints until we've actually removed the fasttrap instruction
861 * from the process's text. We can, however, operate on this
862 * tracepoint secure in the knowledge that no other thread is going to
863 * be looking at it since we hold P_PR_LOCK on the process if it's
864 * live or we hold the provider lock on the process if it's dead and
869 * We only need to remove the actual instruction if we're looking
870 * at an existing process
874 * If we fail to restore the instruction we need to kill
875 * this process since it's in a completely unrecoverable
878 if (fasttrap_tracepoint_remove(p
, tp
) != 0)
879 fasttrap_sigtrap(p
, NULL
, pc
);
882 * Decrement the count of the number of tracepoints active
883 * in the victim process.
885 //ASSERT(p->p_proc_flag & P_PR_LOCK);
890 * Remove the probe from the hash table of active tracepoints.
892 lck_mtx_lock(&bucket
->ftb_mtx
);
893 pp
= (fasttrap_tracepoint_t
**)&bucket
->ftb_data
;
896 pp
= &(*pp
)->ftt_next
;
901 dtrace_membar_producer();
903 lck_mtx_unlock(&bucket
->ftb_mtx
);
906 * Tag the modified probe with the generation in which it was changed.
908 probe
->ftp_gen
= fasttrap_mod_gen
;
912 fasttrap_enable_callbacks(void)
915 * We don't have to play the rw lock game here because we're
916 * providing something rather than taking something away --
917 * we can be sure that no threads have tried to follow this
918 * function pointer yet.
920 lck_mtx_lock(&fasttrap_count_mtx
);
921 if (fasttrap_pid_count
== 0) {
922 ASSERT(dtrace_pid_probe_ptr
== NULL
);
923 ASSERT(dtrace_return_probe_ptr
== NULL
);
924 dtrace_pid_probe_ptr
= &fasttrap_pid_probe
;
925 dtrace_return_probe_ptr
= &fasttrap_return_probe
;
927 ASSERT(dtrace_pid_probe_ptr
== &fasttrap_pid_probe
);
928 ASSERT(dtrace_return_probe_ptr
== &fasttrap_return_probe
);
929 fasttrap_pid_count
++;
930 lck_mtx_unlock(&fasttrap_count_mtx
);
934 fasttrap_disable_callbacks(void)
936 //ASSERT(MUTEX_HELD(&cpu_lock));
938 lck_mtx_lock(&fasttrap_count_mtx
);
939 ASSERT(fasttrap_pid_count
> 0);
940 fasttrap_pid_count
--;
941 if (fasttrap_pid_count
== 0) {
942 cpu_t
*cur
, *cpu
= CPU
;
945 * APPLE NOTE: This loop seems broken; it touches every CPU
946 * but the one we're actually running on. Need to ask Sun folks
947 * if that is safe. Scenario is this: We're running on CPU A,
948 * and lock all but A. Then we get preempted, and start running
949 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
951 for (cur
= cpu
->cpu_next
; cur
!= cpu
; cur
= cur
->cpu_next
) {
952 lck_rw_lock_exclusive(&cur
->cpu_ft_lock
);
953 // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
956 dtrace_pid_probe_ptr
= NULL
;
957 dtrace_return_probe_ptr
= NULL
;
959 for (cur
= cpu
->cpu_next
; cur
!= cpu
; cur
= cur
->cpu_next
) {
960 lck_rw_unlock_exclusive(&cur
->cpu_ft_lock
);
961 // rw_exit(&cur->cpu_ft_lock);
964 lck_mtx_unlock(&fasttrap_count_mtx
);
969 fasttrap_pid_enable(void *arg
, dtrace_id_t id
, void *parg
)
971 #pragma unused(arg, id)
972 fasttrap_probe_t
*probe
= parg
;
976 ASSERT(probe
!= NULL
);
977 ASSERT(!probe
->ftp_enabled
);
978 ASSERT(id
== probe
->ftp_id
);
979 // ASSERT(MUTEX_HELD(&cpu_lock));
982 * Increment the count of enabled probes on this probe's provider;
983 * the provider can't go away while the probe still exists. We
984 * must increment this even if we aren't able to properly enable
987 lck_mtx_lock(&probe
->ftp_prov
->ftp_mtx
);
988 probe
->ftp_prov
->ftp_rcount
++;
989 lck_mtx_unlock(&probe
->ftp_prov
->ftp_mtx
);
992 * If this probe's provider is retired (meaning it was valid in a
993 * previously exec'ed incarnation of this address space), bail out. The
994 * provider can't go away while we're in this code path.
996 if (probe
->ftp_prov
->ftp_retired
)
1000 * If we can't find the process, it may be that we're in the context of
1001 * a fork in which the traced process is being born and we're copying
1002 * USDT probes. Otherwise, the process is gone so bail.
1004 if ((p
= sprlock(probe
->ftp_pid
)) == PROC_NULL
) {
1005 #if defined(__APPLE__)
1007 * APPLE NOTE: We should never end up here. The Solaris sprlock()
1008 * does not return processes with SIDL set, but we always return
1009 * the child process.
1014 if ((curproc
->p_flag
& SFORKING
) == 0)
1017 lck_mtx_lock(&pidlock
);
1018 p
= prfind(probe
->ftp_pid
);
1021 * Confirm that curproc is indeed forking the process in which
1022 * we're trying to enable probes.
1025 //ASSERT(p->p_parent == curproc);
1026 ASSERT(p
->p_stat
== SIDL
);
1028 lck_mtx_lock(&p
->p_lock
);
1029 lck_mtx_unlock(&pidlock
);
1036 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
1037 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
1038 * To mimic this, we allocate on demand scratch space. If this is the first
1039 * time a probe has been enabled in this process, we need to allocate scratch
1040 * space for each already existing thread. Now is a good time to do this, as
1041 * the target process is suspended and the proc_lock is held.
1043 if (p
->p_dtrace_ptss_pages
== NULL
) {
1044 dtrace_ptss_enable(p
);
1047 // ASSERT(!(p->p_flag & SVFORK));
1051 * We have to enable the trap entry point before any user threads have
1052 * the chance to execute the trap instruction we're about to place
1053 * in their process's text.
1055 fasttrap_enable_callbacks();
1058 * Enable all the tracepoints and add this probe's id to each
1059 * tracepoint's list of active probes.
1061 for (i
= 0; i
< (int)probe
->ftp_ntps
; i
++) {
1062 if ((rc
= fasttrap_tracepoint_enable(p
, probe
, i
)) != 0) {
1064 * If enabling the tracepoint failed completely,
1065 * we don't have to disable it; if the failure
1066 * was only partial we must disable it.
1068 if (rc
== FASTTRAP_ENABLE_FAIL
)
1071 ASSERT(rc
== FASTTRAP_ENABLE_PARTIAL
);
1074 * Back up and pull out all the tracepoints we've
1075 * created so far for this probe.
1078 fasttrap_tracepoint_disable(p
, probe
, i
);
1086 * Since we're not actually enabling this probe,
1087 * drop our reference on the trap table entry.
1089 fasttrap_disable_callbacks();
1097 probe
->ftp_enabled
= 1;
1102 fasttrap_pid_disable(void *arg
, dtrace_id_t id
, void *parg
)
1104 #pragma unused(arg, id)
1105 fasttrap_probe_t
*probe
= parg
;
1106 fasttrap_provider_t
*provider
= probe
->ftp_prov
;
1110 ASSERT(id
== probe
->ftp_id
);
1113 * We won't be able to acquire a /proc-esque lock on the process
1114 * iff the process is dead and gone. In this case, we rely on the
1115 * provider lock as a point of mutual exclusion to prevent other
1116 * DTrace consumers from disabling this probe.
1118 if ((p
= sprlock(probe
->ftp_pid
)) != PROC_NULL
) {
1119 // ASSERT(!(p->p_flag & SVFORK));
1123 lck_mtx_lock(&provider
->ftp_mtx
);
1126 * Disable all the associated tracepoints (for fully enabled probes).
1128 if (probe
->ftp_enabled
) {
1129 for (i
= 0; i
< (int)probe
->ftp_ntps
; i
++) {
1130 fasttrap_tracepoint_disable(p
, probe
, i
);
1134 ASSERT(provider
->ftp_rcount
> 0);
1135 provider
->ftp_rcount
--;
1139 * Even though we may not be able to remove it entirely, we
1140 * mark this retired provider to get a chance to remove some
1141 * of the associated probes.
1143 if (provider
->ftp_retired
&& !provider
->ftp_marked
)
1144 whack
= provider
->ftp_marked
= 1;
1145 lck_mtx_unlock(&provider
->ftp_mtx
);
1151 * If the process is dead, we're just waiting for the
1152 * last probe to be disabled to be able to free it.
1154 if (provider
->ftp_rcount
== 0 && !provider
->ftp_marked
)
1155 whack
= provider
->ftp_marked
= 1;
1156 lck_mtx_unlock(&provider
->ftp_mtx
);
1160 fasttrap_pid_cleanup();
1162 if (!probe
->ftp_enabled
)
1165 probe
->ftp_enabled
= 0;
1167 // ASSERT(MUTEX_HELD(&cpu_lock));
1168 fasttrap_disable_callbacks();
1173 fasttrap_pid_getargdesc(void *arg
, dtrace_id_t id
, void *parg
,
1174 dtrace_argdesc_t
*desc
)
1176 #pragma unused(arg, id)
1177 fasttrap_probe_t
*probe
= parg
;
1181 desc
->dtargd_native
[0] = '\0';
1182 desc
->dtargd_xlate
[0] = '\0';
1184 if (probe
->ftp_prov
->ftp_retired
!= 0 ||
1185 desc
->dtargd_ndx
>= probe
->ftp_nargs
) {
1186 desc
->dtargd_ndx
= DTRACE_ARGNONE
;
1191 * We only need to set this member if the argument is remapped.
1193 if (probe
->ftp_argmap
!= NULL
)
1194 desc
->dtargd_mapping
= probe
->ftp_argmap
[desc
->dtargd_ndx
];
1196 str
= probe
->ftp_ntypes
;
1197 for (i
= 0; i
< desc
->dtargd_mapping
; i
++) {
1198 str
+= strlen(str
) + 1;
1201 (void) strlcpy(desc
->dtargd_native
, str
, sizeof(desc
->dtargd_native
));
1203 if (probe
->ftp_xtypes
== NULL
)
1206 str
= probe
->ftp_xtypes
;
1207 for (i
= 0; i
< desc
->dtargd_ndx
; i
++) {
1208 str
+= strlen(str
) + 1;
1211 (void) strlcpy(desc
->dtargd_xlate
, str
, sizeof(desc
->dtargd_xlate
));
1216 fasttrap_pid_destroy(void *arg
, dtrace_id_t id
, void *parg
)
1218 #pragma unused(arg, id)
1219 fasttrap_probe_t
*probe
= parg
;
1222 ASSERT(probe
!= NULL
);
1223 ASSERT(!probe
->ftp_enabled
);
1224 ASSERT(fasttrap_total
>= probe
->ftp_ntps
);
1226 atomic_add_32(&fasttrap_total
, -probe
->ftp_ntps
);
1227 #if !defined(__APPLE__)
1228 size_t size
= offsetof(fasttrap_probe_t
, ftp_tps
[probe
->ftp_ntps
]);
1231 if (probe
->ftp_gen
+ 1 >= fasttrap_mod_gen
)
1232 fasttrap_mod_barrier(probe
->ftp_gen
);
1234 for (i
= 0; i
< probe
->ftp_ntps
; i
++) {
1235 #if !defined(__APPLE__)
1236 kmem_free(probe
->ftp_tps
[i
].fit_tp
, sizeof (fasttrap_tracepoint_t
));
1238 zfree(fasttrap_tracepoint_t_zone
, probe
->ftp_tps
[i
].fit_tp
);
1242 #if !defined(__APPLE__)
1243 kmem_free(probe
, size
);
1245 if (probe
->ftp_ntps
< FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
) {
1246 zfree(fasttrap_probe_t_zones
[probe
->ftp_ntps
], probe
);
1248 size_t size
= offsetof(fasttrap_probe_t
, ftp_tps
[probe
->ftp_ntps
]);
1249 kmem_free(probe
, size
);
1255 static const dtrace_pattr_t pid_attr
= {
1256 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_ISA
},
1257 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1258 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1259 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_ISA
},
1260 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1263 static dtrace_pops_t pid_pops
= {
1264 fasttrap_pid_provide
,
1266 fasttrap_pid_enable
,
1267 fasttrap_pid_disable
,
1270 fasttrap_pid_getargdesc
,
1271 fasttrap_pid_getarg
,
1273 fasttrap_pid_destroy
1276 static dtrace_pops_t usdt_pops
= {
1277 fasttrap_pid_provide
,
1279 fasttrap_pid_enable
,
1280 fasttrap_pid_disable
,
1283 fasttrap_pid_getargdesc
,
1284 fasttrap_usdt_getarg
,
1286 fasttrap_pid_destroy
1289 static fasttrap_proc_t
*
1290 fasttrap_proc_lookup(pid_t pid
)
1292 fasttrap_bucket_t
*bucket
;
1293 fasttrap_proc_t
*fprc
, *new_fprc
;
1295 bucket
= &fasttrap_procs
.fth_table
[FASTTRAP_PROCS_INDEX(pid
)];
1296 lck_mtx_lock(&bucket
->ftb_mtx
);
1298 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1299 if (fprc
->ftpc_pid
== pid
&& !fprc
->ftpc_defunct
) {
1300 lck_mtx_lock(&fprc
->ftpc_mtx
);
1301 lck_mtx_unlock(&bucket
->ftb_mtx
);
1303 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1310 * Drop the bucket lock so we don't try to perform a sleeping
1311 * allocation under it.
1313 lck_mtx_unlock(&bucket
->ftb_mtx
);
1315 new_fprc
= kmem_zalloc(sizeof (fasttrap_proc_t
), KM_SLEEP
);
1316 ASSERT(new_fprc
!= NULL
);
1317 new_fprc
->ftpc_pid
= pid
;
1318 new_fprc
->ftpc_count
= 1;
1320 lck_mtx_lock(&bucket
->ftb_mtx
);
1323 * Take another lap through the list to make sure a proc hasn't
1324 * been created for this pid while we weren't under the bucket lock.
1326 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1327 if (fprc
->ftpc_pid
== pid
&& !fprc
->ftpc_defunct
) {
1328 lck_mtx_lock(&fprc
->ftpc_mtx
);
1329 lck_mtx_unlock(&bucket
->ftb_mtx
);
1331 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1333 kmem_free(new_fprc
, sizeof (fasttrap_proc_t
));
1339 #if defined(__APPLE__)
1341 * We have to initialize all locks explicitly
1343 lck_mtx_init(&new_fprc
->ftpc_mtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1346 new_fprc
->ftpc_next
= bucket
->ftb_data
;
1347 bucket
->ftb_data
= new_fprc
;
1349 lck_mtx_unlock(&bucket
->ftb_mtx
);
1355 fasttrap_proc_release(fasttrap_proc_t
*proc
)
1357 fasttrap_bucket_t
*bucket
;
1358 fasttrap_proc_t
*fprc
, **fprcp
;
1359 pid_t pid
= proc
->ftpc_pid
;
1361 lck_mtx_lock(&proc
->ftpc_mtx
);
1363 ASSERT(proc
->ftpc_count
!= 0);
1365 if (--proc
->ftpc_count
!= 0) {
1366 lck_mtx_unlock(&proc
->ftpc_mtx
);
1370 lck_mtx_unlock(&proc
->ftpc_mtx
);
1372 bucket
= &fasttrap_procs
.fth_table
[FASTTRAP_PROCS_INDEX(pid
)];
1373 lck_mtx_lock(&bucket
->ftb_mtx
);
1375 fprcp
= (fasttrap_proc_t
**)&bucket
->ftb_data
;
1376 while ((fprc
= *fprcp
) != NULL
) {
1380 fprcp
= &fprc
->ftpc_next
;
1384 * Something strange has happened if we can't find the proc.
1386 ASSERT(fprc
!= NULL
);
1388 *fprcp
= fprc
->ftpc_next
;
1390 lck_mtx_unlock(&bucket
->ftb_mtx
);
1392 #if defined(__APPLE__)
1394 * Apple explicit lock management. Not 100% certain we need this, the
1395 * memory is freed even without the destroy. Maybe accounting cleanup?
1397 lck_mtx_destroy(&fprc
->ftpc_mtx
, fasttrap_lck_grp
);
1400 kmem_free(fprc
, sizeof (fasttrap_proc_t
));
1404 * Lookup a fasttrap-managed provider based on its name and associated pid.
1405 * If the pattr argument is non-NULL, this function instantiates the provider
1406 * if it doesn't exist; otherwise it returns NULL. The provider is returned
1407 * with its lock held.
1409 #if defined(__APPLE__)
1410 static fasttrap_provider_t
*
1411 fasttrap_provider_lookup(pid_t pid
, fasttrap_provider_type_t provider_type
, const char *name
,
1412 const dtrace_pattr_t
*pattr
)
1413 #endif /* __APPLE__ */
1415 fasttrap_provider_t
*fp
, *new_fp
= NULL
;
1416 fasttrap_bucket_t
*bucket
;
1417 char provname
[DTRACE_PROVNAMELEN
];
1421 ASSERT(strlen(name
) < sizeof (fp
->ftp_name
));
1422 ASSERT(pattr
!= NULL
);
1424 bucket
= &fasttrap_provs
.fth_table
[FASTTRAP_PROVS_INDEX(pid
, name
)];
1425 lck_mtx_lock(&bucket
->ftb_mtx
);
1428 * Take a lap through the list and return the match if we find it.
1430 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1431 if (fp
->ftp_pid
== pid
&&
1432 #if defined(__APPLE__)
1433 fp
->ftp_provider_type
== provider_type
&&
1434 #endif /* __APPLE__ */
1435 strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1437 lck_mtx_lock(&fp
->ftp_mtx
);
1438 lck_mtx_unlock(&bucket
->ftb_mtx
);
1444 * Drop the bucket lock so we don't try to perform a sleeping
1445 * allocation under it.
1447 lck_mtx_unlock(&bucket
->ftb_mtx
);
1450 * Make sure the process exists, isn't a child created as the result
1451 * of a vfork(2), and isn't a zombie (but may be in fork).
1453 if ((p
= proc_find(pid
)) == NULL
) {
1457 if (p
->p_lflag
& (P_LINVFORK
| P_LEXIT
)) {
1464 * Increment p_dtrace_probes so that the process knows to inform us
1465 * when it exits or execs. fasttrap_provider_free() decrements this
1466 * when we're done with this provider.
1468 p
->p_dtrace_probes
++;
1471 * Grab the credentials for this process so we have
1472 * something to pass to dtrace_register().
1474 #if !defined(__APPLE__)
1475 mutex_enter(&p
->p_crlock
);
1478 mutex_exit(&p
->p_crlock
);
1479 mutex_exit(&p
->p_lock
);
1481 // lck_mtx_lock(&p->p_crlock);
1482 // Seems like OS X has no equivalent to crhold, even though it has a cr_ref field in ucred
1485 // lck_mtx_unlock(&p->p_crlock);
1488 #endif /* __APPLE__ */
1490 new_fp
= kmem_zalloc(sizeof (fasttrap_provider_t
), KM_SLEEP
);
1491 ASSERT(new_fp
!= NULL
);
1492 new_fp
->ftp_pid
= pid
;
1493 new_fp
->ftp_proc
= fasttrap_proc_lookup(pid
);
1494 #if defined(__APPLE__)
1495 new_fp
->ftp_provider_type
= provider_type
;
1498 * Apple locks require explicit init.
1500 lck_mtx_init(&new_fp
->ftp_mtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1501 lck_mtx_init(&new_fp
->ftp_cmtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1502 #endif /* __APPLE__ */
1504 ASSERT(new_fp
->ftp_proc
!= NULL
);
1506 lck_mtx_lock(&bucket
->ftb_mtx
);
1509 * Take another lap through the list to make sure a provider hasn't
1510 * been created for this pid while we weren't under the bucket lock.
1512 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1513 if (fp
->ftp_pid
== pid
&& strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1515 lck_mtx_lock(&fp
->ftp_mtx
);
1516 lck_mtx_unlock(&bucket
->ftb_mtx
);
1517 fasttrap_provider_free(new_fp
);
1523 (void) strlcpy(new_fp
->ftp_name
, name
, sizeof(new_fp
->ftp_name
));
1526 * Fail and return NULL if either the provider name is too long
1527 * or we fail to register this new provider with the DTrace
1528 * framework. Note that this is the only place we ever construct
1529 * the full provider name -- we keep it in pieces in the provider
1532 if (snprintf(provname
, sizeof (provname
), "%s%u", name
, (uint_t
)pid
) >=
1533 (int)sizeof (provname
) ||
1534 dtrace_register(provname
, pattr
,
1535 DTRACE_PRIV_PROC
| DTRACE_PRIV_OWNER
| DTRACE_PRIV_ZONEOWNER
, cred
,
1536 pattr
== &pid_attr
? &pid_pops
: &usdt_pops
, new_fp
,
1537 &new_fp
->ftp_provid
) != 0) {
1538 lck_mtx_unlock(&bucket
->ftb_mtx
);
1539 fasttrap_provider_free(new_fp
);
1544 new_fp
->ftp_next
= bucket
->ftb_data
;
1545 bucket
->ftb_data
= new_fp
;
1547 lck_mtx_lock(&new_fp
->ftp_mtx
);
1548 lck_mtx_unlock(&bucket
->ftb_mtx
);
1555 fasttrap_provider_free(fasttrap_provider_t
*provider
)
1557 pid_t pid
= provider
->ftp_pid
;
1561 * There need to be no associated enabled probes, no consumers
1562 * creating probes, and no meta providers referencing this provider.
1564 ASSERT(provider
->ftp_rcount
== 0);
1565 ASSERT(provider
->ftp_ccount
== 0);
1566 ASSERT(provider
->ftp_mcount
== 0);
1568 fasttrap_proc_release(provider
->ftp_proc
);
1570 #if defined(__APPLE__)
1572 * Apple explicit lock management. Not 100% certain we need this, the
1573 * memory is freed even without the destroy. Maybe accounting cleanup?
1575 lck_mtx_destroy(&provider
->ftp_mtx
, fasttrap_lck_grp
);
1576 lck_mtx_destroy(&provider
->ftp_cmtx
, fasttrap_lck_grp
);
1579 kmem_free(provider
, sizeof (fasttrap_provider_t
));
1582 * Decrement p_dtrace_probes on the process whose provider we're
1583 * freeing. We don't have to worry about clobbering someone else's
1584 * modifications to it because we have locked the bucket that
1585 * corresponds to this process's hash chain in the provider hash
1586 * table. Don't sweat it if we can't find the process.
1588 if ((p
= proc_find(pid
)) == NULL
) {
1593 p
->p_dtrace_probes
--;
1600 fasttrap_provider_retire(pid_t pid
, const char *name
, int mprov
)
1602 fasttrap_provider_t
*fp
;
1603 fasttrap_bucket_t
*bucket
;
1604 dtrace_provider_id_t provid
;
1606 ASSERT(strlen(name
) < sizeof (fp
->ftp_name
));
1608 bucket
= &fasttrap_provs
.fth_table
[FASTTRAP_PROVS_INDEX(pid
, name
)];
1609 lck_mtx_lock(&bucket
->ftb_mtx
);
1611 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1612 if (fp
->ftp_pid
== pid
&& strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1618 lck_mtx_unlock(&bucket
->ftb_mtx
);
1622 lck_mtx_lock(&fp
->ftp_mtx
);
1623 ASSERT(!mprov
|| fp
->ftp_mcount
> 0);
1624 if (mprov
&& --fp
->ftp_mcount
!= 0) {
1625 lck_mtx_unlock(&fp
->ftp_mtx
);
1626 lck_mtx_unlock(&bucket
->ftb_mtx
);
1631 * Mark the provider to be removed in our post-processing step,
1632 * mark it retired, and mark its proc as defunct (though it may
1633 * already be marked defunct by another provider that shares the
1634 * same proc). Marking it indicates that we should try to remove it;
1635 * setting the retired flag indicates that we're done with this
1636 * provider; setting the proc to be defunct indicates that all
1637 * tracepoints associated with the traced process should be ignored.
1639 * We obviously need to take the bucket lock before the provider lock
1640 * to perform the lookup, but we need to drop the provider lock
1641 * before calling into the DTrace framework since we acquire the
1642 * provider lock in callbacks invoked from the DTrace framework. The
1643 * bucket lock therefore protects the integrity of the provider hash
1646 fp
->ftp_proc
->ftpc_defunct
= 1;
1647 fp
->ftp_retired
= 1;
1649 provid
= fp
->ftp_provid
;
1650 lck_mtx_unlock(&fp
->ftp_mtx
);
1653 * We don't have to worry about invalidating the same provider twice
1654 * since fasttrap_provider_lookup() will ignore providers that have
1655 * been marked as retired.
1657 dtrace_invalidate(provid
);
1659 lck_mtx_unlock(&bucket
->ftb_mtx
);
1661 fasttrap_pid_cleanup();
1665 fasttrap_add_probe(fasttrap_probe_spec_t
*pdata
)
1667 fasttrap_provider_t
*provider
;
1668 fasttrap_probe_t
*pp
;
1669 fasttrap_tracepoint_t
*tp
;
1671 unsigned int i
, aframes
, whack
;
1673 #if defined(__APPLE__)
1674 switch (pdata
->ftps_probe_type
) {
1678 aframes
= FASTTRAP_ENTRY_AFRAMES
;
1682 aframes
= FASTTRAP_RETURN_AFRAMES
;
1692 #if defined(__APPLE__)
1693 const char* provider_name
;
1694 switch (pdata
->ftps_provider_type
) {
1695 case DTFTP_PROVIDER_PID
:
1696 provider_name
= FASTTRAP_PID_NAME
;
1698 case DTFTP_PROVIDER_OBJC
:
1699 provider_name
= FASTTRAP_OBJC_NAME
;
1701 case DTFTP_PROVIDER_ONESHOT
:
1702 provider_name
= FASTTRAP_ONESHOT_NAME
;
1708 if ((provider
= fasttrap_provider_lookup(pdata
->ftps_pid
, pdata
->ftps_provider_type
,
1709 provider_name
, &pid_attr
)) == NULL
)
1711 #endif /* __APPLE__ */
1714 * Increment this reference count to indicate that a consumer is
1715 * actively adding a new probe associated with this provider. This
1716 * prevents the provider from being deleted -- we'll need to check
1717 * for pending deletions when we drop this reference count.
1719 provider
->ftp_ccount
++;
1720 lck_mtx_unlock(&provider
->ftp_mtx
);
1723 * Grab the creation lock to ensure consistency between calls to
1724 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1725 * other threads creating probes. We must drop the provider lock
1726 * before taking this lock to avoid a three-way deadlock with the
1729 lck_mtx_lock(&provider
->ftp_cmtx
);
1732 for (i
= 0; i
< pdata
->ftps_noffs
; i
++) {
1735 (void) snprintf(name_str
, sizeof(name_str
), "%llx",
1736 (unsigned long long)pdata
->ftps_offs
[i
]);
1738 if (dtrace_probe_lookup(provider
->ftp_provid
,
1739 pdata
->ftps_mod
, pdata
->ftps_func
, name_str
) != 0)
1742 atomic_add_32(&fasttrap_total
, 1);
1744 if (fasttrap_total
> fasttrap_max
) {
1745 atomic_add_32(&fasttrap_total
, -1);
1749 #if !defined(__APPLE__)
1750 pp
= kmem_zalloc(sizeof (fasttrap_probe_t
), KM_SLEEP
);
1753 pp
= zalloc(fasttrap_probe_t_zones
[1]);
1754 bzero(pp
, sizeof (fasttrap_probe_t
));
1757 pp
->ftp_prov
= provider
;
1758 pp
->ftp_faddr
= pdata
->ftps_pc
;
1759 pp
->ftp_fsize
= pdata
->ftps_size
;
1760 pp
->ftp_pid
= pdata
->ftps_pid
;
1763 #if !defined(__APPLE__)
1764 tp
= kmem_zalloc(sizeof (fasttrap_tracepoint_t
), KM_SLEEP
);
1766 tp
= zalloc(fasttrap_tracepoint_t_zone
);
1767 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
1770 tp
->ftt_proc
= provider
->ftp_proc
;
1771 tp
->ftt_pc
= pdata
->ftps_offs
[i
] + pdata
->ftps_pc
;
1772 tp
->ftt_pid
= pdata
->ftps_pid
;
1775 pp
->ftp_tps
[0].fit_tp
= tp
;
1776 pp
->ftp_tps
[0].fit_id
.fti_probe
= pp
;
1777 #if defined(__APPLE__)
1778 pp
->ftp_tps
[0].fit_id
.fti_ptype
= pdata
->ftps_probe_type
;
1780 pp
->ftp_id
= dtrace_probe_create(provider
->ftp_provid
,
1781 pdata
->ftps_mod
, pdata
->ftps_func
, name_str
,
1782 FASTTRAP_OFFSET_AFRAMES
, pp
);
1785 } else if (dtrace_probe_lookup(provider
->ftp_provid
, pdata
->ftps_mod
,
1786 pdata
->ftps_func
, name
) == 0) {
1787 atomic_add_32(&fasttrap_total
, pdata
->ftps_noffs
);
1789 if (fasttrap_total
> fasttrap_max
) {
1790 atomic_add_32(&fasttrap_total
, -pdata
->ftps_noffs
);
1794 ASSERT(pdata
->ftps_noffs
> 0);
1795 #if !defined(__APPLE__)
1796 pp
= kmem_zalloc(offsetof(fasttrap_probe_t
,
1797 ftp_tps
[pdata
->ftps_noffs
]), KM_SLEEP
);
1800 if (pdata
->ftps_noffs
< FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
) {
1801 pp
= zalloc(fasttrap_probe_t_zones
[pdata
->ftps_noffs
]);
1802 bzero(pp
, offsetof(fasttrap_probe_t
, ftp_tps
[pdata
->ftps_noffs
]));
1804 pp
= kmem_zalloc(offsetof(fasttrap_probe_t
, ftp_tps
[pdata
->ftps_noffs
]), KM_SLEEP
);
1808 pp
->ftp_prov
= provider
;
1809 pp
->ftp_faddr
= pdata
->ftps_pc
;
1810 pp
->ftp_fsize
= pdata
->ftps_size
;
1811 pp
->ftp_pid
= pdata
->ftps_pid
;
1812 pp
->ftp_ntps
= pdata
->ftps_noffs
;
1814 for (i
= 0; i
< pdata
->ftps_noffs
; i
++) {
1815 #if !defined(__APPLE__)
1816 tp
= kmem_zalloc(sizeof (fasttrap_tracepoint_t
), KM_SLEEP
);
1818 tp
= zalloc(fasttrap_tracepoint_t_zone
);
1819 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
1822 tp
->ftt_proc
= provider
->ftp_proc
;
1823 tp
->ftt_pc
= pdata
->ftps_offs
[i
] + pdata
->ftps_pc
;
1824 tp
->ftt_pid
= pdata
->ftps_pid
;
1826 pp
->ftp_tps
[i
].fit_tp
= tp
;
1827 pp
->ftp_tps
[i
].fit_id
.fti_probe
= pp
;
1828 #if defined(__APPLE__)
1829 pp
->ftp_tps
[i
].fit_id
.fti_ptype
= pdata
->ftps_probe_type
;
1833 pp
->ftp_id
= dtrace_probe_create(provider
->ftp_provid
,
1834 pdata
->ftps_mod
, pdata
->ftps_func
, name
, aframes
, pp
);
1837 lck_mtx_unlock(&provider
->ftp_cmtx
);
1840 * We know that the provider is still valid since we incremented the
1841 * creation reference count. If someone tried to clean up this provider
1842 * while we were using it (e.g. because the process called exec(2) or
1843 * exit(2)), take note of that and try to clean it up now.
1845 lck_mtx_lock(&provider
->ftp_mtx
);
1846 provider
->ftp_ccount
--;
1847 whack
= provider
->ftp_retired
;
1848 lck_mtx_unlock(&provider
->ftp_mtx
);
1851 fasttrap_pid_cleanup();
1857 * If we've exhausted the allowable resources, we'll try to remove
1858 * this provider to free some up. This is to cover the case where
1859 * the user has accidentally created many more probes than was
1860 * intended (e.g. pid123:::).
1862 lck_mtx_unlock(&provider
->ftp_cmtx
);
1863 lck_mtx_lock(&provider
->ftp_mtx
);
1864 provider
->ftp_ccount
--;
1865 provider
->ftp_marked
= 1;
1866 lck_mtx_unlock(&provider
->ftp_mtx
);
1868 fasttrap_pid_cleanup();
1875 fasttrap_meta_provide(void *arg
, dtrace_helper_provdesc_t
*dhpv
, pid_t pid
)
1878 fasttrap_provider_t
*provider
;
1881 * A 32-bit unsigned integer (like a pid for example) can be
1882 * expressed in 10 or fewer decimal digits. Make sure that we'll
1883 * have enough space for the provider name.
1885 if (strlen(dhpv
->dthpv_provname
) + 10 >=
1886 sizeof (provider
->ftp_name
)) {
1887         cmn_err(CE_WARN, "failed to instantiate provider %s: "
1888             "name too long to accommodate pid", dhpv->dthpv_provname);
1893 * Don't let folks spoof the true pid provider.
1895 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_PID_NAME
, sizeof(FASTTRAP_PID_NAME
)) == 0) {
1896 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
1897 "%s is an invalid name", dhpv
->dthpv_provname
,
1901 #if defined(__APPLE__)
1903 * We also need to check the other pid provider types
1905 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_OBJC_NAME
, sizeof(FASTTRAP_OBJC_NAME
)) == 0) {
1906 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
1907 "%s is an invalid name", dhpv
->dthpv_provname
,
1908 FASTTRAP_OBJC_NAME
);
1911 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_ONESHOT_NAME
, sizeof(FASTTRAP_ONESHOT_NAME
)) == 0) {
1912 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
1913 "%s is an invalid name", dhpv
->dthpv_provname
,
1914 FASTTRAP_ONESHOT_NAME
);
1917 #endif /* __APPLE__ */
1920 * The highest stability class that fasttrap supports is ISA; cap
1921 * the stability of the new provider accordingly.
1923 if (dhpv
->dthpv_pattr
.dtpa_provider
.dtat_class
>= DTRACE_CLASS_COMMON
)
1924 dhpv
->dthpv_pattr
.dtpa_provider
.dtat_class
= DTRACE_CLASS_ISA
;
1925 if (dhpv
->dthpv_pattr
.dtpa_mod
.dtat_class
>= DTRACE_CLASS_COMMON
)
1926 dhpv
->dthpv_pattr
.dtpa_mod
.dtat_class
= DTRACE_CLASS_ISA
;
1927 if (dhpv
->dthpv_pattr
.dtpa_func
.dtat_class
>= DTRACE_CLASS_COMMON
)
1928 dhpv
->dthpv_pattr
.dtpa_func
.dtat_class
= DTRACE_CLASS_ISA
;
1929 if (dhpv
->dthpv_pattr
.dtpa_name
.dtat_class
>= DTRACE_CLASS_COMMON
)
1930 dhpv
->dthpv_pattr
.dtpa_name
.dtat_class
= DTRACE_CLASS_ISA
;
1931 if (dhpv
->dthpv_pattr
.dtpa_args
.dtat_class
>= DTRACE_CLASS_COMMON
)
1932 dhpv
->dthpv_pattr
.dtpa_args
.dtat_class
= DTRACE_CLASS_ISA
;
1934 #if defined(__APPLE__)
1935 if ((provider
= fasttrap_provider_lookup(pid
, DTFTP_PROVIDER_USDT
, dhpv
->dthpv_provname
,
1936 &dhpv
->dthpv_pattr
)) == NULL
) {
1937 cmn_err(CE_WARN
, "failed to instantiate provider %s for "
1938 "process %u", dhpv
->dthpv_provname
, (uint_t
)pid
);
1945 * USDT probes (fasttrap meta probes) are very expensive to create.
1946 * Profiling has shown that the largest single cost is verifying that
1947 * dtrace hasn't already created a given meta_probe. The reason for
1948 * this is dtrace_match() often has to strcmp ~100 hashed entries for
1949 * each static probe being created. We want to get rid of that check.
1950 * The simplest way of eliminating it is to deny the ability to add
1951 * probes to an existing provider. If the provider already exists, BZZT!
1952 * This still leaves the possibility of intentionally malformed DOF
1953 * having duplicate probes. However, duplicate probes are not fatal,
1954 * and there is no way to get that by accident, so we will not check
1958 if (provider
->ftp_mcount
!= 0) {
1959 /* This is the duplicate provider case. */
1960 lck_mtx_unlock(&provider
->ftp_mtx
);
1963 #endif /* __APPLE__ */
1966 * Up the meta provider count so this provider isn't removed until
1967 * the meta provider has been told to remove it.
1969 provider
->ftp_mcount
++;
1971 lck_mtx_unlock(&provider
->ftp_mtx
);
1978 fasttrap_meta_create_probe(void *arg
, void *parg
,
1979 dtrace_helper_probedesc_t
*dhpb
)
1982 fasttrap_provider_t
*provider
= parg
;
1983 fasttrap_probe_t
*pp
;
1984 fasttrap_tracepoint_t
*tp
;
1989 * Since the meta provider count is non-zero we don't have to worry
1990 * about this provider disappearing.
1992 ASSERT(provider
->ftp_mcount
> 0);
1995 * Grab the creation lock to ensure consistency between calls to
1996 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1997 * other threads creating probes.
1999 lck_mtx_lock(&provider
->ftp_cmtx
);
2001 #if !defined(__APPLE__)
2003 * APPLE NOTE: This is hideously expensive. See note in
2004 * fasttrap_meta_provide() for why we can get away without
2007 if (dtrace_probe_lookup(provider
->ftp_provid
, dhpb
->dthpb_mod
,
2008 dhpb
->dthpb_func
, dhpb
->dthpb_name
) != 0) {
2009 lck_mtx_unlock(&provider
->ftp_cmtx
);
2014 ntps
= dhpb
->dthpb_noffs
+ dhpb
->dthpb_nenoffs
;
2017 atomic_add_32(&fasttrap_total
, ntps
);
2019 if (fasttrap_total
> fasttrap_max
) {
2020 atomic_add_32(&fasttrap_total
, -ntps
);
2021 lck_mtx_unlock(&provider
->ftp_cmtx
);
2025 #if !defined(__APPLE__)
2026 pp
= kmem_zalloc(offsetof(fasttrap_probe_t
, ftp_tps
[ntps
]), KM_SLEEP
);
2029 if (ntps
< FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
) {
2030 pp
= zalloc(fasttrap_probe_t_zones
[ntps
]);
2031 bzero(pp
, offsetof(fasttrap_probe_t
, ftp_tps
[ntps
]));
2033 pp
= kmem_zalloc(offsetof(fasttrap_probe_t
, ftp_tps
[ntps
]), KM_SLEEP
);
2037 pp
->ftp_prov
= provider
;
2038 pp
->ftp_pid
= provider
->ftp_pid
;
2039 pp
->ftp_ntps
= ntps
;
2040 pp
->ftp_nargs
= dhpb
->dthpb_xargc
;
2041 pp
->ftp_xtypes
= dhpb
->dthpb_xtypes
;
2042 pp
->ftp_ntypes
= dhpb
->dthpb_ntypes
;
2045 * First create a tracepoint for each actual point of interest.
2047 for (i
= 0; i
< dhpb
->dthpb_noffs
; i
++) {
2048 #if !defined(__APPLE__)
2049 tp
= kmem_zalloc(sizeof (fasttrap_tracepoint_t
), KM_SLEEP
);
2051 tp
= zalloc(fasttrap_tracepoint_t_zone
);
2052 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
2055 tp
->ftt_proc
= provider
->ftp_proc
;
2056 #if defined(__APPLE__)
2058 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2059 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2060 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2062 #if defined(__i386__)
2064 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2066 tp
->ftt_pc
= dhpb
->dthpb_base
+ (int64_t)dhpb
->dthpb_offs
[i
] - 1;
2067 #elif defined(__ppc__)
2068 /* All PPC probes are zero offset. */
2069 tp
->ftt_pc
= dhpb
->dthpb_base
+ (int64_t)dhpb
->dthpb_offs
[i
];
2071 #error "Architecture not supported"
2075 tp
->ftt_pc
= dhpb
->dthpb_base
+ dhpb
->dthpb_offs
[i
];
2077 tp
->ftt_pid
= provider
->ftp_pid
;
2079 pp
->ftp_tps
[i
].fit_tp
= tp
;
2080 pp
->ftp_tps
[i
].fit_id
.fti_probe
= pp
;
2082 pp
->ftp_tps
[i
].fit_id
.fti_ptype
= DTFTP_POST_OFFSETS
;
2084 pp
->ftp_tps
[i
].fit_id
.fti_ptype
= DTFTP_OFFSETS
;
	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
#if !defined(__APPLE__)
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
#else
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));
#endif

		tp->ftt_proc = provider->ftp_proc;
#if defined(__APPLE__)
		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__i386__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__ppc__)
		/* All PPC is-enabled probes are zero offset. */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j];
#else
#error "Architecture not supported"
#endif
#else
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
#endif
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}
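	/*
	 * A note on is-enabled tracepoints: these back the FOO_ENABLED()-style
	 * test a USDT consumer compiles into its code. When the corresponding
	 * probe is enabled, the tracepoint makes that test report true;
	 * otherwise the site stays a cheap no-op. DTFTP_IS_ENABLED is how the
	 * ISA-specific code tells these apart from ordinary offset probes.
	 */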
	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}
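	/*
	 * For example (illustrative values only): if dthpb_args were
	 * { 2, 0, 1 }, probe argument 0 would be served from native argument
	 * 2 and argument 1 from native argument 0; since at least one entry
	 * differs from its index, ftp_argmap is set and the remapping is
	 * applied when the probe fires.
	 */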
	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}
static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};
static int
fasttrap_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		// FIXME! What size is arg? If it is not 64 bit, how do we pass in a 64 bit value?
		fasttrap_probe_spec_t *uprobe = (void *)arg;
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size, i;
		int ret;
		char *c;

		/*
		 * FIXME! How does this work? The kern is running in 32 bit mode. It has a 32 bit pointer,
		 * uprobe. We do address manipulations on it, and still have a 64 bit value? This seems
		 * broken. What is the right way to do this?
		 */
		if (copyin((user_addr_t)(unsigned long)&uprobe->ftps_noffs, &noffs,
		    sizeof (uprobe->ftps_noffs)))
			return (EFAULT);
		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
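		/*
		 * Note: fasttrap_probe_spec_t already embeds one ftps_offs
		 * element, which is why only (noffs - 1) additional slots are
		 * added above; e.g. (illustrative values only) noffs == 4
		 * allocates the fixed header plus three extra offset entries.
		 */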
		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin((user_addr_t)(unsigned long)uprobe, probe, size) != 0) {
			kmem_free(probe, size);
			return (EFAULT);
		}
		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		for (i = 0, c = &probe->ftps_func[0]; i < sizeof(probe->ftps_func) && *c != '\0'; i++, c++) {
			if (*c < 0x20 || 0x7f <= *c) {
				kmem_free(probe, size);
				return (EINVAL);
			}
		}

		for (i = 0, c = &probe->ftps_mod[0]; i < sizeof(probe->ftps_mod) && *c != '\0'; i++, c++) {
			if (*c < 0x20 || 0x7f <= *c) {
				kmem_free(probe, size);
				return (EINVAL);
			}
		}
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				kmem_free(probe, size);
				return (ESRCH);
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD | VWRITE)) != 0) {
			//	mutex_exit(&p->p_lock);
			//	return (ret);
			// }
			proc_rele(p);
		}

		ret = fasttrap_add_probe(probe);

		kmem_free(probe, size);
		return (ret);
	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;

		if (copyin((user_addr_t)(unsigned long)arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD)) != 0) {
			//	mutex_exit(&p->p_lock);
			//	return (ret);
			// }
			proc_rele(p);
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    !tp->ftt_proc->ftpc_defunct)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, (user_addr_t)(unsigned long)arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}
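/*
 * Illustrative sketch of how a consumer might drive the ioctls above
 * (assumed usage, not shown in this file): fill in a fasttrap_instr_query_t
 * and ask for the original instruction saved at a traced pc, e.g.:
 *
 *	fasttrap_instr_query_t q;
 *	q.ftiq_pid = pid;
 *	q.ftiq_pc = pc;
 *	if (ioctl(fd, FASTTRAPIOC_GETINSTR, &q) == 0)
 *		saved = q.ftiq_instr;
 *
 * FASTTRAPIOC_MAKEPROBE is driven the same way, passing a
 * fasttrap_probe_spec_t (ftps_pid, ftps_mod, ftps_func, ftps_noffs,
 * ftps_offs[]) that describes the tracepoints to create.
 */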
static int
fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	ulong_t nent;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	fasttrap_devi = devi;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
#if !defined(__APPLE__)
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#else
	/*
	 * We're sizing based on system memory. 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;
	if (fasttrap_max == 0)
		fasttrap_max = 50000;
#endif
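	/*
	 * For example, with the Apple sizing above: a machine with 1GB of
	 * physical memory gives sane_size >> 28 == 4, so fasttrap_max is
	 * 400000; anything under 256MB shifts down to 0 and falls back to
	 * the 50000 floor.
	 */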
	/*
	 * Conjure up the tracepoints hashtable...
	 */
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);
#if defined(__APPLE__)
	/*
	 * We have to explicitly initialize all locks...
	 */
	unsigned int i;
	for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}
#endif
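	/*
	 * Example of the rounding above, assuming fasttrap_highbit() follows
	 * the Solaris highbit() convention of returning the 1-based index of
	 * the highest set bit: a requested size of 1000 is not a power of
	 * two, so fth_nent becomes 1 << 10 == 1024 and fth_mask == 1023,
	 * presumably what FASTTRAP_TPOINTS_INDEX() uses to reduce the pid/pc
	 * hash to a bucket index with a simple mask.
	 */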
	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);
#if defined(__APPLE__)
	/*
	 * We have to explicitly initialize all locks...
	 */
	for (i=0; i<fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}
#endif
	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);
#if defined(__APPLE__)
	/*
	 * We have to explicitly initialize all locks...
	 */
	for (i=0; i<fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}
#endif

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (DDI_SUCCESS);
}
static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}
static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
#pragma unused(p)
	int err, rv = 0;

	/*
	 * FIXME! 64 bit problem with the data var.
	 */
	err = fasttrap_ioctl(dev, (int)cmd, *(intptr_t *)data, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	}

	return 0;
}
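/*
 * For example: a plain error such as EINVAL (22) comes back to user space as
 * errno 22, well below 4096; if fasttrap_ioctl() ever set *rv to, say, 5000,
 * the caller would instead see 5000 << 12, which it can recognize (and
 * decode by shifting right 12) because it is >= 4096.
 */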
static int gFasttrapInited = 0;

#define FASTTRAP_MAJOR  -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */

static struct cdevsw fasttrap_cdevsw =
{
	_fasttrap_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	_fasttrap_ioctl,	/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
void fasttrap_init(void);

void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places: any open of /dev/dtrace,
	 * and also dtrace_init() if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (0 == gFasttrapInited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
			return;
		}

		/*
		 * Allocate the fasttrap_tracepoint_t zone
		 */
		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
						   1024 * sizeof(fasttrap_tracepoint_t),
						   sizeof(fasttrap_tracepoint_t),
						   "dtrace.fasttrap_tracepoint_t");

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		int i;
		for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
							  1024 * zone_element_size,
							  zone_element_size,
							  fasttrap_probe_t_zone_names[i]);
		}

		/*
		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
		 */
		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);

		/*
		 * Initialize global locks
		 */
		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		if (DDI_FAILURE == fasttrap_attach((dev_info_t *)device, 0 )) {
			// FIX ME! Do we remove the devfs node here?
			// What kind of error reporting?
			printf("fasttrap_init: Call to fasttrap_attach failed.\n");
			return;
		}

		gFasttrapInited = 1;
	}
}

#undef FASTTRAP_MAJOR