/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident	"@(#)fasttrap.c	1.26	08/04/21 SMI" */
#include <sys/types.h>
#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <security/mac_framework.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>
extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * intended execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead up a reference count to prevent
 * the provider from disappearing, drop the provider lock, and acquire the
 * creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
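/*
 * For a concrete instance of these rules, see fasttrap_provider_retire()
 * below: it takes the bucket lock, then the provider lock, drops the
 * provider lock before calling into the DTrace framework via
 * dtrace_invalidate(), and releases the bucket lock last.
 */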
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static lck_mtx_t fasttrap_cleanup_mtx;

#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;
/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;
/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */

static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;
#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t			fasttrap_tpoints;
static fasttrap_hash_t		fasttrap_provs;
static fasttrap_hash_t		fasttrap_procs;

static uint64_t			fasttrap_pid_count;	/* pid ref count */
static lck_mtx_t		fasttrap_count_mtx;	/* lock on ref count */
#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t *, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t *, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);
#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
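/*
 * Both macros mask a hash value down to a bucket index, which assumes the
 * table sizes above remain powers of two (fth_mask == fth_nent - 1).
 * Mixing the name hash with the pid spreads one process's providers
 * across buckets, e.g. FASTTRAP_PROVS_INDEX(pid, "pid") and
 * FASTTRAP_PROVS_INDEX(pid, "oneshot") usually land in different chains.
 */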
/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

struct zone *fasttrap_tracepoint_t_zone;
/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};
/*
 * APPLE NOTE: We have to manage locks explicitly
 */
lck_grp_t *fasttrap_lck_grp;
lck_grp_attr_t *fasttrap_lck_grp_attr;
lck_attr_t *fasttrap_lck_attr;
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}
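/*
 * This is the classic ELF/PJW string hash: shift in four bits per
 * character and fold any bits that overflow the top nibble back into the
 * low bits, which keeps the distribution reasonable for short ASCII
 * provider names.
 */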
/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}
/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}
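/*
 * Taking and immediately dropping every CPU's cpuc_pid_lock acts as a
 * rendezvous: pid-probe dispatch holds its CPU's lock while it examines
 * tracepoint data, so once this loop completes no CPU can still be
 * working on data from a generation older than the one just bumped.
 */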
static void fasttrap_pid_cleanup(uint32_t);
static unsigned int
fasttrap_pid_cleanup_providers(void)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {
				fpp = &fp->ftp_next;
				continue;
			}

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				fp->ftp_marked = 0;
				lck_mtx_unlock(&fp->ftp_mtx);
				continue;
			}

			if (!fp->ftp_retired || fp->ftp_rcount != 0)
				fp->ftp_marked = 0;

			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	return later;
}
typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static lck_mtx_t fasttrap_retired_mtx;

#define DEFAULT_RETIRED_SIZE 256
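/*
 * Retired tracepoints are batched into this growable array by
 * fasttrap_tracepoint_retire() and drained asynchronously by
 * fasttrap_tracepoint_cleanup(); fasttrap_retired_mtx guards both the
 * array and the fasttrap_cur_retired cursor.
 */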
static void
fasttrap_tracepoint_cleanup(void)
{
	size_t i;
	pid_t pid = 0;
	user_addr_t pc;
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
				sprunlock(p);
			}
			if ((p = sprlock(pid)) == PROC_NULL) {
				pid = 0;
				continue;
			}
		}
		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;
		}
		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);
			continue;
		}
		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);
	}
	if (p != PROC_NULL) {
		sprunlock(p);
	}

	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
}
void
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
{
	if (tp->ftt_retired)
		return;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = p->p_pid;
	s->fttps_pc = tp->ftt_pc;

	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
					fasttrap_retired_size * 2 *
					sizeof(*fasttrap_retired_spec),
					KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		fasttrap_retired_size *= 2;
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	tp->ftt_retired = 1;

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}
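/*
 * The spec array doubles whenever it fills, so recording a retirement is
 * amortized constant work; the common case is a single append under
 * fasttrap_retired_mtx.
 */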
static void
fasttrap_pid_cleanup_compute_priority(void)
{
	if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
		thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
	else {
		thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
}
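/*
 * In other words: once more than 90% of fasttrap_max is consumed, or more
 * than half the budget sits in retired-but-unreclaimed probes, the
 * cleanup thread is boosted so reclamation can keep pace with probe
 * creation; otherwise it runs at a user-initiated precedence.
 */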
/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
__attribute__((noreturn))
static void
fasttrap_pid_cleanup_cb(void)
{
	uint32_t work = 0;
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
	while (1) {
		unsigned int later = 0;

		work = atomic_and_32(&fasttrap_cleanup_work, 0);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
		}
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();
		}
		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled
			 * (if detach fails).
			 */
			if (later > 0) {
				struct timespec t = {1, 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
			}
			else
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		}
	}
}
/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	atomic_or_32(&fasttrap_cleanup_work, work);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}
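/*
 * Callers simply OR in the kind of work -- FASTTRAP_CLEANUP_PROVIDER or
 * FASTTRAP_CLEANUP_TRACEPOINT -- and wake the cleanup thread; the flags
 * are consumed atomically in fasttrap_pid_cleanup_cb() above.
 */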
/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	unsigned int i;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(cp->p_pid)) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
		return;
	}

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * during this operation.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	sprunlock(cp);
}
/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);

	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	proc_lock(p);
}
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}
static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;
	int rc = 0;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	//ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;
		}

		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		tp->ftt_retired = 0;

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return rc;
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;

		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}
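/*
 * The dtrace_membar_producer() pairs above keep a probe firing on another
 * CPU from ever seeing a half-linked list: an id's fti_next pointer (or a
 * new tracepoint's ftt_next) is globally visible before the list head is
 * updated to point at it.
 */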
static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		/* Fix compiler warning... */
		idp = NULL;
		ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}
static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken, it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return (0);

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return process's with SIDL set, but we always return
		 * the child process.
		 */
		return (0);
	}

	if ((p->p_csflags & (CS_KILL|CS_HARD))) {
		for (i = 0; i < DTRACE_NCLIENTS; i++) {
			dtrace_state_t *state = dtrace_state_get(i);
			if (state == NULL)
				continue;
			if (state->dts_cred.dcr_cred == NULL)
				continue;
			mac_proc_check_get_task(state->dts_cred.dcr_cred, p);
		}
		rc = cs_allow_invalid(p);
		if (rc == 0) {
			sprunlock(p);
			cmn_err(CE_WARN, "process doesn't allow invalid code pages, failing to install fasttrap probe\n");
			return (0);
		}
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	// ASSERT(!(p->p_flag & SVFORK));

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return (0);
		}
	}

	sprunlock(p);

	probe->ftp_enabled = 1;
	return (0);
}
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
		// ASSERT(!(p->p_flag & SVFORK));
	}

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != PROC_NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack) {
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
	}

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}
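/*
 * ftp_ntypes and ftp_xtypes are packed sequences of NUL-terminated type
 * strings, so selecting the ndx'th entry means skipping ndx strings of
 * strlen(str) + 1 bytes each, as the loops above do.
 */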
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
	atomic_add_32(&fasttrap_retired, -probe->ftp_ntps);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
	}

	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
}
static const dtrace_pattr_t pid_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};
static dtrace_pops_t pid_pops = {
	.dtps_provide =		fasttrap_pid_provide,
	.dtps_provide_module =	NULL,
	.dtps_enable =		fasttrap_pid_enable,
	.dtps_disable =		fasttrap_pid_disable,
	.dtps_suspend =		NULL,
	.dtps_resume =		NULL,
	.dtps_getargdesc =	fasttrap_pid_getargdesc,
	.dtps_getargval =	fasttrap_pid_getarg,
	.dtps_usermode =	NULL,
	.dtps_destroy =		fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	.dtps_provide =		fasttrap_pid_provide,
	.dtps_provide_module =	NULL,
	.dtps_enable =		fasttrap_pid_enable,
	.dtps_disable =		fasttrap_pid_disable,
	.dtps_suspend =		NULL,
	.dtps_resume =		NULL,
	.dtps_getargdesc =	fasttrap_pid_getargdesc,
	.dtps_getargval =	fasttrap_usdt_getarg,
	.dtps_usermode =	NULL,
	.dtps_destroy =		fasttrap_pid_destroy
};
*
1417 fasttrap_proc_lookup(pid_t pid
)
1419 fasttrap_bucket_t
*bucket
;
1420 fasttrap_proc_t
*fprc
, *new_fprc
;
1422 bucket
= &fasttrap_procs
.fth_table
[FASTTRAP_PROCS_INDEX(pid
)];
1423 lck_mtx_lock(&bucket
->ftb_mtx
);
1425 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1426 if (fprc
->ftpc_pid
== pid
&& fprc
->ftpc_acount
!= 0) {
1427 lck_mtx_lock(&fprc
->ftpc_mtx
);
1428 lck_mtx_unlock(&bucket
->ftb_mtx
);
1429 fprc
->ftpc_rcount
++;
1430 atomic_add_64(&fprc
->ftpc_acount
, 1);
1431 ASSERT(fprc
->ftpc_acount
<= fprc
->ftpc_rcount
);
1432 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1439 * Drop the bucket lock so we don't try to perform a sleeping
1440 * allocation under it.
1442 lck_mtx_unlock(&bucket
->ftb_mtx
);
1444 new_fprc
= kmem_zalloc(sizeof (fasttrap_proc_t
), KM_SLEEP
);
1445 ASSERT(new_fprc
!= NULL
);
1446 new_fprc
->ftpc_pid
= pid
;
1447 new_fprc
->ftpc_rcount
= 1;
1448 new_fprc
->ftpc_acount
= 1;
1450 lck_mtx_lock(&bucket
->ftb_mtx
);
1453 * Take another lap through the list to make sure a proc hasn't
1454 * been created for this pid while we weren't under the bucket lock.
1456 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1457 if (fprc
->ftpc_pid
== pid
&& fprc
->ftpc_acount
!= 0) {
1458 lck_mtx_lock(&fprc
->ftpc_mtx
);
1459 lck_mtx_unlock(&bucket
->ftb_mtx
);
1460 fprc
->ftpc_rcount
++;
1461 atomic_add_64(&fprc
->ftpc_acount
, 1);
1462 ASSERT(fprc
->ftpc_acount
<= fprc
->ftpc_rcount
);
1463 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1465 kmem_free(new_fprc
, sizeof (fasttrap_proc_t
));
1472 * APPLE NOTE: We have to initialize all locks explicitly
1474 lck_mtx_init(&new_fprc
->ftpc_mtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1476 new_fprc
->ftpc_next
= bucket
->ftb_data
;
1477 bucket
->ftb_data
= new_fprc
;
1479 lck_mtx_unlock(&bucket
->ftb_mtx
);
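/*
 * Note the optimistic allocation pattern above: kmem_zalloc(KM_SLEEP) can
 * block, so the bucket lock is dropped first and the chain re-scanned
 * afterwards; if another thread raced in and created the fasttrap_proc_t,
 * the fresh allocation is simply freed again.
 */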
static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);
		return;
	}

	lck_mtx_unlock(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}
/*
 * Lookup a fasttrap-managed provider based on its name and associated proc.
 * A reference to the proc must be held for the duration of the call.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
{
	pid_t pid = p->p_pid;
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    fp->ftp_provider_type == provider_type &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			return fp;
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	proc_lock(p);
	if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
		proc_unlock(p);
		return (NULL);
	}

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 * APPLE NOTE: We have no equivalent to crhold,
	 * even though there is a cr_ref field in ucred.
	 */
	cred = kauth_cred_proc_ref(p);
	proc_unlock(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = p->p_pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
	new_fp->ftp_provider_type = provider_type;

	/*
	 * APPLE NOTE: locks require explicit init
	 */
	lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);
			kauth_cred_unref(&cred);
			return fp;
		}
	}

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		kauth_cred_unref(&cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);

	kauth_cred_unref(&cred);
	return new_fp;
}
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {
		return;
	}

	proc_lock(p);
	p->p_dtrace_probes--;
	proc_unlock(p);

	proc_rele(p);
}
static void
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0)  {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will during
	 * exit or exec) the proc and associated providers become defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice
	 */
	atomic_add_32(&fasttrap_retired, fp->ftp_pcount);
	fp->ftp_pcount = 0;

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
}
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	return (*(const uint32_t *)ap - *(const uint32_t *)bp);
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	return (*(const uint64_t *)ap - *(const uint64_t *)bp);
}
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	proc_t *p;
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	const char *name;
	unsigned int i, aframes, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	switch (pdata->ftps_probe_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		aframes = 0;
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	const char* provider_name;
	switch (pdata->ftps_provider_type) {
	case DTFTP_PROVIDER_PID:
		provider_name = FASTTRAP_PID_NAME;
		break;
	case DTFTP_PROVIDER_OBJC:
		provider_name = FASTTRAP_OBJC_NAME;
		break;
	case DTFTP_PROVIDER_ONESHOT:
		provider_name = FASTTRAP_ONESHOT_NAME;
		break;
	default:
		return (EINVAL);
	}

	p = proc_find(pdata->ftps_pid);
	if (p == PROC_NULL)
		return (ESRCH);

	if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
	    provider_name, &pid_attr)) == NULL) {
		proc_rele(p);
		return (ESRCH);
	}

	proc_rele(p);
	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	lck_mtx_unlock(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	if (name == NULL) {
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) snprintf(name_str, sizeof(name_str), "%llx",
			    (uint64_t)pdata->ftps_offs[i]);

			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			atomic_add_32(&fasttrap_total, 1);
			if (fasttrap_total > fasttrap_max) {
				atomic_add_32(&fasttrap_total, -1);
				goto no_mem;
			}
			provider->ftp_pcount++;

			pp = zalloc(fasttrap_probe_t_zones[1]);
			bzero(pp, sizeof (fasttrap_probe_t));

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = zalloc(fasttrap_tracepoint_t_zone);
			bzero(tp, sizeof (fasttrap_tracepoint_t));

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined(__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes. On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		provider->ftp_pcount += pdata->ftps_noffs;
		ASSERT(pdata->ftps_noffs > 0);
		if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
			pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
			bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
		} else {
			pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
		}

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = zalloc(fasttrap_tracepoint_t_zone);
			bzero(tp, sizeof (fasttrap_tracepoint_t));
			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined (__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes. On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif
			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	lck_mtx_unlock(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	lck_mtx_unlock(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	lck_mtx_unlock(&provider->ftp_cmtx);
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	lck_mtx_unlock(&provider->ftp_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (ENOMEM);
}
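/*
 * Two creation paths above: with name == NULL each offset becomes its own
 * single-tracepoint probe named by the offset in hex, while entry/return
 * probes become one probe whose tracepoint array covers all of the
 * (sorted, unique) offsets.
 */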
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * APPLE NOTE: We also need to check the objc and oneshot pid provider types.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_OBJC_NAME);
		return (NULL);
	}
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_ONESHOT_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		cmn_err(CE_WARN, "failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
		return (NULL);
	}

	/*
	 * APPLE NOTE:
	 *
	 * USDT probes (fasttrap meta probes) are very expensive to create.
	 * Profiling has shown that the largest single cost is verifying that
	 * dtrace hasn't already created a given meta_probe. The reason for
	 * this is dtrace_match() often has to strcmp ~100 hashed entries for
	 * each static probe being created. We want to get rid of that check.
	 * The simplest way of eliminating it is to deny the ability to add
	 * probes to an existing provider. If the provider already exists, BZZT!
	 * This still leaves the possibility of intentionally malformed DOF
	 * having duplicate probes. However, duplicate probes are not fatal,
	 * and there is no way to get that by accident, so we will not check
	 * for that case.
	 *
	 * UPDATE: It turns out there are several use cases that require adding
	 * probes to existing providers. Disabling the dtrace_probe_lookup()
	 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
	 */

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	lck_mtx_unlock(&provider->ftp_mtx);

	return (provider);
}
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg)
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	/*
	 * APPLE NOTE: This lookup is hideously expensive, but see the
	 * UPDATE note in fasttrap_meta_provide() for why it is performed
	 * here anyway.
	 */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	provider->ftp_pcount += ntps;

	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc(fasttrap_probe_t_zones[ntps]);
		bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
	} else {
		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
	}

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}
static char*
fasttrap_meta_provider_name(void *arg)
{
	fasttrap_provider_t *fprovider = arg;
	dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
	return provider->dtpv_name;
}
static dtrace_mops_t fasttrap_mops = {
	.dtms_create_probe =	fasttrap_meta_create_probe,
	.dtms_provide_proc =	fasttrap_meta_provide,
	.dtms_remove_proc =	fasttrap_meta_remove,
	.dtms_provider_name =	fasttrap_meta_provider_name
};
/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or not a valid UTF8 string, the function returns -1. Otherwise, 0 is
 * returned.
 *
 * str: string to validate.
 * maxlen: maximal length of the string, null-terminated byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen)
{
	size_t len;

	assert(str);
	assert(maxlen != 0);

	/* Check if the string is null-terminated. */
	len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* Finally, check for UTF8 validity. */
	return utf8_validatestr((unsigned const char*) str, len);
}
static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;

		if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
		    sizeof (probe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);

		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(arg, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				ret = ESRCH;
				goto err;
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD | VWRITE)) != 0) {
			// 	mutex_exit(&p->p_lock);
			// 	return (ret);
			// }
			proc_rele(p);
		}

		ret = fasttrap_add_probe(probe);
err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;

		if (copyin(arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD)) != 0) {
			// 	mutex_exit(&p->p_lock);
			// 	return (ret);
			// }
			proc_rele(p);
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}
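
/*
 * Sizing example for FASTTRAPIOC_MAKEPROBE above: fasttrap_probe_spec_t
 * already contains storage for one offset, so for noffs = 3 the allocation
 * is sizeof (fasttrap_probe_spec_t) + 2 * sizeof (probe->ftps_offs[0]) --
 * hence the (noffs - 1) in the size computation, and the earlier bounds
 * check that keeps the total under 1MB before any multiplication occurs.
 */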
static void
fasttrap_attach(void)
{
	ulong_t nent;
	unsigned int i;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory: 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;

	if (fasttrap_max == 0)
		fasttrap_max = 50000;

	fasttrap_retired = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#ifdef illumos
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);

	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    fasttrap_lck_grp, fasttrap_lck_attr);
	}

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);
}
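
/*
 * Hash sizing sketch: each table is forced to a power-of-two number of
 * buckets so that fth_mask = fth_nent - 1 can select a bucket with a
 * simple AND. Assuming fasttrap_highbit() matches the classic Solaris
 * highbit() (1-indexed position of the highest set bit), a non-power-of-two
 * request is rounded up, e.g. nent = 1000 -> 1 << highbit(1000) =
 * 1 << 10 = 1024 buckets, mask 0x3ff.
 */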
static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}
static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	int err, rv = 0;
	user_addr_t uaddrp;

	if (proc_is64bit(p))
		uaddrp = *(user_addr_t *)data;
	else
		uaddrp = (user_addr_t) *(uint32_t *)data;

	err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	} else
		return 0;
}
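
/*
 * Encoding example for the errno overloading above (illustrative values):
 * fasttrap_ioctl() failing with EINVAL (22) surfaces to the caller as
 * errno 22, below the 4096 cutoff; a call that instead hands back rv = 5
 * surfaces as errno 5 << 12 = 20480, which a cooperating consumer decodes
 * by shifting right 12 bits.
 */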
static int fasttrap_inited = 0;

#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fasttrap_cdevsw =
{
	_fasttrap_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	_fasttrap_ioctl,	/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
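
/*
 * Only open and ioctl are live entry points in this cdevsw; every other
 * slot is an eno_*() stub or nulldev, so e.g. read(2) on /dev/fasttrap
 * simply fails. Consumers drive the provider entirely through the
 * FASTTRAPIOC_MAKEPROBE and FASTTRAPIOC_GETINSTR ioctls on that node.
 */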
void fasttrap_init(void);

void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places. Any open of /dev/dtrace,
	 * also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (!fasttrap_inited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
			return;
		}

		/*
		 * Allocate the fasttrap_tracepoint_t zone
		 */
		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
		    1024 * sizeof(fasttrap_tracepoint_t),
		    sizeof(fasttrap_tracepoint_t),
		    "dtrace.fasttrap_tracepoint_t");

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		int i;
		for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
			    1024 * zone_element_size,
			    zone_element_size,
			    fasttrap_probe_t_zone_names[i]);
		}

		/*
		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
		 */
		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);

		/*
		 * Initialize global locks
		 */
		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		fasttrap_attach();

		/*
		 * Start the fasttrap cleanup thread
		 */
		kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb,
		    NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
		if (res != KERN_SUCCESS) {
			panic("Could not create fasttrap_cleanup_thread");
		}
		thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");

		fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
		fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);
		lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		fasttrap_inited = 1;
	}
}
#undef FASTTRAP_MAJOR