/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident	"@(#)fasttrap.c	1.26	08/04/21 SMI" */
#include <sys/types.h>
#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>
extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc	/* Steer clear of the Darwin typedef for proc_t */
extern void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead up a reference count to prevent
 * the provider from disappearing, drop the provider lock, and acquire the
 * creation lock.
 *
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
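/*
 * Illustrative sketch only (not part of the original source): the
 * reference-count-then-drop pattern described above. A caller bumps a
 * reference under the provider lock, drops that lock, and only then takes
 * the creation lock, so that the two locks are never held simultaneously.
 * The function name is hypothetical; the fields and lock calls mirror the
 * ones used later in this file.
 */
static void
example_ccount_then_creation_lock(fasttrap_provider_t *fp)
{
	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount++;		/* keep the provider from being freed */
	lck_mtx_unlock(&fp->ftp_mtx);	/* never hold provider + creation locks together */

	lck_mtx_lock(&fp->ftp_cmtx);	/* creation lock may now be taken safely */
	/* ... calls into the DTrace framework would go here ... */
	lck_mtx_unlock(&fp->ftp_cmtx);

	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount--;		/* drop the reference taken above */
	lck_mtx_unlock(&fp->ftp_mtx);
}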
static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static lck_mtx_t fasttrap_cleanup_mtx;

#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;
/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;
/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;
#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t			fasttrap_tpoints;
static fasttrap_hash_t		fasttrap_provs;
static fasttrap_hash_t		fasttrap_procs;

static uint64_t			fasttrap_pid_count;	/* pid ref count */
static lck_mtx_t		fasttrap_count_mtx;	/* lock on ref count */
#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t *, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t *, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);
#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
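/*
 * Illustrative sketch only (not part of the original source): how an index
 * macro of this shape is typically used to pick a hash bucket and walk its
 * chain under the bucket lock. The helper name is hypothetical; the fields
 * and locking mirror the provider lookups that appear later in this file.
 */
static boolean_t
example_provider_exists(pid_t pid, const char *name)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *fp;
	boolean_t found = FALSE;

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0) {
			found = TRUE;
			break;
		}
	}
	lck_mtx_unlock(&bucket->ftb_mtx);
	return found;
}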
/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */
struct zone *fasttrap_tracepoint_t_zone;

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};
/*
 * APPLE NOTE: We have to manage locks explicitly
 */
lck_grp_t *fasttrap_lck_grp;
lck_grp_attr_t *fasttrap_lck_grp_attr;
lck_attr_t *fasttrap_lck_attr;
fasttrap_highbit(ulong_t i)
	if (i & 0xffffffff00000000ul) {
	if (i & 0xffff0000) {
fasttrap_hash_str(const char *p)
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
#pragma unused(p, t, pc)
#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	mutex_exit(&p->p_lock);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
fasttrap_mod_barrier(uint64_t gen)
	if (gen < fasttrap_mod_gen)

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
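/*
 * Illustrative sketch only (not part of the original source): how the
 * generation barrier above is meant to be used. A modified probe is tagged
 * with the generation in which it changed; before its memory is reused, the
 * barrier waits out any CPU that may still be executing a probe hit against
 * the old tracepoints. The helper name is hypothetical; the same pattern
 * appears in fasttrap_pid_destroy() below.
 */
static void
example_wait_for_stale_probe_hits(fasttrap_probe_t *probe)
{
	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);
	/* ... it is now safe to free the probe's tracepoint memory ... */
}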
static void fasttrap_pid_cleanup(uint32_t);
fasttrap_pid_cleanup_providers(void)
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				lck_mtx_unlock(&fp->ftp_mtx);

			if (!fp->ftp_retired || fp->ftp_rcount != 0)

			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;

			fasttrap_provider_free(fp);

		lck_mtx_unlock(&bucket->ftb_mtx);
#ifdef FASTTRAP_ASYNC_REMOVE
typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static lck_mtx_t fasttrap_retired_mtx;

#define DEFAULT_RETIRED_SIZE 256
fasttrap_tracepoint_cleanup(void)
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
			if ((p = sprlock(pid)) == PROC_NULL) {

		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)

		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);

		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);

	if (p != PROC_NULL) {

	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = p->p_pid;
	s->fttps_pc = tp->ftt_pc;

	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_retired_size *= 2;
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
					fasttrap_retired_size *
					sizeof(fasttrap_tracepoint_t*),
					KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(fasttrap_tracepoint_t*) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(fasttrap_tracepoint_t*) * (fasttrap_retired_size / 2));
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
void fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
	fasttrap_tracepoint_remove(p, tp);
fasttrap_pid_cleanup_compute_priority(void)
	if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
		thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	} else {
		thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
__attribute__((noreturn))
fasttrap_pid_cleanup_cb(void)
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		unsigned int later = 0;

		work = atomic_and_32(&fasttrap_cleanup_work, 0);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
#ifdef FASTTRAP_ASYNC_REMOVE
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();

		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled.
			 */
				struct timespec t = {1, 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);

				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
/*
 * Activates the asynchronous cleanup mechanism.
 */
fasttrap_pid_cleanup(uint32_t work)
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	atomic_or_32(&fasttrap_cleanup_work, work);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
fasttrap_fork(proc_t *p, proc_t *cp)
	pid_t ppid = p->p_pid;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(cp->p_pid)) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * here.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);

		lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);
/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
fasttrap_exec_exit(proc_t *p)
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);

	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock.
	 */

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	//ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			dtrace_membar_producer();

		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			dtrace_membar_producer();

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_IS_ENABLED:
		new_tp->ftt_ids = id;

	case DTFTP_POST_OFFSETS:
		new_tp->ftt_retids = id;

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	switch (id->fti_ptype) {
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);

	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;

		/* Fix compiler warning... */

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);

	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;

				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);

	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
fasttrap_enable_callbacks(void)
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;

	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
fasttrap_disable_callbacks(void)
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken, it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);

	lck_mtx_unlock(&fasttrap_count_mtx);
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);

	// ASSERT(!(p->p_flag & SVFORK));

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)

			ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
				fasttrap_tracepoint_disable(p, probe, i);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();

	probe->ftp_enabled = 1;
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
		// ASSERT(!(p->p_flag & SVFORK));

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	if (!probe->ftp_enabled)

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
	atomic_add_32(&fasttrap_retired, -probe->ftp_ntps);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);

	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);

		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};
static dtrace_pops_t pid_pops = {
	fasttrap_pid_provide,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	fasttrap_pid_getargdesc,
	fasttrap_pid_getarg,
	fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	fasttrap_pid_provide,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	fasttrap_pid_getargdesc,
	fasttrap_usdt_getarg,
	fasttrap_pid_destroy
};
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	ASSERT(new_fprc != NULL);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

	/*
	 * APPLE NOTE: We have to initialize all locks explicitly
	 */
	lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	lck_mtx_unlock(&bucket->ftb_mtx);
fasttrap_proc_release(fasttrap_proc_t *proc)
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);

	lck_mtx_unlock(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		fprcp = &fprc->ftpc_next;

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
/*
 * Lookup a fasttrap-managed provider based on its name and associated proc.
 * A reference to the proc must be held for the duration of the call.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
	pid_t pid = p->p_pid;
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    fp->ftp_provider_type == provider_type &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 * APPLE NOTE: We have no equivalent to crhold,
	 * even though there is a cr_ref field in ucred.
	 */
	// lck_mtx_lock(&p->p_crlock);
	// lck_mtx_unlock(&p->p_crlock);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = p->p_pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
	new_fp->ftp_provider_type = provider_type;

	/*
	 * APPLE NOTE: locks require explicit init
	 */
	lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);
fasttrap_provider_free(fasttrap_provider_t *provider)
	pid_t pid = provider->ftp_pid;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);

	fasttrap_proc_release(provider->ftp_proc);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {

	p->p_dtrace_probes--;
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&

		lck_mtx_unlock(&bucket->ftb_mtx);

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will during
	 * exit or exec) the proc and associated providers become defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice
	 */
	atomic_add_32(&fasttrap_retired, fp->ftp_pcount);

	fp->ftp_retired = 1;

	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
fasttrap_uint32_cmp(const void *ap, const void *bp)
	return (*(const uint32_t *)ap - *(const uint32_t *)bp);

fasttrap_uint64_cmp(const void *ap, const void *bp)
	return (*(const uint64_t *)ap - *(const uint64_t *)bp);
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, aframes, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)

	switch (pdata->ftps_probe_type) {
		aframes = FASTTRAP_ENTRY_AFRAMES;
		aframes = FASTTRAP_RETURN_AFRAMES;

	const char* provider_name;
	switch (pdata->ftps_provider_type) {
	case DTFTP_PROVIDER_PID:
		provider_name = FASTTRAP_PID_NAME;
	case DTFTP_PROVIDER_OBJC:
		provider_name = FASTTRAP_OBJC_NAME;
	case DTFTP_PROVIDER_ONESHOT:
		provider_name = FASTTRAP_ONESHOT_NAME;

	p = proc_find(pdata->ftps_pid);

	/*
	 * Set that the process is allowed to run modified code and
	 * bail if it is not allowed to
	 */
	if ((p->p_csflags & (CS_KILL|CS_HARD)) && !cs_allow_invalid(p)) {

	if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
	    provider_name, &pid_attr)) == NULL) {

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	lck_mtx_unlock(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	for (i = 0; i < pdata->ftps_noffs; i++) {
		(void) snprintf(name_str, sizeof(name_str), "%llx",
		    (uint64_t)pdata->ftps_offs[i]);

		if (dtrace_probe_lookup(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)

		atomic_add_32(&fasttrap_total, 1);
		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -1);

		provider->ftp_pcount++;

		pp = zalloc(fasttrap_probe_t_zones[1]);
		bzero(pp, sizeof (fasttrap_probe_t));

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;

		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
		tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined(__arm64__)
		/*
		 * On arm the subinfo is used to distinguish between arm
		 * and thumb modes. On arm64 there is no thumb mode, so
		 * this field is simply initialized to 0 on its way
		 * in.
		 */
		tp->ftt_fntype = pdata->ftps_arch_subinfo;

		pp->ftp_tps[0].fit_tp = tp;
		pp->ftp_tps[0].fit_id.fti_probe = pp;
		pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name_str,
		    FASTTRAP_OFFSET_AFRAMES, pp);

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])

			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);

		provider->ftp_pcount += pdata->ftps_noffs;
		ASSERT(pdata->ftps_noffs > 0);
		if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
			pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
			bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));

			pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = zalloc(fasttrap_tracepoint_t_zone);
			bzero(tp, sizeof (fasttrap_tracepoint_t));
			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined (__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes. On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * in.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;

			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	lck_mtx_unlock(&provider->ftp_mtx);

		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	lck_mtx_unlock(&provider->ftp_cmtx);
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	lck_mtx_unlock(&provider->ftp_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * APPLE NOTE: We also need to check the objc and oneshot pid provider types.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_OBJC_NAME);
		return (NULL);
	}

	if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_ONESHOT_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		cmn_err(CE_WARN, "failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
		return (NULL);
	}

	/*
	 * APPLE NOTE:
	 *
	 * USDT probes (fasttrap meta probes) are very expensive to create.
	 * Profiling has shown that the largest single cost is verifying that
	 * dtrace hasn't already created a given meta_probe. The reason for
	 * this is dtrace_match() often has to strcmp ~100 hashed entries for
	 * each static probe being created. We want to get rid of that check.
	 * The simplest way of eliminating it is to deny the ability to add
	 * probes to an existing provider. If the provider already exists, BZZT!
	 * This still leaves the possibility of intentionally malformed DOF
	 * having duplicate probes. However, duplicate probes are not fatal,
	 * and there is no way to get that by accident, so we will not check
	 * for that case.
	 *
	 * UPDATE: It turns out there are several use cases that require adding
	 * probes to existing providers. Disabling the dtrace_probe_lookup()
	 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
	 */

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	lck_mtx_unlock(&provider->ftp_mtx);

	return (provider);
}
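
/*
 * Illustrative example (added; an assumption based on how pid-style provider
 * names are typically formed, not stated in this function): the "+ 10"
 * headroom in the length check above leaves room for the decimal pid that is
 * appended to dthpv_provname when the per-process provider name is built,
 * e.g. a helper provider "myapp" in pid 1234 would become "myapp1234". A
 * 32-bit pid needs at most 10 digits, so the combined name still fits in
 * provider->ftp_name.
 */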
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg)
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	/*
	 * APPLE NOTE: This is hideously expensive. See note in
	 * fasttrap_meta_provide() for why we can get away without
	 * checking here.
	 */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;

	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	provider->ftp_pcount += ntps;

	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc(fasttrap_probe_t_zones[ntps]);
		bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
	} else {
		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
	}

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}
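
/*
 * Added note (not part of the original source): after the two tracepoint
 * loops above, the ftp_tps[] array is laid out as
 *
 *	ftp_tps[0 .. dthpb_noffs-1]	DTFTP_OFFSETS		(regular probe sites)
 *	ftp_tps[dthpb_noffs .. ntps-1]	DTFTP_IS_ENABLED	(is-enabled sites)
 *
 * which is why the second loop keeps using 'i' as the ftp_tps[] index while
 * 'j' indexes dthpb_enoffs[] from zero.
 */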
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}
static char *
fasttrap_meta_provider_name(void *arg)
{
	fasttrap_provider_t *fprovider = arg;
	dtrace_provider_t *provider = (dtrace_provider_t *)(fprovider->ftp_provid);
	return provider->dtpv_name;
}

static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove,
	fasttrap_meta_provider_name
};
/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or is not a valid UTF8 string, the function returns -1. Otherwise, 0 is
 * returned.
 *
 * str: string to validate.
 * maxlen: maximal length of the string, null-terminating byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen) {
	size_t len;

	assert(str);
	assert(maxlen != 0);

	/* Check if the string is null-terminated. */
	len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* Finally, check for UTF8 validity. */
	return utf8_validatestr((unsigned const char*) str, len);
}
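
/*
 * Usage sketch (added comment; illustrative only): fasttrap_ioctl() below
 * calls this on the fixed-size name buffers copied in from user space, e.g.
 *
 *	if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0)
 *		-> reject the request with EINVAL
 *
 * A buffer that is not NUL-terminated makes strnlen() return maxlen, which is
 * rejected before utf8_validatestr() ever runs.
 */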
static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;

		if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
		    sizeof (probe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);

		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(arg, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				ret = ESRCH;
				goto err;
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD | VWRITE)) != 0) {
			//	mutex_exit(&p->p_lock);
			//	return (ret);
			// }
			proc_rele(p);
		}

		ret = fasttrap_add_probe(probe);

err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;

		if (copyin(arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD)) != 0) {
			//	mutex_exit(&p->p_lock);
			//	return (ret);
			// }
			proc_rele(p);
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}
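
/*
 * Added note (not part of the original source): the MAKEPROBE sizing above
 * works because fasttrap_probe_spec_t already embeds one ftps_offs[] element,
 * so a request with N offsets needs
 *
 *	size = sizeof (fasttrap_probe_spec_t)
 *	     + sizeof (probe->ftps_offs[0]) * (N - 1)
 *
 * and the earlier bound on noffs keeps that total under about 1MB, so the
 * multiplication cannot overflow before kmem_alloc() is called.
 */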
static int
fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	ulong_t nent;
	unsigned int i;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	fasttrap_devi = devi;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory. 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;

#if defined(__LP64__)
	/*
	 * On embedded, the zone map does not grow with the memory size over 1GB
	 * (see osfmk/vm/vm_init.c)
	 */
	if (fasttrap_max > 400000) {
		fasttrap_max = 400000;
	}
#endif

	if (fasttrap_max == 0)
		fasttrap_max = 50000;

	fasttrap_retired = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (DDI_SUCCESS);
}
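
/*
 * Added example (not part of the original source, and assuming
 * fasttrap_highbit() mirrors the Solaris highbit(), i.e. returns the 1-based
 * index of the most significant set bit): the hash sizing above rounds a
 * non-power-of-two request up, e.g. a "fasttrap-hash-size" of 1000 becomes
 * fth_nent = 1 << fasttrap_highbit(1000) = 1024, while an exact power of two
 * such as 16384 is used as-is. Keeping fth_nent a power of two lets
 * fth_mask = fth_nent - 1 act as a cheap modulus when hashing (pid, pc)
 * pairs into buckets.
 */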
static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}
static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	int err, rv = 0;
	user_addr_t uaddrp;

	if (proc_is64bit(p))
		uaddrp = *(user_addr_t *)data;
	else
		uaddrp = (user_addr_t) *(uint32_t *)data;

	err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	} else
		return 0;
}
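
/*
 * Decoding sketch (added comment; the userland snippet below is hypothetical
 * and not part of this file): a caller can recover the Solaris-style result
 * from the overloaded errno like so:
 *
 *	if (ioctl(fd, cmd, &data) == -1) {
 *		if (errno < 4096)
 *			err = errno;		// genuine error code
 *		else
 *			rv = errno >> 12;	// positive return value
 *	}
 */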
static int gFasttrapInited = 0;

#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */

static struct cdevsw fasttrap_cdevsw =
{
	_fasttrap_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	_fasttrap_ioctl,	/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
void fasttrap_init(void);

void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places: any open of /dev/dtrace,
	 * and also from dtrace_init() if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (0 == gFasttrapInited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
			// FIX ME! What kind of error reporting to do here?
			return;
		}

		/*
		 * Allocate the fasttrap_tracepoint_t zone
		 */
		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
						   1024 * sizeof(fasttrap_tracepoint_t),
						   sizeof(fasttrap_tracepoint_t),
						   "dtrace.fasttrap_tracepoint_t");

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		unsigned int i;
		for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
							  1024 * zone_element_size,
							  zone_element_size,
							  fasttrap_probe_t_zone_names[i]);
		}

		/*
		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
		 */
		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);

		/*
		 * Initialize global locks
		 */
		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0)) {
			// FIX ME! Do we remove the devfs node here?
			// What kind of error reporting?
			printf("fasttrap_init: Call to fasttrap_attach failed.\n");
			return;
		}

		/*
		 * Start the fasttrap cleanup thread
		 */
		kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
		if (res != KERN_SUCCESS) {
			panic("Could not create fasttrap_cleanup_thread");
		}
		thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");

#ifdef FASTTRAP_ASYNC_REMOVE
		fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
		fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(fasttrap_tracepoint_t*),
					KM_SLEEP);
		lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif

		gFasttrapInited = 1;
	}
}

#undef FASTTRAP_MAJOR