 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
#include <sys/types.h>
#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <security/mac_framework.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>
extern kern_return_t
kernel_thread_start_priority(thread_continue_t continuation,
    void *parameter,
    integer_t priority,
    thread_t *new_thread);

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * tractable.
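 *
 * As an illustrative sketch (not part of the original comment), the lookup a
 * trap handler performs to find the saved instruction for a given (pid, pc)
 * pair uses the same hash table and fields this file uses elsewhere:
 *
 *	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
 *	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
 *		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
 *		    tp->ftt_proc->ftpc_acount != 0)
 *			break;
 *	}
 *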
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead up a reference count to prevent
 * the provider from disappearing, drop the provider lock, and acquire the
 * creation lock.
 *
 * In short, the lock ordering rules are:
 *
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
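/*
 * Illustrative sketch (not in the original source): the reference-count dance
 * the ordering rules above imply when code that holds the provider lock also
 * needs the creation lock; fasttrap_add_probe() below follows this pattern.
 *
 *	lck_mtx_lock(&provider->ftp_mtx);
 *	provider->ftp_ccount++;			keeps the provider from being freed
 *	lck_mtx_unlock(&provider->ftp_mtx);
 *
 *	lck_mtx_lock(&provider->ftp_cmtx);
 *	... dtrace_probe_lookup() / dtrace_probe_create() ...
 *	lck_mtx_unlock(&provider->ftp_cmtx);
 *
 *	lck_mtx_lock(&provider->ftp_mtx);
 *	provider->ftp_ccount--;
 *	lck_mtx_unlock(&provider->ftp_mtx);
 */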
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static lck_mtx_t fasttrap_cleanup_mtx;

#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;
/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;
#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;
static fasttrap_hash_t fasttrap_procs;

static uint64_t fasttrap_pid_count;	/* pid ref count */
static lck_mtx_t fasttrap_count_mtx;	/* lock on ref count */
#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t *, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t *, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
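/*
 * Illustrative example (not in the original source): selecting the provider
 * hash bucket for the "pid" provider of process 123, the way
 * fasttrap_provider_lookup() and fasttrap_provider_retire() do below.
 *
 *	fasttrap_bucket_t *bucket =
 *	    &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(123, "pid")];
 *	lck_mtx_lock(&bucket->ftb_mtx);
 *	... walk bucket->ftb_data matching ftp_pid and ftp_name ...
 *	lck_mtx_unlock(&bucket->ftb_mtx);
 */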
/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

struct zone *fasttrap_tracepoint_t_zone;

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};
/*
 * APPLE NOTE: We have to manage locks explicitly
 */
lck_grp_t *fasttrap_lck_grp;
lck_grp_attr_t *fasttrap_lck_grp_attr;
lck_attr_t *fasttrap_lck_attr;
236 fasttrap_highbit(ulong_t i
)
243 if (i
& 0xffffffff00000000ul
) {
247 if (i
& 0xffff0000) {
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}
/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
static void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	mutex_exit(&p->p_lock);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}
/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}
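/*
 * Usage note (illustrative, not in the original source): callers pass the
 * generation recorded on a probe; taking and dropping every CPU's
 * cpuc_pid_lock guarantees that any probe-context code which might still be
 * walking the old tracepoint list has drained before the memory is reused.
 *
 *	fasttrap_mod_barrier(probe->ftp_gen);
 *	... now safe to tear down the probe's tracepoints ...
 */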
326 static void fasttrap_pid_cleanup(uint32_t);
329 fasttrap_pid_cleanup_providers(void)
331 fasttrap_provider_t
**fpp
, *fp
;
332 fasttrap_bucket_t
*bucket
;
333 dtrace_provider_id_t provid
;
334 unsigned int later
= 0, i
;
337 * Iterate over all the providers trying to remove the marked
338 * ones. If a provider is marked but not retired, we just
339 * have to take a crack at removing it -- it's no big deal if
342 for (i
= 0; i
< fasttrap_provs
.fth_nent
; i
++) {
343 bucket
= &fasttrap_provs
.fth_table
[i
];
344 lck_mtx_lock(&bucket
->ftb_mtx
);
345 fpp
= (fasttrap_provider_t
**)&bucket
->ftb_data
;
347 while ((fp
= *fpp
) != NULL
) {
348 if (!fp
->ftp_marked
) {
353 lck_mtx_lock(&fp
->ftp_mtx
);
356 * If this provider has consumers actively
357 * creating probes (ftp_ccount) or is a USDT
358 * provider (ftp_mcount), we can't unregister
361 if (fp
->ftp_ccount
!= 0 ||
362 fp
->ftp_mcount
!= 0) {
364 lck_mtx_unlock(&fp
->ftp_mtx
);
368 if (!fp
->ftp_retired
|| fp
->ftp_rcount
!= 0)
371 lck_mtx_unlock(&fp
->ftp_mtx
);
374 * If we successfully unregister this
375 * provider we can remove it from the hash
376 * chain and free the memory. If our attempt
377 * to unregister fails and this is a retired
378 * provider, increment our flag to try again
379 * pretty soon. If we've consumed more than
380 * half of our total permitted number of
381 * probes call dtrace_condense() to try to
382 * clean out the unenabled probes.
384 provid
= fp
->ftp_provid
;
385 if (dtrace_unregister(provid
) != 0) {
386 if (fasttrap_total
> fasttrap_max
/ 2)
387 (void) dtrace_condense(provid
);
388 later
+= fp
->ftp_marked
;
392 fasttrap_provider_free(fp
);
395 lck_mtx_unlock(&bucket
->ftb_mtx
);
typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static lck_mtx_t fasttrap_retired_mtx;

#define DEFAULT_RETIRED_SIZE 256
413 fasttrap_tracepoint_cleanup(void)
418 proc_t
*p
= PROC_NULL
;
419 fasttrap_tracepoint_t
*tp
= NULL
;
420 lck_mtx_lock(&fasttrap_retired_mtx
);
421 fasttrap_bucket_t
*bucket
;
422 for (i
= 0; i
< fasttrap_cur_retired
; i
++) {
423 pc
= fasttrap_retired_spec
[i
].fttps_pc
;
424 if (fasttrap_retired_spec
[i
].fttps_pid
!= pid
) {
425 pid
= fasttrap_retired_spec
[i
].fttps_pid
;
426 if (p
!= PROC_NULL
) {
429 if ((p
= sprlock(pid
)) == PROC_NULL
) {
434 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
435 lck_mtx_lock(&bucket
->ftb_mtx
);
436 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
437 if (pid
== tp
->ftt_pid
&& pc
== tp
->ftt_pc
&&
438 tp
->ftt_proc
->ftpc_acount
!= 0)
442 * Check that the tracepoint is not gone or has not been
443 * re-activated for another probe
445 if (tp
== NULL
|| tp
->ftt_retired
== 0) {
446 lck_mtx_unlock(&bucket
->ftb_mtx
);
449 fasttrap_tracepoint_remove(p
, tp
);
450 lck_mtx_unlock(&bucket
->ftb_mtx
);
452 if (p
!= PROC_NULL
) {
456 fasttrap_cur_retired
= 0;
458 lck_mtx_unlock(&fasttrap_retired_mtx
);
462 fasttrap_tracepoint_retire(proc_t
*p
, fasttrap_tracepoint_t
*tp
)
466 lck_mtx_lock(&fasttrap_retired_mtx
);
467 fasttrap_tracepoint_spec_t
*s
= &fasttrap_retired_spec
[fasttrap_cur_retired
++];
468 s
->fttps_pid
= p
->p_pid
;
469 s
->fttps_pc
= tp
->ftt_pc
;
471 if (fasttrap_cur_retired
== fasttrap_retired_size
) {
472 fasttrap_tracepoint_spec_t
*new_retired
= kmem_zalloc(
473 fasttrap_retired_size
* 2 *
474 sizeof(*fasttrap_retired_spec
),
476 memcpy(new_retired
, fasttrap_retired_spec
, sizeof(*fasttrap_retired_spec
) * fasttrap_retired_size
);
477 kmem_free(fasttrap_retired_spec
, sizeof(*fasttrap_retired_spec
) * fasttrap_retired_size
);
478 fasttrap_retired_size
*= 2;
479 fasttrap_retired_spec
= new_retired
;
482 lck_mtx_unlock(&fasttrap_retired_mtx
);
486 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT
);
490 fasttrap_pid_cleanup_compute_priority(void)
492 if (fasttrap_total
> (fasttrap_max
/ 100 * 90) || fasttrap_retired
> fasttrap_max
/ 2) {
493 thread_precedence_policy_data_t precedence
= {12 /* BASEPRI_PREEMPT_HIGH */};
494 thread_policy_set(fasttrap_cleanup_thread
, THREAD_PRECEDENCE_POLICY
, (thread_policy_t
) &precedence
, THREAD_PRECEDENCE_POLICY_COUNT
);
497 thread_precedence_policy_data_t precedence
= {-39 /* BASEPRI_USER_INITIATED */};
498 thread_policy_set(fasttrap_cleanup_thread
, THREAD_PRECEDENCE_POLICY
, (thread_policy_t
) &precedence
, THREAD_PRECEDENCE_POLICY_COUNT
);
504 * This is the timeout's callback for cleaning up the providers and their
508 __attribute__((noreturn
))
510 fasttrap_pid_cleanup_cb(void)
513 lck_mtx_lock(&fasttrap_cleanup_mtx
);
514 msleep(&fasttrap_pid_cleanup_cb
, &fasttrap_cleanup_mtx
, PRIBIO
, "fasttrap_pid_cleanup_cb", NULL
);
516 unsigned int later
= 0;
518 work
= os_atomic_xchg(&fasttrap_cleanup_work
, 0, relaxed
);
519 lck_mtx_unlock(&fasttrap_cleanup_mtx
);
520 if (work
& FASTTRAP_CLEANUP_PROVIDER
) {
521 later
= fasttrap_pid_cleanup_providers();
523 if (work
& FASTTRAP_CLEANUP_TRACEPOINT
) {
524 fasttrap_tracepoint_cleanup();
526 lck_mtx_lock(&fasttrap_cleanup_mtx
);
528 fasttrap_pid_cleanup_compute_priority();
529 if (!fasttrap_cleanup_work
) {
531 * If we were unable to remove a retired provider, try again after
532 * a second. This situation can occur in certain circumstances where
533 * providers cannot be unregistered even though they have no probes
534 * enabled because of an execution of dtrace -l or something similar.
535 * If the timeout has been disabled (set to 1 because we're trying
536 * to detach), we set fasttrap_cleanup_work to ensure that we'll
537 * get a chance to do that work if and when the timeout is reenabled
541 struct timespec t
= {.tv_sec
= 1, .tv_nsec
= 0};
542 msleep(&fasttrap_pid_cleanup_cb
, &fasttrap_cleanup_mtx
, PRIBIO
, "fasttrap_pid_cleanup_cb", &t
);
545 msleep(&fasttrap_pid_cleanup_cb
, &fasttrap_cleanup_mtx
, PRIBIO
, "fasttrap_pid_cleanup_cb", NULL
);
/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}
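/*
 * Usage (illustrative, not in the original source): callers only post the
 * kind of work and return; the cleanup thread runs it later at a priority
 * chosen by fasttrap_pid_cleanup_compute_priority().
 *
 *	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER | FASTTRAP_CLEANUP_TRACEPOINT);
 */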
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * child.
572 fasttrap_fork(proc_t
*p
, proc_t
*cp
)
574 pid_t ppid
= p
->p_pid
;
577 ASSERT(current_proc() == p
);
578 LCK_MTX_ASSERT(&p
->p_dtrace_sprlock
, LCK_MTX_ASSERT_OWNED
);
579 ASSERT(p
->p_dtrace_count
> 0);
580 ASSERT(cp
->p_dtrace_count
== 0);
583 * This would be simpler and faster if we maintained per-process
584 * hash tables of enabled tracepoints. It could, however, potentially
585 * slow down execution of a tracepoint since we'd need to go
586 * through two levels of indirection. In the future, we should
587 * consider either maintaining per-process ancillary lists of
588 * enabled tracepoints or hanging a pointer to a per-process hash
589 * table of enabled tracepoints off the proc structure.
593 * We don't have to worry about the child process disappearing
594 * because we're in fork().
596 if (cp
!= sprlock(cp
->p_pid
)) {
597 printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp
->p_pid
);
602 * Iterate over every tracepoint looking for ones that belong to the
603 * parent process, and remove each from the child process.
605 for (i
= 0; i
< fasttrap_tpoints
.fth_nent
; i
++) {
606 fasttrap_tracepoint_t
*tp
;
607 fasttrap_bucket_t
*bucket
= &fasttrap_tpoints
.fth_table
[i
];
609 lck_mtx_lock(&bucket
->ftb_mtx
);
610 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
611 if (tp
->ftt_pid
== ppid
&&
612 tp
->ftt_proc
->ftpc_acount
!= 0) {
613 fasttrap_tracepoint_remove(cp
, tp
);
616 * The count of active providers can only be
617 * decremented (i.e. to zero) during exec,
618 * exit, and removal of a meta provider so it
619 * should be impossible to drop the count
622 ASSERT(tp
->ftt_proc
->ftpc_acount
!= 0);
625 lck_mtx_unlock(&bucket
->ftb_mtx
);
629 * Free any ptss pages/entries in the child.
631 dtrace_ptss_fork(p
, cp
);
637 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
638 * is set on the proc structure to indicate that there is a pid provider
639 * associated with this process.
642 fasttrap_exec_exit(proc_t
*p
)
644 ASSERT(p
== current_proc());
645 LCK_MTX_ASSERT(&p
->p_mlock
, LCK_MTX_ASSERT_OWNED
);
646 LCK_MTX_ASSERT(&p
->p_dtrace_sprlock
, LCK_MTX_ASSERT_NOTOWNED
);
	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock before returning.
	 */
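/*
 * Illustrative ordering sketch (not in the original source) of the sequence
 * the note above describes; proc_lock()/proc_unlock() stand in for however
 * the caller manages the proc_lock.
 *
 *	proc_unlock(p);
 *	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);	takes bucket/provider locks
 *	lck_mtx_lock(&p->p_dtrace_sprlock);
 *	dtrace_ptss_exec_exit(p);
 *	lck_mtx_unlock(&p->p_dtrace_sprlock);
 *	proc_lock(p);
 */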
662 * We clean up the pid provider for this process here; user-land
663 * static probes are handled by the meta-provider remove entry point.
665 fasttrap_provider_retire(p
, FASTTRAP_PID_NAME
, 0);
668 * APPLE NOTE: We also need to remove any aliased providers.
669 * XXX optimization: track which provider types are instantiated
670 * and only retire as needed.
672 fasttrap_provider_retire(p
, FASTTRAP_OBJC_NAME
, 0);
673 fasttrap_provider_retire(p
, FASTTRAP_ONESHOT_NAME
, 0);
676 * This should be called after it is no longer possible for a user
677 * thread to execute (potentially dtrace instrumented) instructions.
679 lck_mtx_lock(&p
->p_dtrace_sprlock
);
680 dtrace_ptss_exec_exit(p
);
681 lck_mtx_unlock(&p
->p_dtrace_sprlock
);
689 fasttrap_pid_provide(void *arg
, const dtrace_probedesc_t
*desc
)
691 #pragma unused(arg, desc)
693 * There are no "default" pid probes.
698 fasttrap_tracepoint_enable(proc_t
*p
, fasttrap_probe_t
*probe
, uint_t index
)
700 fasttrap_tracepoint_t
*tp
, *new_tp
= NULL
;
701 fasttrap_bucket_t
*bucket
;
706 ASSERT(index
< probe
->ftp_ntps
);
708 pid
= probe
->ftp_pid
;
709 pc
= probe
->ftp_tps
[index
].fit_tp
->ftt_pc
;
710 id
= &probe
->ftp_tps
[index
].fit_id
;
712 ASSERT(probe
->ftp_tps
[index
].fit_tp
->ftt_pid
== pid
);
714 //ASSERT(!(p->p_flag & SVFORK));
717 * Before we make any modifications, make sure we've imposed a barrier
718 * on the generation in which this probe was last modified.
720 fasttrap_mod_barrier(probe
->ftp_gen
);
722 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
725 * If the tracepoint has already been enabled, just add our id to the
726 * list of interested probes. This may be our second time through
727 * this path in which case we'll have constructed the tracepoint we'd
728 * like to install. If we can't find a match, and have an allocated
729 * tracepoint ready to go, enable that one now.
731 * A tracepoint whose process is defunct is also considered defunct.
734 lck_mtx_lock(&bucket
->ftb_mtx
);
735 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
738 * Note that it's safe to access the active count on the
739 * associated proc structure because we know that at least one
740 * provider (this one) will still be around throughout this
743 if (tp
->ftt_pid
!= pid
|| tp
->ftt_pc
!= pc
||
744 tp
->ftt_proc
->ftpc_acount
== 0)
748 * Now that we've found a matching tracepoint, it would be
749 * a decent idea to confirm that the tracepoint is still
750 * enabled and the trap instruction hasn't been overwritten.
751 * Since this is a little hairy, we'll punt for now.
753 if (!tp
->ftt_installed
) {
754 if (fasttrap_tracepoint_install(p
, tp
) != 0)
755 rc
= FASTTRAP_ENABLE_PARTIAL
;
758 * This can't be the first interested probe. We don't have
759 * to worry about another thread being in the midst of
760 * deleting this tracepoint (which would be the only valid
761 * reason for a tracepoint to have no interested probes)
762 * since we're holding P_PR_LOCK for this process.
764 ASSERT(tp
->ftt_ids
!= NULL
|| tp
->ftt_retids
!= NULL
);
766 switch (id
->fti_ptype
) {
769 case DTFTP_IS_ENABLED
:
770 id
->fti_next
= tp
->ftt_ids
;
771 dtrace_membar_producer();
773 dtrace_membar_producer();
777 case DTFTP_POST_OFFSETS
:
778 id
->fti_next
= tp
->ftt_retids
;
779 dtrace_membar_producer();
781 dtrace_membar_producer();
790 lck_mtx_unlock(&bucket
->ftb_mtx
);
792 if (new_tp
!= NULL
) {
793 new_tp
->ftt_ids
= NULL
;
794 new_tp
->ftt_retids
= NULL
;
801 * If we have a good tracepoint ready to go, install it now while
802 * we have the lock held and no one can screw with us.
804 if (new_tp
!= NULL
) {
807 new_tp
->ftt_next
= bucket
->ftb_data
;
808 dtrace_membar_producer();
809 bucket
->ftb_data
= new_tp
;
810 dtrace_membar_producer();
811 lck_mtx_unlock(&bucket
->ftb_mtx
);
814 * Activate the tracepoint in the ISA-specific manner.
815 * If this fails, we need to report the failure, but
816 * indicate that this tracepoint must still be disabled
817 * by calling fasttrap_tracepoint_disable().
819 if (fasttrap_tracepoint_install(p
, new_tp
) != 0)
820 rc
= FASTTRAP_ENABLE_PARTIAL
;
822 * Increment the count of the number of tracepoints active in
823 * the victim process.
825 //ASSERT(p->p_proc_flag & P_PR_LOCK);
832 lck_mtx_unlock(&bucket
->ftb_mtx
);
835 * Initialize the tracepoint that's been preallocated with the probe.
837 new_tp
= probe
->ftp_tps
[index
].fit_tp
;
838 new_tp
->ftt_retired
= 0;
840 ASSERT(new_tp
->ftt_pid
== pid
);
841 ASSERT(new_tp
->ftt_pc
== pc
);
842 ASSERT(new_tp
->ftt_proc
== probe
->ftp_prov
->ftp_proc
);
843 ASSERT(new_tp
->ftt_ids
== NULL
);
844 ASSERT(new_tp
->ftt_retids
== NULL
);
846 switch (id
->fti_ptype
) {
849 case DTFTP_IS_ENABLED
:
851 new_tp
->ftt_ids
= id
;
855 case DTFTP_POST_OFFSETS
:
857 new_tp
->ftt_retids
= id
;
865 * If the ISA-dependent initialization goes to plan, go back to the
866 * beginning and try to install this freshly made tracepoint.
868 if (fasttrap_tracepoint_init(p
, new_tp
, pc
, id
->fti_ptype
) == 0)
871 new_tp
->ftt_ids
= NULL
;
872 new_tp
->ftt_retids
= NULL
;
874 return (FASTTRAP_ENABLE_FAIL
);
878 fasttrap_tracepoint_disable(proc_t
*p
, fasttrap_probe_t
*probe
, uint_t index
)
880 fasttrap_bucket_t
*bucket
;
881 fasttrap_provider_t
*provider
= probe
->ftp_prov
;
882 fasttrap_tracepoint_t
**pp
, *tp
;
883 fasttrap_id_t
*id
, **idp
;
887 ASSERT(index
< probe
->ftp_ntps
);
889 pid
= probe
->ftp_pid
;
890 pc
= probe
->ftp_tps
[index
].fit_tp
->ftt_pc
;
891 id
= &probe
->ftp_tps
[index
].fit_id
;
893 ASSERT(probe
->ftp_tps
[index
].fit_tp
->ftt_pid
== pid
);
896 * Find the tracepoint and make sure that our id is one of the
897 * ones registered with it.
899 bucket
= &fasttrap_tpoints
.fth_table
[FASTTRAP_TPOINTS_INDEX(pid
, pc
)];
900 lck_mtx_lock(&bucket
->ftb_mtx
);
901 for (tp
= bucket
->ftb_data
; tp
!= NULL
; tp
= tp
->ftt_next
) {
902 if (tp
->ftt_pid
== pid
&& tp
->ftt_pc
== pc
&&
903 tp
->ftt_proc
== provider
->ftp_proc
)
908 * If we somehow lost this tracepoint, we're in a world of hurt.
912 switch (id
->fti_ptype
) {
915 case DTFTP_IS_ENABLED
:
916 ASSERT(tp
->ftt_ids
!= NULL
);
921 case DTFTP_POST_OFFSETS
:
922 ASSERT(tp
->ftt_retids
!= NULL
);
923 idp
= &tp
->ftt_retids
;
927 /* Fix compiler warning... */
932 while ((*idp
)->fti_probe
!= probe
) {
933 idp
= &(*idp
)->fti_next
;
934 ASSERT(*idp
!= NULL
);
939 dtrace_membar_producer();
941 ASSERT(id
->fti_probe
== probe
);
944 * If there are other registered enablings of this tracepoint, we're
 * all done, but if this was the last probe associated with this
 * tracepoint, we need to remove and free it.
948 if (tp
->ftt_ids
!= NULL
|| tp
->ftt_retids
!= NULL
) {
951 * If the current probe's tracepoint is in use, swap it
952 * for an unused tracepoint.
954 if (tp
== probe
->ftp_tps
[index
].fit_tp
) {
955 fasttrap_probe_t
*tmp_probe
;
956 fasttrap_tracepoint_t
**tmp_tp
;
959 if (tp
->ftt_ids
!= NULL
) {
960 tmp_probe
= tp
->ftt_ids
->fti_probe
;
961 /* LINTED - alignment */
962 tmp_index
= FASTTRAP_ID_INDEX(tp
->ftt_ids
);
963 tmp_tp
= &tmp_probe
->ftp_tps
[tmp_index
].fit_tp
;
965 tmp_probe
= tp
->ftt_retids
->fti_probe
;
966 /* LINTED - alignment */
967 tmp_index
= FASTTRAP_ID_INDEX(tp
->ftt_retids
);
968 tmp_tp
= &tmp_probe
->ftp_tps
[tmp_index
].fit_tp
;
971 ASSERT(*tmp_tp
!= NULL
);
972 ASSERT(*tmp_tp
!= probe
->ftp_tps
[index
].fit_tp
);
973 ASSERT((*tmp_tp
)->ftt_ids
== NULL
);
974 ASSERT((*tmp_tp
)->ftt_retids
== NULL
);
976 probe
->ftp_tps
[index
].fit_tp
= *tmp_tp
;
981 lck_mtx_unlock(&bucket
->ftb_mtx
);
 * Tag the modified probe with the generation in which it was changed.
987 probe
->ftp_gen
= fasttrap_mod_gen
;
991 lck_mtx_unlock(&bucket
->ftb_mtx
);
994 * We can't safely remove the tracepoint from the set of active
995 * tracepoints until we've actually removed the fasttrap instruction
996 * from the process's text. We can, however, operate on this
997 * tracepoint secure in the knowledge that no other thread is going to
998 * be looking at it since we hold P_PR_LOCK on the process if it's
 * live or we hold the provider lock on the process if it's dead and gone.
1004 * We only need to remove the actual instruction if we're looking
1005 * at an existing process
1009 * If we fail to restore the instruction we need to kill
 * this process since it's in a completely unrecoverable state.
1013 if (fasttrap_tracepoint_remove(p
, tp
) != 0)
1014 fasttrap_sigtrap(p
, NULL
, pc
);
1017 * Decrement the count of the number of tracepoints active
1018 * in the victim process.
1020 //ASSERT(p->p_proc_flag & P_PR_LOCK);
1021 p
->p_dtrace_count
--;
1025 * Remove the probe from the hash table of active tracepoints.
1027 lck_mtx_lock(&bucket
->ftb_mtx
);
1028 pp
= (fasttrap_tracepoint_t
**)&bucket
->ftb_data
;
1029 ASSERT(*pp
!= NULL
);
1031 pp
= &(*pp
)->ftt_next
;
1032 ASSERT(*pp
!= NULL
);
1036 dtrace_membar_producer();
1038 lck_mtx_unlock(&bucket
->ftb_mtx
);
1041 * Tag the modified probe with the generation in which it was changed.
1043 probe
->ftp_gen
= fasttrap_mod_gen
;
1047 fasttrap_enable_callbacks(void)
1050 * We don't have to play the rw lock game here because we're
1051 * providing something rather than taking something away --
1052 * we can be sure that no threads have tried to follow this
1053 * function pointer yet.
1055 lck_mtx_lock(&fasttrap_count_mtx
);
1056 if (fasttrap_pid_count
== 0) {
1057 ASSERT(dtrace_pid_probe_ptr
== NULL
);
1058 ASSERT(dtrace_return_probe_ptr
== NULL
);
1059 dtrace_pid_probe_ptr
= &fasttrap_pid_probe
;
1060 dtrace_return_probe_ptr
= &fasttrap_return_probe
;
1062 ASSERT(dtrace_pid_probe_ptr
== &fasttrap_pid_probe
);
1063 ASSERT(dtrace_return_probe_ptr
== &fasttrap_return_probe
);
1064 fasttrap_pid_count
++;
1065 lck_mtx_unlock(&fasttrap_count_mtx
);
1069 fasttrap_disable_callbacks(void)
1071 //ASSERT(MUTEX_HELD(&cpu_lock));
1073 lck_mtx_lock(&fasttrap_count_mtx
);
1074 ASSERT(fasttrap_pid_count
> 0);
1075 fasttrap_pid_count
--;
1076 if (fasttrap_pid_count
== 0) {
1077 dtrace_cpu_t
*cur
, *cpu
= CPU
;
1080 * APPLE NOTE: This loop seems broken, it touches every CPU
1081 * but the one we're actually running on. Need to ask Sun folks
1082 * if that is safe. Scenario is this: We're running on CPU A,
1083 * and lock all but A. Then we get preempted, and start running
1084 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
1086 for (cur
= cpu
->cpu_next
; cur
!= cpu
; cur
= cur
->cpu_next
) {
1087 lck_rw_lock_exclusive(&cur
->cpu_ft_lock
);
1088 // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
1091 dtrace_pid_probe_ptr
= NULL
;
1092 dtrace_return_probe_ptr
= NULL
;
1094 for (cur
= cpu
->cpu_next
; cur
!= cpu
; cur
= cur
->cpu_next
) {
1095 lck_rw_unlock_exclusive(&cur
->cpu_ft_lock
);
1096 // rw_exit(&cur->cpu_ft_lock);
1099 lck_mtx_unlock(&fasttrap_count_mtx
);
1104 fasttrap_pid_enable(void *arg
, dtrace_id_t id
, void *parg
)
1106 #pragma unused(arg, id)
1107 fasttrap_probe_t
*probe
= parg
;
1111 ASSERT(probe
!= NULL
);
1112 ASSERT(!probe
->ftp_enabled
);
1113 ASSERT(id
== probe
->ftp_id
);
1114 // ASSERT(MUTEX_HELD(&cpu_lock));
1117 * Increment the count of enabled probes on this probe's provider;
1118 * the provider can't go away while the probe still exists. We
 * must increment this even if we aren't able to properly enable this probe.
1122 lck_mtx_lock(&probe
->ftp_prov
->ftp_mtx
);
1123 probe
->ftp_prov
->ftp_rcount
++;
1124 lck_mtx_unlock(&probe
->ftp_prov
->ftp_mtx
);
1127 * If this probe's provider is retired (meaning it was valid in a
1128 * previously exec'ed incarnation of this address space), bail out. The
1129 * provider can't go away while we're in this code path.
1131 if (probe
->ftp_prov
->ftp_retired
)
1135 * If we can't find the process, it may be that we're in the context of
1136 * a fork in which the traced process is being born and we're copying
1137 * USDT probes. Otherwise, the process is gone so bail.
1139 if ((p
= sprlock(probe
->ftp_pid
)) == PROC_NULL
) {
1141 * APPLE NOTE: We should never end up here. The Solaris sprlock()
 * does not return processes with SIDL set, but we always return
1143 * the child process.
1150 if ((p
->p_csflags
& (CS_KILL
|CS_HARD
))) {
1152 for (i
= 0; i
< DTRACE_NCLIENTS
; i
++) {
1153 dtrace_state_t
*state
= dtrace_state_get(i
);
1156 if (state
->dts_cred
.dcr_cred
== NULL
)
1158 mac_proc_check_get_task(state
->dts_cred
.dcr_cred
, p
);
1160 rc
= cs_allow_invalid(p
);
1163 cmn_err(CE_WARN
, "process doesn't allow invalid code pages, failing to install fasttrap probe\n");
1170 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
1171 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
 * To mimic this, we allocate scratch space on demand. If this is the first
1173 * time a probe has been enabled in this process, we need to allocate scratch
1174 * space for each already existing thread. Now is a good time to do this, as
1175 * the target process is suspended and the proc_lock is held.
1177 if (p
->p_dtrace_ptss_pages
== NULL
) {
1178 dtrace_ptss_enable(p
);
1181 // ASSERT(!(p->p_flag & SVFORK));
1185 * We have to enable the trap entry point before any user threads have
1186 * the chance to execute the trap instruction we're about to place
1187 * in their process's text.
1189 fasttrap_enable_callbacks();
1192 * Enable all the tracepoints and add this probe's id to each
1193 * tracepoint's list of active probes.
1195 for (i
= 0; i
< (int)probe
->ftp_ntps
; i
++) {
1196 if ((rc
= fasttrap_tracepoint_enable(p
, probe
, i
)) != 0) {
1198 * If enabling the tracepoint failed completely,
1199 * we don't have to disable it; if the failure
1200 * was only partial we must disable it.
1202 if (rc
== FASTTRAP_ENABLE_FAIL
)
1205 ASSERT(rc
== FASTTRAP_ENABLE_PARTIAL
);
1208 * Back up and pull out all the tracepoints we've
1209 * created so far for this probe.
1212 fasttrap_tracepoint_disable(p
, probe
, i
);
1219 * Since we're not actually enabling this probe,
1220 * drop our reference on the trap table entry.
1222 fasttrap_disable_callbacks();
1229 probe
->ftp_enabled
= 1;
1235 fasttrap_pid_disable(void *arg
, dtrace_id_t id
, void *parg
)
1237 #pragma unused(arg, id)
1238 fasttrap_probe_t
*probe
= parg
;
1239 fasttrap_provider_t
*provider
= probe
->ftp_prov
;
1243 ASSERT(id
== probe
->ftp_id
);
1246 * We won't be able to acquire a /proc-esque lock on the process
1247 * iff the process is dead and gone. In this case, we rely on the
1248 * provider lock as a point of mutual exclusion to prevent other
1249 * DTrace consumers from disabling this probe.
1251 if ((p
= sprlock(probe
->ftp_pid
)) != PROC_NULL
) {
1252 // ASSERT(!(p->p_flag & SVFORK));
1255 lck_mtx_lock(&provider
->ftp_mtx
);
1258 * Disable all the associated tracepoints (for fully enabled probes).
1260 if (probe
->ftp_enabled
) {
1261 for (i
= 0; i
< (int)probe
->ftp_ntps
; i
++) {
1262 fasttrap_tracepoint_disable(p
, probe
, i
);
1266 ASSERT(provider
->ftp_rcount
> 0);
1267 provider
->ftp_rcount
--;
1271 * Even though we may not be able to remove it entirely, we
1272 * mark this retired provider to get a chance to remove some
1273 * of the associated probes.
1275 if (provider
->ftp_retired
&& !provider
->ftp_marked
)
1276 whack
= provider
->ftp_marked
= 1;
1277 lck_mtx_unlock(&provider
->ftp_mtx
);
1282 * If the process is dead, we're just waiting for the
1283 * last probe to be disabled to be able to free it.
1285 if (provider
->ftp_rcount
== 0 && !provider
->ftp_marked
)
1286 whack
= provider
->ftp_marked
= 1;
1287 lck_mtx_unlock(&provider
->ftp_mtx
);
1291 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER
);
1294 if (!probe
->ftp_enabled
)
1297 probe
->ftp_enabled
= 0;
1299 // ASSERT(MUTEX_HELD(&cpu_lock));
1300 fasttrap_disable_callbacks();
1305 fasttrap_pid_getargdesc(void *arg
, dtrace_id_t id
, void *parg
,
1306 dtrace_argdesc_t
*desc
)
1308 #pragma unused(arg, id)
1309 fasttrap_probe_t
*probe
= parg
;
1313 desc
->dtargd_native
[0] = '\0';
1314 desc
->dtargd_xlate
[0] = '\0';
1316 if (probe
->ftp_prov
->ftp_retired
!= 0 ||
1317 desc
->dtargd_ndx
>= probe
->ftp_nargs
) {
1318 desc
->dtargd_ndx
= DTRACE_ARGNONE
;
1322 ndx
= (probe
->ftp_argmap
!= NULL
) ?
1323 probe
->ftp_argmap
[desc
->dtargd_ndx
] : desc
->dtargd_ndx
;
1325 str
= probe
->ftp_ntypes
;
1326 for (i
= 0; i
< ndx
; i
++) {
1327 str
+= strlen(str
) + 1;
1330 (void) strlcpy(desc
->dtargd_native
, str
, sizeof(desc
->dtargd_native
));
1332 if (probe
->ftp_xtypes
== NULL
)
1335 str
= probe
->ftp_xtypes
;
1336 for (i
= 0; i
< desc
->dtargd_ndx
; i
++) {
1337 str
+= strlen(str
) + 1;
1340 (void) strlcpy(desc
->dtargd_xlate
, str
, sizeof(desc
->dtargd_xlate
));
1345 fasttrap_pid_destroy(void *arg
, dtrace_id_t id
, void *parg
)
1347 #pragma unused(arg, id)
1348 fasttrap_probe_t
*probe
= parg
;
1351 ASSERT(probe
!= NULL
);
1352 ASSERT(!probe
->ftp_enabled
);
1353 ASSERT(fasttrap_total
>= probe
->ftp_ntps
);
1355 os_atomic_sub(&fasttrap_total
, probe
->ftp_ntps
, relaxed
);
1356 os_atomic_sub(&fasttrap_retired
, probe
->ftp_ntps
, relaxed
);
1358 if (probe
->ftp_gen
+ 1 >= fasttrap_mod_gen
)
1359 fasttrap_mod_barrier(probe
->ftp_gen
);
1361 for (i
= 0; i
< probe
->ftp_ntps
; i
++) {
1362 zfree(fasttrap_tracepoint_t_zone
, probe
->ftp_tps
[i
].fit_tp
);
1365 if (probe
->ftp_ntps
< FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
) {
1366 zfree(fasttrap_probe_t_zones
[probe
->ftp_ntps
], probe
);
1368 size_t size
= offsetof(fasttrap_probe_t
, ftp_tps
[probe
->ftp_ntps
]);
1369 kmem_free(probe
, size
);
1374 static const dtrace_pattr_t pid_attr
= {
1375 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_ISA
},
1376 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1377 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1378 { DTRACE_STABILITY_EVOLVING
, DTRACE_STABILITY_EVOLVING
, DTRACE_CLASS_ISA
},
1379 { DTRACE_STABILITY_PRIVATE
, DTRACE_STABILITY_PRIVATE
, DTRACE_CLASS_UNKNOWN
},
1382 static dtrace_pops_t pid_pops
= {
1383 .dtps_provide
= fasttrap_pid_provide
,
1384 .dtps_provide_module
= NULL
,
1385 .dtps_enable
= fasttrap_pid_enable
,
1386 .dtps_disable
= fasttrap_pid_disable
,
1387 .dtps_suspend
= NULL
,
1388 .dtps_resume
= NULL
,
1389 .dtps_getargdesc
= fasttrap_pid_getargdesc
,
1390 .dtps_getargval
= fasttrap_pid_getarg
,
1391 .dtps_usermode
= NULL
,
1392 .dtps_destroy
= fasttrap_pid_destroy
1395 static dtrace_pops_t usdt_pops
= {
1396 .dtps_provide
= fasttrap_pid_provide
,
1397 .dtps_provide_module
= NULL
,
1398 .dtps_enable
= fasttrap_pid_enable
,
1399 .dtps_disable
= fasttrap_pid_disable
,
1400 .dtps_suspend
= NULL
,
1401 .dtps_resume
= NULL
,
1402 .dtps_getargdesc
= fasttrap_pid_getargdesc
,
1403 .dtps_getargval
= fasttrap_usdt_getarg
,
1404 .dtps_usermode
= NULL
,
1405 .dtps_destroy
= fasttrap_pid_destroy
1408 static fasttrap_proc_t
*
1409 fasttrap_proc_lookup(pid_t pid
)
1411 fasttrap_bucket_t
*bucket
;
1412 fasttrap_proc_t
*fprc
, *new_fprc
;
1414 bucket
= &fasttrap_procs
.fth_table
[FASTTRAP_PROCS_INDEX(pid
)];
1415 lck_mtx_lock(&bucket
->ftb_mtx
);
1417 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1418 if (fprc
->ftpc_pid
== pid
&& fprc
->ftpc_acount
!= 0) {
1419 lck_mtx_lock(&fprc
->ftpc_mtx
);
1420 lck_mtx_unlock(&bucket
->ftb_mtx
);
1421 fprc
->ftpc_rcount
++;
1422 os_atomic_inc(&fprc
->ftpc_acount
, relaxed
);
1423 ASSERT(fprc
->ftpc_acount
<= fprc
->ftpc_rcount
);
1424 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1431 * Drop the bucket lock so we don't try to perform a sleeping
1432 * allocation under it.
1434 lck_mtx_unlock(&bucket
->ftb_mtx
);
1436 new_fprc
= kmem_zalloc(sizeof (fasttrap_proc_t
), KM_SLEEP
);
1437 ASSERT(new_fprc
!= NULL
);
1438 new_fprc
->ftpc_pid
= pid
;
1439 new_fprc
->ftpc_rcount
= 1;
1440 new_fprc
->ftpc_acount
= 1;
1442 lck_mtx_lock(&bucket
->ftb_mtx
);
1445 * Take another lap through the list to make sure a proc hasn't
1446 * been created for this pid while we weren't under the bucket lock.
1448 for (fprc
= bucket
->ftb_data
; fprc
!= NULL
; fprc
= fprc
->ftpc_next
) {
1449 if (fprc
->ftpc_pid
== pid
&& fprc
->ftpc_acount
!= 0) {
1450 lck_mtx_lock(&fprc
->ftpc_mtx
);
1451 lck_mtx_unlock(&bucket
->ftb_mtx
);
1452 fprc
->ftpc_rcount
++;
1453 os_atomic_inc(&fprc
->ftpc_acount
, relaxed
);
1454 ASSERT(fprc
->ftpc_acount
<= fprc
->ftpc_rcount
);
1455 lck_mtx_unlock(&fprc
->ftpc_mtx
);
1457 kmem_free(new_fprc
, sizeof (fasttrap_proc_t
));
1464 * APPLE NOTE: We have to initialize all locks explicitly
1466 lck_mtx_init(&new_fprc
->ftpc_mtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1468 new_fprc
->ftpc_next
= bucket
->ftb_data
;
1469 bucket
->ftb_data
= new_fprc
;
1471 lck_mtx_unlock(&bucket
->ftb_mtx
);
1477 fasttrap_proc_release(fasttrap_proc_t
*proc
)
1479 fasttrap_bucket_t
*bucket
;
1480 fasttrap_proc_t
*fprc
, **fprcp
;
1481 pid_t pid
= proc
->ftpc_pid
;
1483 lck_mtx_lock(&proc
->ftpc_mtx
);
1485 ASSERT(proc
->ftpc_rcount
!= 0);
1486 ASSERT(proc
->ftpc_acount
<= proc
->ftpc_rcount
);
1488 if (--proc
->ftpc_rcount
!= 0) {
1489 lck_mtx_unlock(&proc
->ftpc_mtx
);
1493 lck_mtx_unlock(&proc
->ftpc_mtx
);
1496 * There should definitely be no live providers associated with this
1497 * process at this point.
1499 ASSERT(proc
->ftpc_acount
== 0);
1501 bucket
= &fasttrap_procs
.fth_table
[FASTTRAP_PROCS_INDEX(pid
)];
1502 lck_mtx_lock(&bucket
->ftb_mtx
);
1504 fprcp
= (fasttrap_proc_t
**)&bucket
->ftb_data
;
1505 while ((fprc
= *fprcp
) != NULL
) {
1509 fprcp
= &fprc
->ftpc_next
;
1513 * Something strange has happened if we can't find the proc.
1515 ASSERT(fprc
!= NULL
);
1517 *fprcp
= fprc
->ftpc_next
;
1519 lck_mtx_unlock(&bucket
->ftb_mtx
);
1522 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1523 * memory is freed even without the destroy. Maybe accounting cleanup?
1525 lck_mtx_destroy(&fprc
->ftpc_mtx
, fasttrap_lck_grp
);
1527 kmem_free(fprc
, sizeof (fasttrap_proc_t
));
1531 * Lookup a fasttrap-managed provider based on its name and associated proc.
1532 * A reference to the proc must be held for the duration of the call.
1533 * If the pattr argument is non-NULL, this function instantiates the provider
1534 * if it doesn't exist otherwise it returns NULL. The provider is returned
1535 * with its lock held.
1537 static fasttrap_provider_t
*
1538 fasttrap_provider_lookup(proc_t
*p
, fasttrap_provider_type_t provider_type
, const char *name
,
1539 const dtrace_pattr_t
*pattr
)
1541 pid_t pid
= p
->p_pid
;
1542 fasttrap_provider_t
*fp
, *new_fp
= NULL
;
1543 fasttrap_bucket_t
*bucket
;
1544 char provname
[DTRACE_PROVNAMELEN
];
1547 ASSERT(strlen(name
) < sizeof (fp
->ftp_name
));
1548 ASSERT(pattr
!= NULL
);
1550 bucket
= &fasttrap_provs
.fth_table
[FASTTRAP_PROVS_INDEX(pid
, name
)];
1551 lck_mtx_lock(&bucket
->ftb_mtx
);
1554 * Take a lap through the list and return the match if we find it.
1556 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1557 if (fp
->ftp_pid
== pid
&&
1558 fp
->ftp_provider_type
== provider_type
&&
1559 strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1561 lck_mtx_lock(&fp
->ftp_mtx
);
1562 lck_mtx_unlock(&bucket
->ftb_mtx
);
1568 * Drop the bucket lock so we don't try to perform a sleeping
1569 * allocation under it.
1571 lck_mtx_unlock(&bucket
->ftb_mtx
);
1574 * Make sure the process isn't a child created as the result
1575 * of a vfork(2), and isn't a zombie (but may be in fork).
1578 if (p
->p_lflag
& (P_LINVFORK
| P_LEXIT
)) {
1584 * Increment p_dtrace_probes so that the process knows to inform us
1585 * when it exits or execs. fasttrap_provider_free() decrements this
1586 * when we're done with this provider.
1588 p
->p_dtrace_probes
++;
1591 * Grab the credentials for this process so we have
1592 * something to pass to dtrace_register().
1593 * APPLE NOTE: We have no equivalent to crhold,
 * even though there is a cr_ref field in ucred.
1596 cred
= kauth_cred_proc_ref(p
);
1599 new_fp
= kmem_zalloc(sizeof (fasttrap_provider_t
), KM_SLEEP
);
1600 ASSERT(new_fp
!= NULL
);
1601 new_fp
->ftp_pid
= p
->p_pid
;
1602 new_fp
->ftp_proc
= fasttrap_proc_lookup(pid
);
1603 new_fp
->ftp_provider_type
= provider_type
;
1606 * APPLE NOTE: locks require explicit init
1608 lck_mtx_init(&new_fp
->ftp_mtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1609 lck_mtx_init(&new_fp
->ftp_cmtx
, fasttrap_lck_grp
, fasttrap_lck_attr
);
1611 ASSERT(new_fp
->ftp_proc
!= NULL
);
1613 lck_mtx_lock(&bucket
->ftb_mtx
);
1616 * Take another lap through the list to make sure a provider hasn't
1617 * been created for this pid while we weren't under the bucket lock.
1619 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1620 if (fp
->ftp_pid
== pid
&& strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1622 lck_mtx_lock(&fp
->ftp_mtx
);
1623 lck_mtx_unlock(&bucket
->ftb_mtx
);
1624 fasttrap_provider_free(new_fp
);
1625 kauth_cred_unref(&cred
);
1630 (void) strlcpy(new_fp
->ftp_name
, name
, sizeof(new_fp
->ftp_name
));
1633 * Fail and return NULL if either the provider name is too long
1634 * or we fail to register this new provider with the DTrace
1635 * framework. Note that this is the only place we ever construct
 * the full provider name -- we keep it in pieces in the provider structure.
1639 if (snprintf(provname
, sizeof (provname
), "%s%u", name
, (uint_t
)pid
) >=
1640 (int)sizeof (provname
) ||
1641 dtrace_register(provname
, pattr
,
1642 DTRACE_PRIV_PROC
| DTRACE_PRIV_OWNER
| DTRACE_PRIV_ZONEOWNER
, cred
,
1643 pattr
== &pid_attr
? &pid_pops
: &usdt_pops
, new_fp
,
1644 &new_fp
->ftp_provid
) != 0) {
1645 lck_mtx_unlock(&bucket
->ftb_mtx
);
1646 fasttrap_provider_free(new_fp
);
1647 kauth_cred_unref(&cred
);
1651 new_fp
->ftp_next
= bucket
->ftb_data
;
1652 bucket
->ftb_data
= new_fp
;
1654 lck_mtx_lock(&new_fp
->ftp_mtx
);
1655 lck_mtx_unlock(&bucket
->ftb_mtx
);
1657 kauth_cred_unref(&cred
);
1663 fasttrap_provider_free(fasttrap_provider_t
*provider
)
1665 pid_t pid
= provider
->ftp_pid
;
1669 * There need to be no associated enabled probes, no consumers
1670 * creating probes, and no meta providers referencing this provider.
1672 ASSERT(provider
->ftp_rcount
== 0);
1673 ASSERT(provider
->ftp_ccount
== 0);
1674 ASSERT(provider
->ftp_mcount
== 0);
1677 * If this provider hasn't been retired, we need to explicitly drop the
1678 * count of active providers on the associated process structure.
1680 if (!provider
->ftp_retired
) {
1681 os_atomic_dec(&provider
->ftp_proc
->ftpc_acount
, relaxed
);
1682 ASSERT(provider
->ftp_proc
->ftpc_acount
<
1683 provider
->ftp_proc
->ftpc_rcount
);
1686 fasttrap_proc_release(provider
->ftp_proc
);
1689 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1690 * memory is freed even without the destroy. Maybe accounting cleanup?
1692 lck_mtx_destroy(&provider
->ftp_mtx
, fasttrap_lck_grp
);
1693 lck_mtx_destroy(&provider
->ftp_cmtx
, fasttrap_lck_grp
);
1695 kmem_free(provider
, sizeof (fasttrap_provider_t
));
1698 * Decrement p_dtrace_probes on the process whose provider we're
 * freeing. We don't have to worry about clobbering someone else's
1700 * modifications to it because we have locked the bucket that
1701 * corresponds to this process's hash chain in the provider hash
1702 * table. Don't sweat it if we can't find the process.
1704 if ((p
= proc_find(pid
)) == NULL
) {
1709 p
->p_dtrace_probes
--;
1716 fasttrap_provider_retire(proc_t
*p
, const char *name
, int mprov
)
1718 fasttrap_provider_t
*fp
;
1719 fasttrap_bucket_t
*bucket
;
1720 dtrace_provider_id_t provid
;
1721 ASSERT(strlen(name
) < sizeof (fp
->ftp_name
));
1723 bucket
= &fasttrap_provs
.fth_table
[FASTTRAP_PROVS_INDEX(p
->p_pid
, name
)];
1724 lck_mtx_lock(&bucket
->ftb_mtx
);
1726 for (fp
= bucket
->ftb_data
; fp
!= NULL
; fp
= fp
->ftp_next
) {
1727 if (fp
->ftp_pid
== p
->p_pid
&& strncmp(fp
->ftp_name
, name
, sizeof(fp
->ftp_name
)) == 0 &&
1733 lck_mtx_unlock(&bucket
->ftb_mtx
);
1737 lck_mtx_lock(&fp
->ftp_mtx
);
1738 ASSERT(!mprov
|| fp
->ftp_mcount
> 0);
1739 if (mprov
&& --fp
->ftp_mcount
!= 0) {
1740 lck_mtx_unlock(&fp
->ftp_mtx
);
1741 lck_mtx_unlock(&bucket
->ftb_mtx
);
1746 * Mark the provider to be removed in our post-processing step, mark it
1747 * retired, and drop the active count on its proc. Marking it indicates
1748 * that we should try to remove it; setting the retired flag indicates
 * that we're done with this provider; dropping the active count on the proc
1750 * releases our hold, and when this reaches zero (as it will during
1751 * exit or exec) the proc and associated providers become defunct.
1753 * We obviously need to take the bucket lock before the provider lock
1754 * to perform the lookup, but we need to drop the provider lock
1755 * before calling into the DTrace framework since we acquire the
1756 * provider lock in callbacks invoked from the DTrace framework. The
 * bucket lock therefore protects the integrity of the provider hash table.
1760 os_atomic_dec(&fp
->ftp_proc
->ftpc_acount
, relaxed
);
1761 ASSERT(fp
->ftp_proc
->ftpc_acount
< fp
->ftp_proc
->ftpc_rcount
);
 * Add this provider's probes to the retired count and
1765 * make sure we don't add them twice
1767 os_atomic_add(&fasttrap_retired
, fp
->ftp_pcount
, relaxed
);
1770 fp
->ftp_retired
= 1;
1772 provid
= fp
->ftp_provid
;
1773 lck_mtx_unlock(&fp
->ftp_mtx
);
1776 * We don't have to worry about invalidating the same provider twice
1777 * since fasttrap_provider_lookup() will ignore providers that have
1778 * been marked as retired.
1780 dtrace_invalidate(provid
);
1782 lck_mtx_unlock(&bucket
->ftb_mtx
);
1784 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER
);
1788 fasttrap_uint32_cmp(const void *ap
, const void *bp
)
1790 return (*(const uint32_t *)ap
- *(const uint32_t *)bp
);
1794 fasttrap_uint64_cmp(const void *ap
, const void *bp
)
1796 return (*(const uint64_t *)ap
- *(const uint64_t *)bp
);
1800 fasttrap_add_probe(fasttrap_probe_spec_t
*pdata
)
1803 fasttrap_provider_t
*provider
;
1804 fasttrap_probe_t
*pp
;
1805 fasttrap_tracepoint_t
*tp
;
1807 unsigned int i
, aframes
, whack
;
1810 * There needs to be at least one desired trace point.
1812 if (pdata
->ftps_noffs
== 0)
1815 switch (pdata
->ftps_probe_type
) {
1818 aframes
= FASTTRAP_ENTRY_AFRAMES
;
1822 aframes
= FASTTRAP_RETURN_AFRAMES
;
1832 const char* provider_name
;
1833 switch (pdata
->ftps_provider_type
) {
1834 case DTFTP_PROVIDER_PID
:
1835 provider_name
= FASTTRAP_PID_NAME
;
1837 case DTFTP_PROVIDER_OBJC
:
1838 provider_name
= FASTTRAP_OBJC_NAME
;
1840 case DTFTP_PROVIDER_ONESHOT
:
1841 provider_name
= FASTTRAP_ONESHOT_NAME
;
1847 p
= proc_find(pdata
->ftps_pid
);
1851 if ((provider
= fasttrap_provider_lookup(p
, pdata
->ftps_provider_type
,
1852 provider_name
, &pid_attr
)) == NULL
) {
1859 * Increment this reference count to indicate that a consumer is
1860 * actively adding a new probe associated with this provider. This
1861 * prevents the provider from being deleted -- we'll need to check
1862 * for pending deletions when we drop this reference count.
1864 provider
->ftp_ccount
++;
1865 lck_mtx_unlock(&provider
->ftp_mtx
);
1868 * Grab the creation lock to ensure consistency between calls to
1869 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1870 * other threads creating probes. We must drop the provider lock
 * before taking this lock to avoid a three-way deadlock with the DTrace framework.
1874 lck_mtx_lock(&provider
->ftp_cmtx
);
1877 for (i
= 0; i
< pdata
->ftps_noffs
; i
++) {
1880 (void) snprintf(name_str
, sizeof(name_str
), "%llx",
1881 (uint64_t)pdata
->ftps_offs
[i
]);
1883 if (dtrace_probe_lookup(provider
->ftp_provid
,
1884 pdata
->ftps_mod
, pdata
->ftps_func
, name_str
) != 0)
1887 os_atomic_inc(&fasttrap_total
, relaxed
);
1888 if (fasttrap_total
> fasttrap_max
) {
1889 os_atomic_dec(&fasttrap_total
, relaxed
);
1892 provider
->ftp_pcount
++;
1894 pp
= zalloc(fasttrap_probe_t_zones
[1]);
1895 bzero(pp
, sizeof (fasttrap_probe_t
));
1897 pp
->ftp_prov
= provider
;
1898 pp
->ftp_faddr
= pdata
->ftps_pc
;
1899 pp
->ftp_fsize
= pdata
->ftps_size
;
1900 pp
->ftp_pid
= pdata
->ftps_pid
;
1903 tp
= zalloc(fasttrap_tracepoint_t_zone
);
1904 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
1906 tp
->ftt_proc
= provider
->ftp_proc
;
1907 tp
->ftt_pc
= pdata
->ftps_offs
[i
] + pdata
->ftps_pc
;
1908 tp
->ftt_pid
= pdata
->ftps_pid
;
1910 #if defined(__arm__) || defined(__arm64__)
1912 * On arm the subinfo is used to distinguish between arm
1913 * and thumb modes. On arm64 there is no thumb mode, so
 * this field is simply initialized to 0 on its way into the kernel.
1917 tp
->ftt_fntype
= pdata
->ftps_arch_subinfo
;
1920 pp
->ftp_tps
[0].fit_tp
= tp
;
1921 pp
->ftp_tps
[0].fit_id
.fti_probe
= pp
;
1922 pp
->ftp_tps
[0].fit_id
.fti_ptype
= pdata
->ftps_probe_type
;
1923 pp
->ftp_id
= dtrace_probe_create(provider
->ftp_provid
,
1924 pdata
->ftps_mod
, pdata
->ftps_func
, name_str
,
1925 FASTTRAP_OFFSET_AFRAMES
, pp
);
1928 } else if (dtrace_probe_lookup(provider
->ftp_provid
, pdata
->ftps_mod
,
1929 pdata
->ftps_func
, name
) == 0) {
1930 os_atomic_add(&fasttrap_total
, pdata
->ftps_noffs
, relaxed
);
1932 if (fasttrap_total
> fasttrap_max
) {
1933 os_atomic_sub(&fasttrap_total
, pdata
->ftps_noffs
, relaxed
);
1938 * Make sure all tracepoint program counter values are unique.
 * We later assume that each probe has exactly one tracepoint for a given pc.
1942 qsort(pdata
->ftps_offs
, pdata
->ftps_noffs
,
1943 sizeof (uint64_t), fasttrap_uint64_cmp
);
1944 for (i
= 1; i
< pdata
->ftps_noffs
; i
++) {
1945 if (pdata
->ftps_offs
[i
] > pdata
->ftps_offs
[i
- 1])
1948 os_atomic_sub(&fasttrap_total
, pdata
->ftps_noffs
, relaxed
);
1951 provider
->ftp_pcount
+= pdata
->ftps_noffs
;
1952 ASSERT(pdata
->ftps_noffs
> 0);
1953 if (pdata
->ftps_noffs
< FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
) {
1954 pp
= zalloc(fasttrap_probe_t_zones
[pdata
->ftps_noffs
]);
1955 bzero(pp
, offsetof(fasttrap_probe_t
, ftp_tps
[pdata
->ftps_noffs
]));
1957 pp
= kmem_zalloc(offsetof(fasttrap_probe_t
, ftp_tps
[pdata
->ftps_noffs
]), KM_SLEEP
);
1960 pp
->ftp_prov
= provider
;
1961 pp
->ftp_faddr
= pdata
->ftps_pc
;
1962 pp
->ftp_fsize
= pdata
->ftps_size
;
1963 pp
->ftp_pid
= pdata
->ftps_pid
;
1964 pp
->ftp_ntps
= pdata
->ftps_noffs
;
1966 for (i
= 0; i
< pdata
->ftps_noffs
; i
++) {
1967 tp
= zalloc(fasttrap_tracepoint_t_zone
);
1968 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
1969 tp
->ftt_proc
= provider
->ftp_proc
;
1970 tp
->ftt_pc
= pdata
->ftps_offs
[i
] + pdata
->ftps_pc
;
1971 tp
->ftt_pid
= pdata
->ftps_pid
;
1973 #if defined(__arm__) || defined (__arm64__)
1975 * On arm the subinfo is used to distinguish between arm
1976 * and thumb modes. On arm64 there is no thumb mode, so
 * this field is simply initialized to 0 on its way into the kernel.
1981 tp
->ftt_fntype
= pdata
->ftps_arch_subinfo
;
1983 pp
->ftp_tps
[i
].fit_tp
= tp
;
1984 pp
->ftp_tps
[i
].fit_id
.fti_probe
= pp
;
1985 pp
->ftp_tps
[i
].fit_id
.fti_ptype
= pdata
->ftps_probe_type
;
1988 pp
->ftp_id
= dtrace_probe_create(provider
->ftp_provid
,
1989 pdata
->ftps_mod
, pdata
->ftps_func
, name
, aframes
, pp
);
1992 lck_mtx_unlock(&provider
->ftp_cmtx
);
1995 * We know that the provider is still valid since we incremented the
1996 * creation reference count. If someone tried to clean up this provider
1997 * while we were using it (e.g. because the process called exec(2) or
1998 * exit(2)), take note of that and try to clean it up now.
2000 lck_mtx_lock(&provider
->ftp_mtx
);
2001 provider
->ftp_ccount
--;
2002 whack
= provider
->ftp_retired
;
2003 lck_mtx_unlock(&provider
->ftp_mtx
);
2006 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER
);
2012 * If we've exhausted the allowable resources, we'll try to remove
2013 * this provider to free some up. This is to cover the case where
2014 * the user has accidentally created many more probes than was
2015 * intended (e.g. pid123:::).
2017 lck_mtx_unlock(&provider
->ftp_cmtx
);
2018 lck_mtx_lock(&provider
->ftp_mtx
);
2019 provider
->ftp_ccount
--;
2020 provider
->ftp_marked
= 1;
2021 lck_mtx_unlock(&provider
->ftp_mtx
);
2023 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER
);
2030 fasttrap_meta_provide(void *arg
, dtrace_helper_provdesc_t
*dhpv
, proc_t
*p
)
2033 fasttrap_provider_t
*provider
;
2036 * A 32-bit unsigned integer (like a pid for example) can be
2037 * expressed in 10 or fewer decimal digits. Make sure that we'll
2038 * have enough space for the provider name.
2040 if (strlen(dhpv
->dthpv_provname
) + 10 >=
2041 sizeof (provider
->ftp_name
)) {
2042 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
2043 "name too long to accomodate pid", dhpv
->dthpv_provname
);
2048 * Don't let folks spoof the true pid provider.
2050 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_PID_NAME
, sizeof(FASTTRAP_PID_NAME
)) == 0) {
2051 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
2052 "%s is an invalid name", dhpv
->dthpv_provname
,
2058 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2060 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_OBJC_NAME
, sizeof(FASTTRAP_OBJC_NAME
)) == 0) {
2061 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
2062 "%s is an invalid name", dhpv
->dthpv_provname
,
2063 FASTTRAP_OBJC_NAME
);
2066 if (strncmp(dhpv
->dthpv_provname
, FASTTRAP_ONESHOT_NAME
, sizeof(FASTTRAP_ONESHOT_NAME
)) == 0) {
2067 cmn_err(CE_WARN
, "failed to instantiate provider %s: "
2068 "%s is an invalid name", dhpv
->dthpv_provname
,
2069 FASTTRAP_ONESHOT_NAME
);
2074 * The highest stability class that fasttrap supports is ISA; cap
2075 * the stability of the new provider accordingly.
2077 if (dhpv
->dthpv_pattr
.dtpa_provider
.dtat_class
> DTRACE_CLASS_ISA
)
2078 dhpv
->dthpv_pattr
.dtpa_provider
.dtat_class
= DTRACE_CLASS_ISA
;
2079 if (dhpv
->dthpv_pattr
.dtpa_mod
.dtat_class
> DTRACE_CLASS_ISA
)
2080 dhpv
->dthpv_pattr
.dtpa_mod
.dtat_class
= DTRACE_CLASS_ISA
;
2081 if (dhpv
->dthpv_pattr
.dtpa_func
.dtat_class
> DTRACE_CLASS_ISA
)
2082 dhpv
->dthpv_pattr
.dtpa_func
.dtat_class
= DTRACE_CLASS_ISA
;
2083 if (dhpv
->dthpv_pattr
.dtpa_name
.dtat_class
> DTRACE_CLASS_ISA
)
2084 dhpv
->dthpv_pattr
.dtpa_name
.dtat_class
= DTRACE_CLASS_ISA
;
2085 if (dhpv
->dthpv_pattr
.dtpa_args
.dtat_class
> DTRACE_CLASS_ISA
)
2086 dhpv
->dthpv_pattr
.dtpa_args
.dtat_class
= DTRACE_CLASS_ISA
;
2088 if ((provider
= fasttrap_provider_lookup(p
, DTFTP_PROVIDER_USDT
, dhpv
->dthpv_provname
,
2089 &dhpv
->dthpv_pattr
)) == NULL
) {
2090 cmn_err(CE_WARN
, "failed to instantiate provider %s for "
2091 "process %u", dhpv
->dthpv_provname
, (uint_t
)p
->p_pid
);
2098 * USDT probes (fasttrap meta probes) are very expensive to create.
2099 * Profiling has shown that the largest single cost is verifying that
2100 * dtrace hasn't already created a given meta_probe. The reason for
2101 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2102 * each static probe being created. We want to get rid of that check.
2103 * The simplest way of eliminating it is to deny the ability to add
2104 * probes to an existing provider. If the provider already exists, BZZT!
2105 * This still leaves the possibility of intentionally malformed DOF
2106 * having duplicate probes. However, duplicate probes are not fatal,
2107 * and there is no way to get that by accident, so we will not check
2110 * UPDATE: It turns out there are several use cases that require adding
2111 * probes to existing providers. Disabling the dtrace_probe_lookup()
2112 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2116 * Up the meta provider count so this provider isn't removed until
2117 * the meta provider has been told to remove it.
2119 provider
->ftp_mcount
++;
2121 lck_mtx_unlock(&provider
->ftp_mtx
);
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg)
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	/*
	 * APPLE NOTE: This is hideously expensive. See note in
	 * fasttrap_meta_provide() for why we can get away without
	 * checking here.
	 */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	os_atomic_add(&fasttrap_total, ntps, relaxed);

	if (fasttrap_total > fasttrap_max) {
		os_atomic_sub(&fasttrap_total, ntps, relaxed);
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	provider->ftp_pcount += ntps;

	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc(fasttrap_probe_t_zones[ntps]);
		bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
	} else {
		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
	}

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
	}
	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}

static char*
fasttrap_meta_provider_name(void *arg)
{
	fasttrap_provider_t *fprovider = arg;
	dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
	return provider->dtpv_name;
}
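
/*
 * fasttrap_mops below is the meta-provider callback table: the DTrace
 * framework calls into it to create USDT providers and probes when a
 * process's helper DOF is loaded, and to retire them again when the
 * process goes away. The table is handed to the framework via
 * dtrace_meta_register() in fasttrap_attach() below.
 */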
static dtrace_mops_t fasttrap_mops = {
	.dtms_create_probe =	fasttrap_meta_create_probe,
	.dtms_provide_proc =	fasttrap_meta_provide,
	.dtms_remove_proc =	fasttrap_meta_remove,
	.dtms_provider_name =	fasttrap_meta_provider_name
};
/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or is not a valid UTF8 string, the function returns a non-zero
 * value. Otherwise, 0 is returned.
 *
 * str: string to validate.
 * maxlen: maximum length of the string, terminating null byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen) {
	size_t len;

	assert(str);
	assert(maxlen != 0);

	/* Check if the string is null-terminated. */
	len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* Finally, check for UTF8 validity. */
	return utf8_validatestr((unsigned const char*) str, len);
}
static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;

		if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
		    sizeof (probe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
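
		/*
		 * Because noffs is bounded by the check above, this size
		 * computation cannot overflow and the resulting allocation
		 * stays below 1 MB: sizeof (fasttrap_probe_spec_t) plus
		 * (noffs - 1) tracepoint offsets, which is exactly what
		 * kmem_alloc() is asked for below.
		 */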
		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(arg, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				ret = ESRCH;
				goto err;
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD | VWRITE)) != 0) {
			// 	mutex_exit(&p->p_lock);
			// 	return (ret);
			// }
			proc_rele(p);
		}

		ret = fasttrap_add_probe(probe);

err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;

		if (copyin(arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}
			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD)) != 0) {
			// 	mutex_exit(&p->p_lock);
			// 	return (ret);
			// }
			proc_rele(p);
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}
void
fasttrap_attach(void)
{
	ulong_t nent;
	unsigned int i;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory. 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;

	if (fasttrap_max == 0)
		fasttrap_max = 50000;
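
	/*
	 * For example, with 8 GB of physical memory sane_size >> 28 is 32,
	 * so fasttrap_max works out to 3,200,000 probes; the 50,000 floor
	 * only matters on machines with less than 256 MB.
	 */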
	fasttrap_retired = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#ifdef illumos
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
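
	/*
	 * fth_nent is always a power of two at this point, so fth_mask
	 * (fth_nent - 1) lets the hash macros reduce a pid/pc hash to a
	 * bucket index with a single AND instead of a modulo. A
	 * non-power-of-two request is rounded up to the next power of two
	 * via fasttrap_highbit(); e.g. a requested size of 1000 becomes 1024.
	 */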
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);

	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);
}
static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}
static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	int err, rv = 0;
	user_addr_t uaddrp;

	if (proc_is64bit(p))
		uaddrp = *(user_addr_t *)data;
	else
		uaddrp = (user_addr_t) *(uint32_t *)data;

	err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	} else
		return 0;
}
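
/*
 * A worked example of the errno overload above: if fasttrap_ioctl() fails
 * with EINVAL (22), the caller sees errno 22 directly. If it instead
 * succeeds but reports rv = 3, the return value is 3 << 12 = 12288, so a
 * caller that knows this convention can tell the two cases apart by whether
 * errno is below 4096 and can recover rv by shifting right by 12.
 */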
static int fasttrap_inited = 0;

#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fasttrap_cdevsw =
{
	_fasttrap_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	_fasttrap_ioctl,	/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};
void fasttrap_init(void);

void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places: any open of /dev/dtrace,
	 * and also dtrace_init() if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (!fasttrap_inited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
			return;
		}

		/*
		 * Allocate the fasttrap_tracepoint_t zone
		 */
		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
						   1024 * sizeof(fasttrap_tracepoint_t),
						   sizeof(fasttrap_tracepoint_t),
						   "dtrace.fasttrap_tracepoint_t");

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		int i;
		for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
							  1024 * zone_element_size,
							  zone_element_size,
							  fasttrap_probe_t_zone_names[i]);
		}

		/*
		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
		 */
		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);

		/*
		 * Initialize global locks
		 */
		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		fasttrap_attach();

		/*
		 * Start the fasttrap cleanup thread
		 */
		kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
		if (res != KERN_SUCCESS) {
			panic("Could not create fasttrap_cleanup_thread");
		}
		thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");

		fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
		fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
					KM_SLEEP);
		lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		fasttrap_inited = 1;
	}
}

#undef FASTTRAP_MAJOR