/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
 */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/zalloc.h>
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead up a reference count to prevent
 * the provider from disappearing, drop the provider lock, and acquire the
 * creation lock.
 *
 * The lock ordering is:
 *    bucket lock before provider lock
 *    DTrace before provider lock
 *    creation lock before DTrace
 *    never hold the provider lock and creation lock simultaneously
 */
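
/*
 * Illustrative sketch (added note, not from the original source): a typical
 * provider lookup therefore takes the locks in this order, which matches
 * fasttrap_provider_lookup() below:
 *
 *    lck_mtx_lock(&bucket->ftb_mtx);        // bucket lock first
 *    ... find the provider fp in the chain ...
 *    lck_mtx_lock(&fp->ftp_mtx);            // then the provider lock
 *    lck_mtx_unlock(&bucket->ftb_mtx);      // bucket lock can now be dropped
 */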
static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_call_t fasttrap_timeout;
static lck_mtx_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;
/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
static uint32_t fasttrap_max;
static uint32_t fasttrap_total;
#define FASTTRAP_TPOINTS_DEFAULT_SIZE   0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE     0x100

fasttrap_hash_t        fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;
static fasttrap_hash_t fasttrap_procs;

static uint64_t  fasttrap_pid_count;  /* pid ref count */
static lck_mtx_t fasttrap_count_mtx;  /* lock on ref count */
#define FASTTRAP_ENABLE_FAIL    1
#define FASTTRAP_ENABLE_PARTIAL 2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t *, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t *, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
    ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
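
/*
 * Note (added): the two macros above simply map a (pid, provider name) pair
 * or a bare pid to a bucket index in fasttrap_provs/fasttrap_procs; fth_mask
 * is assumed to be (table size - 1), i.e. the tables are power-of-two sized.
 */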
/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */
struct zone *fasttrap_tracepoint_t_zone;

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
    "",
    "dtrace.fasttrap_probe_t[1]",
    "dtrace.fasttrap_probe_t[2]",
    "dtrace.fasttrap_probe_t[3]"
};
/*
 * APPLE NOTE: We have to manage locks explicitly
 */
lck_grp_t      *fasttrap_lck_grp;
lck_grp_attr_t *fasttrap_lck_grp_attr;
lck_attr_t     *fasttrap_lck_attr;
fasttrap_highbit(ulong_t i)
{
    if (i & 0xffffffff00000000ul) {

    if (i & 0xffff0000) {

fasttrap_hash_str(const char *p)
{
        hval = (hval << 4) + *p++;
        if ((g = (hval & 0xf0000000)) != 0)
/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)
#if !defined(__APPLE__)
    sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

    sqp->sq_info.si_signo = SIGTRAP;
    sqp->sq_info.si_code = TRAP_DTRACE;
    sqp->sq_info.si_addr = (caddr_t)pc;

    mutex_enter(&p->p_lock);
    mutex_exit(&p->p_lock);
#endif /* __APPLE__ */

    printf("fasttrap_sigtrap called with no implementation.\n");
/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
fasttrap_mod_barrier(uint64_t gen)
{
    if (gen < fasttrap_mod_gen)
        return;

    for (i = 0; i < NCPU; i++) {
        lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
        lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
    }
}
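
/*
 * Note on the barrier above (added; assumption about how probe context
 * behaves): a thread in fasttrap probe context holds its CPU's cpuc_pid_lock
 * while it examines tracepoint state, so acquiring and releasing every CPU's
 * lock in turn guarantees that any thread which could still observe the old
 * generation has drained before the caller frees the associated memory.
 */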
/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
fasttrap_pid_cleanup_cb(void *ignored, void *ignored2)
{
#pragma unused(ignored, ignored2)
    fasttrap_provider_t **fpp, *fp;
    fasttrap_bucket_t *bucket;
    dtrace_provider_id_t provid;
    unsigned int i, later = 0;

    static volatile int in = 0;

    lck_mtx_lock(&fasttrap_cleanup_mtx);
    while (fasttrap_cleanup_work) {
        fasttrap_cleanup_work = 0;
        lck_mtx_unlock(&fasttrap_cleanup_mtx);

        /*
         * Iterate over all the providers trying to remove the marked
         * ones. If a provider is marked but not retired, we just
         * have to take a crack at removing it -- it's no big deal if
         * we get it wrong.
         */
        for (i = 0; i < fasttrap_provs.fth_nent; i++) {
            bucket = &fasttrap_provs.fth_table[i];
            lck_mtx_lock(&bucket->ftb_mtx);
            fpp = (fasttrap_provider_t **)&bucket->ftb_data;

            while ((fp = *fpp) != NULL) {
                if (!fp->ftp_marked) {
                    fpp = &fp->ftp_next;
                    continue;
                }

                lck_mtx_lock(&fp->ftp_mtx);

                /*
                 * If this provider has consumers actively
                 * creating probes (ftp_ccount) or is a USDT
                 * provider (ftp_mcount), we can't unregister
                 * or even condense.
                 */
                if (fp->ftp_ccount != 0 ||
                    fp->ftp_mcount != 0) {
                    lck_mtx_unlock(&fp->ftp_mtx);
                    fp->ftp_marked = 0;
                    continue;
                }

                if (!fp->ftp_retired || fp->ftp_rcount != 0)
                    fp->ftp_marked = 0;

                lck_mtx_unlock(&fp->ftp_mtx);

                /*
                 * If we successfully unregister this
                 * provider we can remove it from the hash
                 * chain and free the memory. If our attempt
                 * to unregister fails and this is a retired
                 * provider, increment our flag to try again
                 * pretty soon. If we've consumed more than
                 * half of our total permitted number of
                 * probes call dtrace_condense() to try to
                 * clean out the unenabled probes.
                 */
                provid = fp->ftp_provid;
                if (dtrace_unregister(provid) != 0) {
                    if (fasttrap_total > fasttrap_max / 2)
                        (void) dtrace_condense(provid);
                    later += fp->ftp_marked;
                    fpp = &fp->ftp_next;
                } else {
                    *fpp = fp->ftp_next;
                    fasttrap_provider_free(fp);
                }
            }
            lck_mtx_unlock(&bucket->ftb_mtx);
        }

        lck_mtx_lock(&fasttrap_cleanup_mtx);
    }

    ASSERT(fasttrap_timeout != 0);

    /*
     * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
     */
    if (fasttrap_timeout != (thread_call_t)1)
        thread_call_free(fasttrap_timeout);

    /*
     * If we were unable to remove a retired provider, try again after
     * a second. This situation can occur in certain circumstances where
     * providers cannot be unregistered even though they have no probes
     * enabled because of an execution of dtrace -l or something similar.
     * If the timeout has been disabled (set to 1 because we're trying
     * to detach), we set fasttrap_cleanup_work to ensure that we'll
     * get a chance to do that work if and when the timeout is reenabled.
     */
    if (later > 0 && fasttrap_timeout != (thread_call_t)1)
        /* The time value passed to dtrace_timeout is in nanos */
        fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
    else if (later > 0)
        fasttrap_cleanup_work = 1;
    else
        fasttrap_timeout = 0;

    lck_mtx_unlock(&fasttrap_cleanup_mtx);
/*
 * Activates the asynchronous cleanup mechanism.
 */
fasttrap_pid_cleanup(void)
{
    lck_mtx_lock(&fasttrap_cleanup_mtx);
    fasttrap_cleanup_work = 1;
    if (fasttrap_timeout == 0)
        fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
    lck_mtx_unlock(&fasttrap_cleanup_mtx);
}
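
/*
 * Usage summary (added note): callers such as fasttrap_pid_disable() and
 * fasttrap_provider_retire() mark a provider (ftp_marked/ftp_retired) and then
 * call fasttrap_pid_cleanup(); the actual unregistration and freeing happens
 * asynchronously in fasttrap_pid_cleanup_cb() above.
 */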
/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
fasttrap_fork(proc_t *p, proc_t *cp)
{
    pid_t ppid = p->p_pid;

    ASSERT(current_proc() == p);
    lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
    ASSERT(p->p_dtrace_count > 0);
    ASSERT(cp->p_dtrace_count == 0);

    /*
     * This would be simpler and faster if we maintained per-process
     * hash tables of enabled tracepoints. It could, however, potentially
     * slow down execution of a tracepoint since we'd need to go
     * through two levels of indirection. In the future, we should
     * consider either maintaining per-process ancillary lists of
     * enabled tracepoints or hanging a pointer to a per-process hash
     * table of enabled tracepoints off the proc structure.
     */

    /*
     * We don't have to worry about the child process disappearing
     * because we're in fork().
     */
    if (cp != sprlock(cp->p_pid)) {
        printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
    }

    /*
     * Iterate over every tracepoint looking for ones that belong to the
     * parent process, and remove each from the child process.
     */
    for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
        fasttrap_tracepoint_t *tp;
        fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

        lck_mtx_lock(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
            if (tp->ftt_pid == ppid &&
                tp->ftt_proc->ftpc_acount != 0) {
                fasttrap_tracepoint_remove(cp, tp);

                /*
                 * The count of active providers can only be
                 * decremented (i.e. to zero) during exec,
                 * exit, and removal of a meta provider so it
                 * should be impossible to drop the count
                 * here.
                 */
                ASSERT(tp->ftt_proc->ftpc_acount != 0);
            }
        }
        lck_mtx_unlock(&bucket->ftb_mtx);
    }

    /*
     * Free any ptss pages/entries in the child.
     */
    dtrace_ptss_fork(p, cp);
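
/*
 * Note (added): tracepoints live in the single global fasttrap_tpoints hash
 * keyed by (pid, pc), which is why the loop above must walk every bucket to
 * find the parent's entries rather than consulting a per-process list.
 */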
/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
fasttrap_exec_exit(proc_t *p)
{
    ASSERT(p == current_proc());
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);

    /* APPLE NOTE: Okay, the locking here is really odd and needs some
     * explaining. This method is always called with the proc_lock held.
     * We must drop the proc_lock before calling fasttrap_provider_retire
     * to avoid a deadlock when it takes the bucket lock.
     *
     * Next, the dtrace_ptss_exec_exit function requires the sprlock
     * be held, but not the proc_lock.
     *
     * Finally, we must re-acquire the proc_lock.
     */

    /*
     * We clean up the pid provider for this process here; user-land
     * static probes are handled by the meta-provider remove entry point.
     */
    fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

    /*
     * APPLE NOTE: We also need to remove any aliased providers.
     * XXX optimization: track which provider types are instantiated
     * and only retire as needed.
     */
    fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
    fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

    /*
     * This should be called after it is no longer possible for a user
     * thread to execute (potentially dtrace instrumented) instructions.
     */
    lck_mtx_lock(&p->p_dtrace_sprlock);
    dtrace_ptss_exec_exit(p);
    lck_mtx_unlock(&p->p_dtrace_sprlock);
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
    /*
     * There are no "default" pid probes.
     */
}
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
    fasttrap_tracepoint_t *tp, *new_tp = NULL;
    fasttrap_bucket_t *bucket;

    ASSERT(index < probe->ftp_ntps);

    pid = probe->ftp_pid;
    pc = probe->ftp_tps[index].fit_tp->ftt_pc;
    id = &probe->ftp_tps[index].fit_id;

    ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

    //ASSERT(!(p->p_flag & SVFORK));

    /*
     * Before we make any modifications, make sure we've imposed a barrier
     * on the generation in which this probe was last modified.
     */
    fasttrap_mod_barrier(probe->ftp_gen);

    bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

    /*
     * If the tracepoint has already been enabled, just add our id to the
     * list of interested probes. This may be our second time through
     * this path in which case we'll have constructed the tracepoint we'd
     * like to install. If we can't find a match, and have an allocated
     * tracepoint ready to go, enable that one now.
     *
     * A tracepoint whose process is defunct is also considered defunct.
     */
again:
    lck_mtx_lock(&bucket->ftb_mtx);
    for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
        /*
         * Note that it's safe to access the active count on the
         * associated proc structure because we know that at least one
         * provider (this one) will still be around throughout this
         * operation.
         */
        if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
            tp->ftt_proc->ftpc_acount == 0)
            continue;

        /*
         * Now that we've found a matching tracepoint, it would be
         * a decent idea to confirm that the tracepoint is still
         * enabled and the trap instruction hasn't been overwritten.
         * Since this is a little hairy, we'll punt for now.
         */

        /*
         * This can't be the first interested probe. We don't have
         * to worry about another thread being in the midst of
         * deleting this tracepoint (which would be the only valid
         * reason for a tracepoint to have no interested probes)
         * since we're holding P_PR_LOCK for this process.
         */
        ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

        switch (id->fti_ptype) {
        case DTFTP_IS_ENABLED:
            id->fti_next = tp->ftt_ids;
            dtrace_membar_producer();
            tp->ftt_ids = id;
            dtrace_membar_producer();
            break;

        case DTFTP_POST_OFFSETS:
            id->fti_next = tp->ftt_retids;
            dtrace_membar_producer();
            tp->ftt_retids = id;
            dtrace_membar_producer();
            break;
        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        if (new_tp != NULL) {
            new_tp->ftt_ids = NULL;
            new_tp->ftt_retids = NULL;
        }
    }

    /*
     * If we have a good tracepoint ready to go, install it now while
     * we have the lock held and no one can screw with us.
     */
    if (new_tp != NULL) {
        new_tp->ftt_next = bucket->ftb_data;
        dtrace_membar_producer();
        bucket->ftb_data = new_tp;
        dtrace_membar_producer();
        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Activate the tracepoint in the ISA-specific manner.
         * If this fails, we need to report the failure, but
         * indicate that this tracepoint must still be disabled
         * by calling fasttrap_tracepoint_disable().
         */
        if (fasttrap_tracepoint_install(p, new_tp) != 0)
            rc = FASTTRAP_ENABLE_PARTIAL;

        /*
         * Increment the count of the number of tracepoints active in
         * the victim process.
         */
        //ASSERT(p->p_proc_flag & P_PR_LOCK);
    }

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Initialize the tracepoint that's been preallocated with the probe.
     */
    new_tp = probe->ftp_tps[index].fit_tp;

    ASSERT(new_tp->ftt_pid == pid);
    ASSERT(new_tp->ftt_pc == pc);
    ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
    ASSERT(new_tp->ftt_ids == NULL);
    ASSERT(new_tp->ftt_retids == NULL);

    switch (id->fti_ptype) {
    case DTFTP_IS_ENABLED:
        new_tp->ftt_ids = id;
        break;

    case DTFTP_POST_OFFSETS:
        new_tp->ftt_retids = id;
        break;
    }

    /*
     * If the ISA-dependent initialization goes to plan, go back to the
     * beginning and try to install this freshly made tracepoint.
     */
    if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
        goto again;

    new_tp->ftt_ids = NULL;
    new_tp->ftt_retids = NULL;

    return (FASTTRAP_ENABLE_FAIL);
}
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
    fasttrap_bucket_t *bucket;
    fasttrap_provider_t *provider = probe->ftp_prov;
    fasttrap_tracepoint_t **pp, *tp;
    fasttrap_id_t *id, **idp;

    ASSERT(index < probe->ftp_ntps);

    pid = probe->ftp_pid;
    pc = probe->ftp_tps[index].fit_tp->ftt_pc;
    id = &probe->ftp_tps[index].fit_id;

    ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

    /*
     * Find the tracepoint and make sure that our id is one of the
     * ones registered with it.
     */
    bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
    lck_mtx_lock(&bucket->ftb_mtx);
    for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
        if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
            tp->ftt_proc == provider->ftp_proc)
            break;
    }

    /*
     * If we somehow lost this tracepoint, we're in a world of hurt.
     */

    switch (id->fti_ptype) {
    case DTFTP_IS_ENABLED:
        ASSERT(tp->ftt_ids != NULL);
        idp = &tp->ftt_ids;
        break;

    case DTFTP_POST_OFFSETS:
        ASSERT(tp->ftt_retids != NULL);
        idp = &tp->ftt_retids;
        break;

    default:
        /* Fix compiler warning... */
        idp = NULL;
        break;
    }

    while ((*idp)->fti_probe != probe) {
        idp = &(*idp)->fti_next;
        ASSERT(*idp != NULL);
    }

    dtrace_membar_producer();

    ASSERT(id->fti_probe == probe);

    /*
     * If there are other registered enablings of this tracepoint, we're
     * all done, but if this was the last probe associated with this
     * tracepoint, we need to remove and free it.
     */
    if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
        /*
         * If the current probe's tracepoint is in use, swap it
         * for an unused tracepoint.
         */
        if (tp == probe->ftp_tps[index].fit_tp) {
            fasttrap_probe_t *tmp_probe;
            fasttrap_tracepoint_t **tmp_tp;

            if (tp->ftt_ids != NULL) {
                tmp_probe = tp->ftt_ids->fti_probe;
                /* LINTED - alignment */
                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
            } else {
                tmp_probe = tp->ftt_retids->fti_probe;
                /* LINTED - alignment */
                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
            }

            ASSERT(*tmp_tp != NULL);
            ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
            ASSERT((*tmp_tp)->ftt_ids == NULL);
            ASSERT((*tmp_tp)->ftt_retids == NULL);

            probe->ftp_tps[index].fit_tp = *tmp_tp;
        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Tag the modified probe with the generation in which it was
         * changed.
         */
        probe->ftp_gen = fasttrap_mod_gen;
        return;
    }

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * We can't safely remove the tracepoint from the set of active
     * tracepoints until we've actually removed the fasttrap instruction
     * from the process's text. We can, however, operate on this
     * tracepoint secure in the knowledge that no other thread is going to
     * be looking at it since we hold P_PR_LOCK on the process if it's
     * live or we hold the provider lock on the process if it's dead and
     * gone.
     */

    /*
     * We only need to remove the actual instruction if we're looking
     * at an existing process.
     */

    /*
     * If we fail to restore the instruction we need to kill
     * this process since it's in a completely unrecoverable
     * state.
     */
    if (fasttrap_tracepoint_remove(p, tp) != 0)
        fasttrap_sigtrap(p, NULL, pc);

    /*
     * Decrement the count of the number of tracepoints active
     * in the victim process.
     */
    //ASSERT(p->p_proc_flag & P_PR_LOCK);

    /*
     * Remove the probe from the hash table of active tracepoints.
     */
    lck_mtx_lock(&bucket->ftb_mtx);
    pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;

        pp = &(*pp)->ftt_next;

    dtrace_membar_producer();

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Tag the modified probe with the generation in which it was changed.
     */
    probe->ftp_gen = fasttrap_mod_gen;
}
fasttrap_enable_callbacks(void)
{
    /*
     * We don't have to play the rw lock game here because we're
     * providing something rather than taking something away --
     * we can be sure that no threads have tried to follow this
     * function pointer yet.
     */
    lck_mtx_lock(&fasttrap_count_mtx);
    if (fasttrap_pid_count == 0) {
        ASSERT(dtrace_pid_probe_ptr == NULL);
        ASSERT(dtrace_return_probe_ptr == NULL);
        dtrace_pid_probe_ptr = &fasttrap_pid_probe;
        dtrace_return_probe_ptr = &fasttrap_return_probe;
    }
    ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
    ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
    fasttrap_pid_count++;
    lck_mtx_unlock(&fasttrap_count_mtx);
}
fasttrap_disable_callbacks(void)
{
    //ASSERT(MUTEX_HELD(&cpu_lock));

    lck_mtx_lock(&fasttrap_count_mtx);
    ASSERT(fasttrap_pid_count > 0);
    fasttrap_pid_count--;
    if (fasttrap_pid_count == 0) {
        dtrace_cpu_t *cur, *cpu = CPU;

        /*
         * APPLE NOTE: This loop seems broken, it touches every CPU
         * but the one we're actually running on. Need to ask Sun folks
         * if that is safe. Scenario is this: We're running on CPU A,
         * and lock all but A. Then we get preempted, and start running
         * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
         */
        for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
            lck_rw_lock_exclusive(&cur->cpu_ft_lock);
            // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
        }

        dtrace_pid_probe_ptr = NULL;
        dtrace_return_probe_ptr = NULL;

        for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
            lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
            // rw_exit(&cur->cpu_ft_lock);
        }
    }
    lck_mtx_unlock(&fasttrap_count_mtx);
}
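
/*
 * Summary of the two functions above (added note): dtrace_pid_probe_ptr and
 * dtrace_return_probe_ptr are the global trap-handler entry points. They are
 * installed when the first pid probe is enabled and cleared again only when
 * fasttrap_pid_count drops back to zero.
 */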
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;

    ASSERT(probe != NULL);
    ASSERT(!probe->ftp_enabled);
    ASSERT(id == probe->ftp_id);
    // ASSERT(MUTEX_HELD(&cpu_lock));

    /*
     * Increment the count of enabled probes on this probe's provider;
     * the provider can't go away while the probe still exists. We
     * must increment this even if we aren't able to properly enable
     * this probe.
     */
    lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
    probe->ftp_prov->ftp_rcount++;
    lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

    /*
     * If this probe's provider is retired (meaning it was valid in a
     * previously exec'ed incarnation of this address space), bail out. The
     * provider can't go away while we're in this code path.
     */
    if (probe->ftp_prov->ftp_retired)
        return (0);

    /*
     * If we can't find the process, it may be that we're in the context of
     * a fork in which the traced process is being born and we're copying
     * USDT probes. Otherwise, the process is gone so bail.
     */
    if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
        /*
         * APPLE NOTE: We should never end up here. The Solaris sprlock()
         * does not return processes with SIDL set, but we always return
         * the child process.
         */
        return (0);
    }

    /*
     * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
     * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
     * To mimic this, we allocate on demand scratch space. If this is the first
     * time a probe has been enabled in this process, we need to allocate scratch
     * space for each already existing thread. Now is a good time to do this, as
     * the target process is suspended and the proc_lock is held.
     */
    if (p->p_dtrace_ptss_pages == NULL) {
        dtrace_ptss_enable(p);
    }

    // ASSERT(!(p->p_flag & SVFORK));

    /*
     * We have to enable the trap entry point before any user threads have
     * the chance to execute the trap instruction we're about to place
     * in their process's text.
     */
    fasttrap_enable_callbacks();

    /*
     * Enable all the tracepoints and add this probe's id to each
     * tracepoint's list of active probes.
     */
    for (i = 0; i < (int)probe->ftp_ntps; i++) {
        if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
            /*
             * If enabling the tracepoint failed completely,
             * we don't have to disable it; if the failure
             * was only partial we must disable it.
             */
            if (rc == FASTTRAP_ENABLE_FAIL)
                i--;
            else
                ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

            /*
             * Back up and pull out all the tracepoints we've
             * created so far for this probe.
             */
            fasttrap_tracepoint_disable(p, probe, i);

            /*
             * Since we're not actually enabling this probe,
             * drop our reference on the trap table entry.
             */
            fasttrap_disable_callbacks();
        }
    }

    probe->ftp_enabled = 1;
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;
    fasttrap_provider_t *provider = probe->ftp_prov;

    ASSERT(id == probe->ftp_id);

    /*
     * We won't be able to acquire a /proc-esque lock on the process
     * iff the process is dead and gone. In this case, we rely on the
     * provider lock as a point of mutual exclusion to prevent other
     * DTrace consumers from disabling this probe.
     */
    if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
        // ASSERT(!(p->p_flag & SVFORK));
    }

    lck_mtx_lock(&provider->ftp_mtx);

    /*
     * Disable all the associated tracepoints (for fully enabled probes).
     */
    if (probe->ftp_enabled) {
        for (i = 0; i < (int)probe->ftp_ntps; i++) {
            fasttrap_tracepoint_disable(p, probe, i);
        }
    }

    ASSERT(provider->ftp_rcount > 0);
    provider->ftp_rcount--;

    if (p != NULL) {
        /*
         * Even though we may not be able to remove it entirely, we
         * mark this retired provider to get a chance to remove some
         * of the associated probes.
         */
        if (provider->ftp_retired && !provider->ftp_marked)
            whack = provider->ftp_marked = 1;
        lck_mtx_unlock(&provider->ftp_mtx);
    } else {
        /*
         * If the process is dead, we're just waiting for the
         * last probe to be disabled to be able to free it.
         */
        if (provider->ftp_rcount == 0 && !provider->ftp_marked)
            whack = provider->ftp_marked = 1;
        lck_mtx_unlock(&provider->ftp_mtx);
    }

    if (whack)
        fasttrap_pid_cleanup();

    if (!probe->ftp_enabled)
        return;

    probe->ftp_enabled = 0;

    // ASSERT(MUTEX_HELD(&cpu_lock));
    fasttrap_disable_callbacks();
}
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;

    desc->dtargd_native[0] = '\0';
    desc->dtargd_xlate[0] = '\0';

    if (probe->ftp_prov->ftp_retired != 0 ||
        desc->dtargd_ndx >= probe->ftp_nargs) {
        desc->dtargd_ndx = DTRACE_ARGNONE;
        return;
    }

    ndx = (probe->ftp_argmap != NULL) ?
        probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

    str = probe->ftp_ntypes;
    for (i = 0; i < ndx; i++) {
        str += strlen(str) + 1;
    }

    (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

    if (probe->ftp_xtypes == NULL)
        return;

    str = probe->ftp_xtypes;
    for (i = 0; i < desc->dtargd_ndx; i++) {
        str += strlen(str) + 1;
    }

    (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;

    ASSERT(probe != NULL);
    ASSERT(!probe->ftp_enabled);
    ASSERT(fasttrap_total >= probe->ftp_ntps);

    atomic_add_32(&fasttrap_total, -probe->ftp_ntps);

    if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
        fasttrap_mod_barrier(probe->ftp_gen);

    for (i = 0; i < probe->ftp_ntps; i++) {
        zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
    }

    if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
        zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
    } else {
        size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
        kmem_free(probe, size);
    }
}
static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
    fasttrap_pid_provide,
    fasttrap_pid_enable,
    fasttrap_pid_disable,
    fasttrap_pid_getargdesc,
    fasttrap_pid_getarg,
    fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
    fasttrap_pid_provide,
    fasttrap_pid_enable,
    fasttrap_pid_disable,
    fasttrap_pid_getargdesc,
    fasttrap_usdt_getarg,
    fasttrap_pid_destroy
};
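
/*
 * Note (added): pid_pops and usdt_pops above differ only in their getarg
 * entry point (fasttrap_pid_getarg vs. fasttrap_usdt_getarg); USDT probes may
 * remap arguments via the ftp_argmap table set up in
 * fasttrap_meta_create_probe() below.
 */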
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
    fasttrap_bucket_t *bucket;
    fasttrap_proc_t *fprc, *new_fprc;

    bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
    lck_mtx_lock(&bucket->ftb_mtx);

    for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
        if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
            lck_mtx_lock(&fprc->ftpc_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fprc->ftpc_rcount++;
            atomic_add_64(&fprc->ftpc_acount, 1);
            ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
            lck_mtx_unlock(&fprc->ftpc_mtx);

            return (fprc);
        }
    }

    /*
     * Drop the bucket lock so we don't try to perform a sleeping
     * allocation under it.
     */
    lck_mtx_unlock(&bucket->ftb_mtx);

    new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
    ASSERT(new_fprc != NULL);
    new_fprc->ftpc_pid = pid;
    new_fprc->ftpc_rcount = 1;
    new_fprc->ftpc_acount = 1;

    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take another lap through the list to make sure a proc hasn't
     * been created for this pid while we weren't under the bucket lock.
     */
    for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
        if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
            lck_mtx_lock(&fprc->ftpc_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fprc->ftpc_rcount++;
            atomic_add_64(&fprc->ftpc_acount, 1);
            ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
            lck_mtx_unlock(&fprc->ftpc_mtx);

            kmem_free(new_fprc, sizeof (fasttrap_proc_t));

            return (fprc);
        }
    }

    /*
     * APPLE NOTE: We have to initialize all locks explicitly
     */
    lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

    new_fprc->ftpc_next = bucket->ftb_data;
    bucket->ftb_data = new_fprc;

    lck_mtx_unlock(&bucket->ftb_mtx);

    return (new_fprc);
}
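
/*
 * Note (added): the lookup above deliberately drops the bucket lock around
 * the KM_SLEEP allocation and then re-scans the chain, so a concurrent
 * creator is detected and the freshly allocated fasttrap_proc_t is simply
 * freed.
 */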
fasttrap_proc_release(fasttrap_proc_t *proc)
{
    fasttrap_bucket_t *bucket;
    fasttrap_proc_t *fprc, **fprcp;
    pid_t pid = proc->ftpc_pid;

    lck_mtx_lock(&proc->ftpc_mtx);

    ASSERT(proc->ftpc_rcount != 0);
    ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

    if (--proc->ftpc_rcount != 0) {
        lck_mtx_unlock(&proc->ftpc_mtx);
        return;
    }

    lck_mtx_unlock(&proc->ftpc_mtx);

    /*
     * There should definitely be no live providers associated with this
     * process at this point.
     */
    ASSERT(proc->ftpc_acount == 0);

    bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
    lck_mtx_lock(&bucket->ftb_mtx);

    fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
    while ((fprc = *fprcp) != NULL) {
        if (fprc == proc)
            break;
        fprcp = &fprc->ftpc_next;
    }

    /*
     * Something strange has happened if we can't find the proc.
     */
    ASSERT(fprc != NULL);

    *fprcp = fprc->ftpc_next;

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
     * memory is freed even without the destroy. Maybe accounting cleanup?
     */
    lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);

    kmem_free(fprc, sizeof (fasttrap_proc_t));
}
/*
 * Lookup a fasttrap-managed provider based on its name and associated proc.
 * A reference to the proc must be held for the duration of the call.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
{
    pid_t pid = p->p_pid;
    fasttrap_provider_t *fp, *new_fp = NULL;
    fasttrap_bucket_t *bucket;
    char provname[DTRACE_PROVNAMELEN];

    ASSERT(strlen(name) < sizeof (fp->ftp_name));
    ASSERT(pattr != NULL);

    bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take a lap through the list and return the match if we find it.
     */
    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == pid &&
            fp->ftp_provider_type == provider_type &&
            strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired) {
            lck_mtx_lock(&fp->ftp_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            return (fp);
        }
    }

    /*
     * Drop the bucket lock so we don't try to perform a sleeping
     * allocation under it.
     */
    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Make sure the process isn't a child created as the result
     * of a vfork(2), and isn't a zombie (but may be in fork).
     */
    if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
        return (NULL);
    }

    /*
     * Increment p_dtrace_probes so that the process knows to inform us
     * when it exits or execs. fasttrap_provider_free() decrements this
     * when we're done with this provider.
     */
    p->p_dtrace_probes++;

    /*
     * Grab the credentials for this process so we have
     * something to pass to dtrace_register().
     * APPLE NOTE: We have no equivalent to crhold,
     * even though there is a cr_ref field in ucred.
     */
    // lck_mtx_lock(&p->p_crlock);
    // lck_mtx_unlock(&p->p_crlock);

    new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
    ASSERT(new_fp != NULL);
    new_fp->ftp_pid = p->p_pid;
    new_fp->ftp_proc = fasttrap_proc_lookup(pid);
    new_fp->ftp_provider_type = provider_type;

    /*
     * APPLE NOTE: locks require explicit init
     */
    lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
    lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);

    ASSERT(new_fp->ftp_proc != NULL);

    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take another lap through the list to make sure a provider hasn't
     * been created for this pid while we weren't under the bucket lock.
     */
    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired) {
            lck_mtx_lock(&fp->ftp_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fasttrap_provider_free(new_fp);
            return (fp);
        }
    }

    (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

    /*
     * Fail and return NULL if either the provider name is too long
     * or we fail to register this new provider with the DTrace
     * framework. Note that this is the only place we ever construct
     * the full provider name -- we keep it in pieces in the provider
     * structure.
     */
    if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
        (int)sizeof (provname) ||
        dtrace_register(provname, pattr,
        DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
        pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
        &new_fp->ftp_provid) != 0) {
        lck_mtx_unlock(&bucket->ftb_mtx);
        fasttrap_provider_free(new_fp);
        return (NULL);
    }

    new_fp->ftp_next = bucket->ftb_data;
    bucket->ftb_data = new_fp;

    lck_mtx_lock(&new_fp->ftp_mtx);
    lck_mtx_unlock(&bucket->ftb_mtx);

    return (new_fp);
}
fasttrap_provider_free(fasttrap_provider_t *provider)
{
    pid_t pid = provider->ftp_pid;

    /*
     * There need to be no associated enabled probes, no consumers
     * creating probes, and no meta providers referencing this provider.
     */
    ASSERT(provider->ftp_rcount == 0);
    ASSERT(provider->ftp_ccount == 0);
    ASSERT(provider->ftp_mcount == 0);

    /*
     * If this provider hasn't been retired, we need to explicitly drop the
     * count of active providers on the associated process structure.
     */
    if (!provider->ftp_retired) {
        atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
        ASSERT(provider->ftp_proc->ftpc_acount <
            provider->ftp_proc->ftpc_rcount);
    }

    fasttrap_proc_release(provider->ftp_proc);

    /*
     * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
     * memory is freed even without the destroy. Maybe accounting cleanup?
     */
    lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
    lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);

    kmem_free(provider, sizeof (fasttrap_provider_t));

    /*
     * Decrement p_dtrace_probes on the process whose provider we're
     * freeing. We don't have to worry about clobbering someone else's
     * modifications to it because we have locked the bucket that
     * corresponds to this process's hash chain in the provider hash
     * table. Don't sweat it if we can't find the process.
     */
    if ((p = proc_find(pid)) == NULL) {
        return;
    }

    p->p_dtrace_probes--;
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
    fasttrap_provider_t *fp;
    fasttrap_bucket_t *bucket;
    dtrace_provider_id_t provid;

    ASSERT(strlen(name) < sizeof (fp->ftp_name));

    bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
    lck_mtx_lock(&bucket->ftb_mtx);

    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired)
            break;
    }

    if (fp == NULL) {
        lck_mtx_unlock(&bucket->ftb_mtx);
        return;
    }

    lck_mtx_lock(&fp->ftp_mtx);
    ASSERT(!mprov || fp->ftp_mcount > 0);
    if (mprov && --fp->ftp_mcount != 0) {
        lck_mtx_unlock(&fp->ftp_mtx);
        lck_mtx_unlock(&bucket->ftb_mtx);
        return;
    }

    /*
     * Mark the provider to be removed in our post-processing step, mark it
     * retired, and drop the active count on its proc. Marking it indicates
     * that we should try to remove it; setting the retired flag indicates
     * that we're done with this provider; dropping the active count on the
     * proc releases our hold, and when this reaches zero (as it will during
     * exit or exec) the proc and associated providers become defunct.
     *
     * We obviously need to take the bucket lock before the provider lock
     * to perform the lookup, but we need to drop the provider lock
     * before calling into the DTrace framework since we acquire the
     * provider lock in callbacks invoked from the DTrace framework. The
     * bucket lock therefore protects the integrity of the provider hash
     * table.
     */
    atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
    ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

    fp->ftp_retired = 1;

    provid = fp->ftp_provid;
    lck_mtx_unlock(&fp->ftp_mtx);

    /*
     * We don't have to worry about invalidating the same provider twice
     * since fasttrap_provider_lookup() will ignore providers that have
     * been marked as retired.
     */
    dtrace_invalidate(provid);

    lck_mtx_unlock(&bucket->ftb_mtx);

    fasttrap_pid_cleanup();
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
    return (*(const uint32_t *)ap - *(const uint32_t *)bp);
}

fasttrap_uint64_cmp(const void *ap, const void *bp)
{
    return (*(const uint64_t *)ap - *(const uint64_t *)bp);
}
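
/*
 * Note (added): these comparators are used with qsort() in
 * fasttrap_add_probe() and fasttrap_meta_create_probe() below to sort probe
 * offsets so that duplicate tracepoint addresses can be rejected with a
 * single linear pass over the sorted array.
 */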
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
    fasttrap_provider_t *provider;
    fasttrap_probe_t *pp;
    fasttrap_tracepoint_t *tp;
    unsigned int i, aframes, whack;

    /*
     * There needs to be at least one desired trace point.
     */
    if (pdata->ftps_noffs == 0)
        return (EINVAL);

    switch (pdata->ftps_probe_type) {
    case DTFTP_ENTRY:
        aframes = FASTTRAP_ENTRY_AFRAMES;
        break;
    case DTFTP_RETURN:
        aframes = FASTTRAP_RETURN_AFRAMES;
        break;
    }

    const char* provider_name;
    switch (pdata->ftps_provider_type) {
    case DTFTP_PROVIDER_PID:
        provider_name = FASTTRAP_PID_NAME;
        break;
    case DTFTP_PROVIDER_OBJC:
        provider_name = FASTTRAP_OBJC_NAME;
        break;
    case DTFTP_PROVIDER_ONESHOT:
        provider_name = FASTTRAP_ONESHOT_NAME;
        break;
    }

    p = proc_find(pdata->ftps_pid);

    if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
        provider_name, &pid_attr)) == NULL)
        return (ESRCH);

    /*
     * Increment this reference count to indicate that a consumer is
     * actively adding a new probe associated with this provider. This
     * prevents the provider from being deleted -- we'll need to check
     * for pending deletions when we drop this reference count.
     */
    provider->ftp_ccount++;
    lck_mtx_unlock(&provider->ftp_mtx);

    /*
     * Grab the creation lock to ensure consistency between calls to
     * dtrace_probe_lookup() and dtrace_probe_create() in the face of
     * other threads creating probes. We must drop the provider lock
     * before taking this lock to avoid a three-way deadlock with the
     * DTrace framework.
     */
    lck_mtx_lock(&provider->ftp_cmtx);

    if (name == NULL) {
        for (i = 0; i < pdata->ftps_noffs; i++) {
            (void) snprintf(name_str, sizeof(name_str), "%llx",
                (uint64_t)pdata->ftps_offs[i]);

            if (dtrace_probe_lookup(provider->ftp_provid,
                pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
                continue;

            atomic_add_32(&fasttrap_total, 1);

            if (fasttrap_total > fasttrap_max) {
                atomic_add_32(&fasttrap_total, -1);
                goto no_mem;
            }

            pp = zalloc(fasttrap_probe_t_zones[1]);
            bzero(pp, sizeof (fasttrap_probe_t));

            pp->ftp_prov = provider;
            pp->ftp_faddr = pdata->ftps_pc;
            pp->ftp_fsize = pdata->ftps_size;
            pp->ftp_pid = pdata->ftps_pid;

            tp = zalloc(fasttrap_tracepoint_t_zone);
            bzero(tp, sizeof (fasttrap_tracepoint_t));

            tp->ftt_proc = provider->ftp_proc;
            tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
            tp->ftt_pid = pdata->ftps_pid;

            pp->ftp_tps[0].fit_tp = tp;
            pp->ftp_tps[0].fit_id.fti_probe = pp;
            pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
            pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
                pdata->ftps_mod, pdata->ftps_func, name_str,
                FASTTRAP_OFFSET_AFRAMES, pp);
        }
    } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
        pdata->ftps_func, name) == 0) {
        atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

        if (fasttrap_total > fasttrap_max) {
            atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
            goto no_mem;
        }

        /*
         * Make sure all tracepoint program counter values are unique.
         * We later assume that each probe has exactly one tracepoint
         * for a given pc.
         */
        qsort(pdata->ftps_offs, pdata->ftps_noffs,
            sizeof (uint64_t), fasttrap_uint64_cmp);
        for (i = 1; i < pdata->ftps_noffs; i++) {
            if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
                continue;

            atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
            goto no_mem;
        }

        ASSERT(pdata->ftps_noffs > 0);
        if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
            pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
            bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
        } else {
            pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
        }

        pp->ftp_prov = provider;
        pp->ftp_faddr = pdata->ftps_pc;
        pp->ftp_fsize = pdata->ftps_size;
        pp->ftp_pid = pdata->ftps_pid;
        pp->ftp_ntps = pdata->ftps_noffs;

        for (i = 0; i < pdata->ftps_noffs; i++) {
            tp = zalloc(fasttrap_tracepoint_t_zone);
            bzero(tp, sizeof (fasttrap_tracepoint_t));
            tp->ftt_proc = provider->ftp_proc;
            tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
            tp->ftt_pid = pdata->ftps_pid;

            pp->ftp_tps[i].fit_tp = tp;
            pp->ftp_tps[i].fit_id.fti_probe = pp;
            pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
        }

        pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
            pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
    }

    lck_mtx_unlock(&provider->ftp_cmtx);

    /*
     * We know that the provider is still valid since we incremented the
     * creation reference count. If someone tried to clean up this provider
     * while we were using it (e.g. because the process called exec(2) or
     * exit(2)), take note of that and try to clean it up now.
     */
    lck_mtx_lock(&provider->ftp_mtx);
    provider->ftp_ccount--;
    whack = provider->ftp_retired;
    lck_mtx_unlock(&provider->ftp_mtx);

    if (whack)
        fasttrap_pid_cleanup();

    return (0);

no_mem:
    /*
     * If we've exhausted the allowable resources, we'll try to remove
     * this provider to free some up. This is to cover the case where
     * the user has accidentally created many more probes than was
     * intended (e.g. pid123:::).
     */
    lck_mtx_unlock(&provider->ftp_cmtx);
    lck_mtx_lock(&provider->ftp_mtx);
    provider->ftp_ccount--;
    provider->ftp_marked = 1;
    lck_mtx_unlock(&provider->ftp_mtx);

    fasttrap_pid_cleanup();

    return (ENOMEM);
}
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
    fasttrap_provider_t *provider;

    /*
     * A 32-bit unsigned integer (like a pid for example) can be
     * expressed in 10 or fewer decimal digits. Make sure that we'll
     * have enough space for the provider name.
     */
    if (strlen(dhpv->dthpv_provname) + 10 >=
        sizeof (provider->ftp_name)) {
        cmn_err(CE_WARN, "failed to instantiate provider %s: "
            "name too long to accommodate pid", dhpv->dthpv_provname);
        return (NULL);
    }

    /*
     * Don't let folks spoof the true pid provider.
     */
    if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
        cmn_err(CE_WARN, "failed to instantiate provider %s: "
            "%s is an invalid name", dhpv->dthpv_provname,
            FASTTRAP_PID_NAME);
        return (NULL);
    }

    /*
     * APPLE NOTE: We also need to check the objc and oneshot pid provider types
     */
    if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
        cmn_err(CE_WARN, "failed to instantiate provider %s: "
            "%s is an invalid name", dhpv->dthpv_provname,
            FASTTRAP_OBJC_NAME);
        return (NULL);
    }
    if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
        cmn_err(CE_WARN, "failed to instantiate provider %s: "
            "%s is an invalid name", dhpv->dthpv_provname,
            FASTTRAP_ONESHOT_NAME);
        return (NULL);
    }

    /*
     * The highest stability class that fasttrap supports is ISA; cap
     * the stability of the new provider accordingly.
     */
    if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
        dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
    if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
        dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
    if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
        dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
    if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
        dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
    if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
        dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

    if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
        &dhpv->dthpv_pattr)) == NULL) {
        cmn_err(CE_WARN, "failed to instantiate provider %s for "
            "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
        return (NULL);
    }

    /*
     * USDT probes (fasttrap meta probes) are very expensive to create.
     * Profiling has shown that the largest single cost is verifying that
     * dtrace hasn't already created a given meta_probe. The reason for
     * this is dtrace_match() often has to strcmp ~100 hashed entries for
     * each static probe being created. We want to get rid of that check.
     * The simplest way of eliminating it is to deny the ability to add
     * probes to an existing provider. If the provider already exists, BZZT!
     * This still leaves the possibility of intentionally malformed DOF
     * having duplicate probes. However, duplicate probes are not fatal,
     * and there is no way to get that by accident, so we will not check
     * for that case.
     *
     * UPDATE: It turns out there are several use cases that require adding
     * probes to existing providers. Disabling the dtrace_probe_lookup()
     * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
     */

    /*
     * Up the meta provider count so this provider isn't removed until
     * the meta provider has been told to remove it.
     */
    provider->ftp_mcount++;

    lck_mtx_unlock(&provider->ftp_mtx);

    return (provider);
}
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
    fasttrap_provider_t *provider = parg;
    fasttrap_probe_t *pp;
    fasttrap_tracepoint_t *tp;

    /*
     * Since the meta provider count is non-zero we don't have to worry
     * about this provider disappearing.
     */
    ASSERT(provider->ftp_mcount > 0);

    /*
     * The offsets must be unique.
     */
    qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
        fasttrap_uint32_cmp);
    for (i = 1; i < dhpb->dthpb_noffs; i++) {
        if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
            dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
            return;
    }

    qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
        fasttrap_uint32_cmp);
    for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
        if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
            dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
            return;
    }

    /*
     * Grab the creation lock to ensure consistency between calls to
     * dtrace_probe_lookup() and dtrace_probe_create() in the face of
     * other threads creating probes.
     */
    lck_mtx_lock(&provider->ftp_cmtx);

    /*
     * APPLE NOTE: This is hideously expensive. See note in
     * fasttrap_meta_provide() for why we can get away without
     * checking here.
     */
    if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
        dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
        lck_mtx_unlock(&provider->ftp_cmtx);
        return;
    }

    ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;

    atomic_add_32(&fasttrap_total, ntps);

    if (fasttrap_total > fasttrap_max) {
        atomic_add_32(&fasttrap_total, -ntps);
        lck_mtx_unlock(&provider->ftp_cmtx);
        return;
    }

    if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
        pp = zalloc(fasttrap_probe_t_zones[ntps]);
        bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
    } else {
        pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
    }

    pp->ftp_prov = provider;
    pp->ftp_pid = provider->ftp_pid;
    pp->ftp_ntps = ntps;
    pp->ftp_nargs = dhpb->dthpb_xargc;
    pp->ftp_xtypes = dhpb->dthpb_xtypes;
    pp->ftp_ntypes = dhpb->dthpb_ntypes;
2043 * First create a tracepoint for each actual point of interest.
2045 for (i
= 0; i
< dhpb
->dthpb_noffs
; i
++) {
2046 tp
= zalloc(fasttrap_tracepoint_t_zone
);
2047 bzero(tp
, sizeof (fasttrap_tracepoint_t
));
2049 tp
->ftt_proc
= provider
->ftp_proc
;
2052 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2053 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2054 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2056 #if defined(__x86_64__)
2058 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2060 tp
->ftt_pc
= dhpb
->dthpb_base
+ (int64_t)dhpb
->dthpb_offs
[i
] - 1;
2062 #error "Architecture not supported"
2065 tp
->ftt_pid
= provider
->ftp_pid
;
2067 pp
->ftp_tps
[i
].fit_tp
= tp
;
2068 pp
->ftp_tps
[i
].fit_id
.fti_probe
= pp
;
2069 pp
->ftp_tps
[i
].fit_id
.fti_ptype
= DTFTP_OFFSETS
;

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc(fasttrap_tracepoint_t_zone);
		bzero(tp, sizeof (fasttrap_tracepoint_t));

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}
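
	/*
	 * Remapping example (illustrative values): if a translated probe
	 * presents its first two arguments as (arg2, arg0), dthpb_args is
	 * {2, 0}.  The entry at index 0 differs from 0, so ftp_argmap is set
	 * and the arguments are permuted when the probe fires.
	 */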

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}

static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}

static char *
fasttrap_meta_provider_name(void *arg)
{
	fasttrap_provider_t *fprovider = arg;
	dtrace_provider_t *provider = (dtrace_provider_t *)(fprovider->ftp_provid);
	return provider->dtpv_name;
}

static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove,
	fasttrap_meta_provider_name
};

/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or not a valid UTF8 string, the function returns -1. Otherwise, 0 is
 * returned.
 *
 * str: string to validate.
 * maxlen: maximal length of the string, terminating null byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen) {
	size_t len;

	assert(maxlen != 0);

	/* Check if the string is null-terminated. */
	len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* Finally, check for UTF8 validity. */
	return utf8_validatestr((unsigned const char*) str, len);
}
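
/*
 * Examples for fasttrap_validatestr(): with maxlen == 8, a buffer holding
 * "main\0..." passes (strnlen() returns 4), a buffer of eight non-NUL bytes
 * fails the termination check, and a buffer starting with the byte sequence
 * 0xC3 0x28 fails utf8_validatestr(), since 0xC3 requires a UTF8
 * continuation byte to follow it.
 */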

static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;

		if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
		    sizeof (probe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
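
		/*
		 * Bound-check arithmetic, for reference: the cap above limits
		 * noffs to (1 MiB - sizeof (fasttrap_probe_spec_t)) divided
		 * by the size of one ftps_offs entry, so the size computed
		 * just above stays at or under roughly 1 MiB and cannot
		 * overflow.
		 */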

		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(arg, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				ret = ESRCH;
				goto err;
			}

			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//     VREAD | VWRITE)) != 0) {
			//	mutex_exit(&p->p_lock);

			proc_rele(p);
		}

		ret = fasttrap_add_probe(probe);
err:
		kmem_free(probe, size);

		return (ret);
	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;

		if (copyin(arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}

			// FIXME! How is this done on OS X?
			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
			//	mutex_exit(&p->p_lock);

			proc_rele(p);
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}

static int
fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	ulong_t nent;
	unsigned int i;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	fasttrap_devi = devi;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory. 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;
	if (fasttrap_max == 0)
		fasttrap_max = 50000;
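
	/*
	 * Worked example of the sizing above: with roughly 8 GiB of memory,
	 * sane_size >> 28 is 32 (8 GiB / 256 MiB), giving a cap of 3,200,000
	 * probes; on anything under 256 MiB the shift yields 0, so the cap
	 * falls back to 50,000.
	 */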

	/*
	 * Conjure up the tracepoints hashtable...
	 */
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);
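
	/*
	 * Sizing note: the bucket count is forced to a power of two so that
	 * fth_mask can be used for indexing.  For example, a requested
	 * "fasttrap-hash-size" of 1000 is rounded up to 1024 buckets,
	 * fth_mask becomes 1023, and a hash value is reduced to a bucket
	 * index with (hash & fth_mask) rather than a modulo.
	 */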

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

	/*
	 * APPLE NOTE: explicitly initialize all locks...
	 */
	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	}

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (DDI_SUCCESS);
}

static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}

static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	int err, rv = 0;
	user_addr_t uaddrp;

	if (proc_is64bit(p))
		uaddrp = *(user_addr_t *)data;
	else
		uaddrp = (user_addr_t) *(uint32_t *)data;

	err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	}

	return 0;
}
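
/*
 * Decoding note for the errno overload in _fasttrap_ioctl() above: a
 * Solaris-style error such as EINVAL (22) comes back as errno 22, while a
 * non-error return value of, say, 3 is reported as errno 3 << 12 = 12288.
 * Userland distinguishes the two cases by whether errno is below or at/above
 * 4096 and shifts the latter back down by 12 bits.
 */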

static int gFasttrapInited = 0;

#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw fasttrap_cdevsw =
{
	_fasttrap_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	_fasttrap_ioctl,	/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};

void fasttrap_init(void);

void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places. Any open of /dev/dtrace,
	 * also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (0 == gFasttrapInited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
			return;
		}

		/*
		 * Allocate the fasttrap_tracepoint_t zone
		 */
		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
						   1024 * sizeof(fasttrap_tracepoint_t),
						   sizeof(fasttrap_tracepoint_t),
						   "dtrace.fasttrap_tracepoint_t");

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		unsigned int i;
		for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
							  1024 * zone_element_size,
							  zone_element_size,
							  fasttrap_probe_t_zone_names[i]);
		}

		/*
		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
		 */
		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);

		/*
		 * Initialize global locks
		 */
		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

		if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
			// FIX ME! Do we remove the devfs node here?
			// What kind of error reporting?
			printf("fasttrap_init: Call to fasttrap_attach failed.\n");
			return;
		}

		gFasttrapInited = 1;
	}
}

#undef FASTTRAP_MAJOR