/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/zalloc.h>

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

__private_extern__
void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space, so we don't want to rely on
 * much information there, we begin by replacing the instructions we want to
 * trace with trap instructions. Each instruction we overwrite is saved into
 * a hash table keyed by process ID and pc address. When we enter the kernel
 * due to this trap instruction, we need the effects of the replaced
 * instruction to appear to have occurred before we proceed with the user
 * thread's execution.
 *
 * Each user-level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread-local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *         bucket lock before provider lock
 *         DTrace before provider lock
 *         creation lock before DTrace
 *         never hold the provider lock and creation lock simultaneously
 */
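
/*
 * Editor's illustration (hedged, not part of the original source and not
 * compiled): the reference-count dance described above, using only fields
 * this file already relies on (ftp_mtx, ftp_cmtx, ftp_ccount). See
 * fasttrap_add_probe() below for the real instance of this pattern.
 */
#if 0
        lck_mtx_lock(&fp->ftp_mtx);     /* provider lock, found via bucket */
        fp->ftp_ccount++;               /* pin the provider... */
        lck_mtx_unlock(&fp->ftp_mtx);   /* ...so its lock can be dropped... */
        lck_mtx_lock(&fp->ftp_cmtx);    /* ...before the creation lock is taken */

        /* ...make multiple, mutually consistent DTrace framework calls... */

        lck_mtx_unlock(&fp->ftp_cmtx);
        lck_mtx_lock(&fp->ftp_mtx);
        fp->ftp_ccount--;               /* unpin; caller then checks ftp_retired */
        lck_mtx_unlock(&fp->ftp_mtx);
#endif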

static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_call_t fasttrap_timeout;
static lck_mtx_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

#if !defined(__APPLE__)
/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define FASTTRAP_MAX_DEFAULT 2500000
#endif

static uint32_t fasttrap_max;
static uint32_t fasttrap_total;


#define FASTTRAP_TPOINTS_DEFAULT_SIZE   0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE     0x100

fasttrap_hash_t         fasttrap_tpoints;
static fasttrap_hash_t  fasttrap_provs;
static fasttrap_hash_t  fasttrap_procs;

static uint64_t         fasttrap_pid_count;     /* pid ref count */
static lck_mtx_t        fasttrap_count_mtx;     /* lock on ref count */

#define FASTTRAP_ENABLE_FAIL    1
#define FASTTRAP_ENABLE_PARTIAL 2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

#if defined(__APPLE__)
static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
#endif
static void fasttrap_provider_retire(pid_t, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
        ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

#if defined(__APPLE__)

/*
 * To save memory, some common memory allocations are given a
 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

struct zone *fasttrap_tracepoint_t_zone;

/*
 * fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
        "",
        "dtrace.fasttrap_probe_t[1]",
        "dtrace.fasttrap_probe_t[2]",
        "dtrace.fasttrap_probe_t[3]"
};
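
/*
 * Editor's sketch (assumption, not compiled here): these zones are
 * presumably created once in this provider's init/attach path, outside
 * this excerpt, along these lines. zinit() is the zalloc interface of
 * this era; the max/alloc sizing below is a placeholder, not the
 * shipping tuning.
 */
#if 0
        fasttrap_tracepoint_t_zone = zinit(sizeof (fasttrap_tracepoint_t),
            1024 * sizeof (fasttrap_tracepoint_t),
            sizeof (fasttrap_tracepoint_t),
            "dtrace.fasttrap_tracepoint_t");
#endif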

/*
 * We have to manage locks explicitly
 */
lck_grp_t*      fasttrap_lck_grp;
lck_grp_attr_t* fasttrap_lck_grp_attr;
lck_attr_t*     fasttrap_lck_attr;
#endif

static int
fasttrap_highbit(ulong_t i)
{
        int h = 1;

        if (i == 0)
                return (0);
#ifdef _LP64
        if (i & 0xffffffff00000000ul) {
                h += 32; i >>= 32;
        }
#endif
        if (i & 0xffff0000) {
                h += 16; i >>= 16;
        }
        if (i & 0xff00) {
                h += 8; i >>= 8;
        }
        if (i & 0xf0) {
                h += 4; i >>= 4;
        }
        if (i & 0xc) {
                h += 2; i >>= 2;
        }
        if (i & 0x2) {
                h += 1;
        }
        return (h);
}

static uint_t
fasttrap_hash_str(const char *p)
{
        unsigned int g;
        uint_t hval = 0;

        while (*p) {
                hval = (hval << 4) + *p++;
                if ((g = (hval & 0xf0000000)) != 0)
                        hval ^= g >> 24;
                hval &= ~g;
        }
        return (hval);
}

/*
 * FIXME - needs implementation
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if 0
        sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

        sqp->sq_info.si_signo = SIGTRAP;
        sqp->sq_info.si_code = TRAP_DTRACE;
        sqp->sq_info.si_addr = (caddr_t)pc;

        mutex_enter(&p->p_lock);
        sigaddqa(p, t, sqp);
        mutex_exit(&p->p_lock);

        if (t != NULL)
                aston(t);
#endif

        printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
        unsigned int i;

        if (gen < fasttrap_mod_gen)
                return;

        fasttrap_mod_gen++;

        for (i = 0; i < NCPU; i++) {
                lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
                lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
        }
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *ignored, void *ignored2)
{
#pragma unused(ignored, ignored2)
        fasttrap_provider_t **fpp, *fp;
        fasttrap_bucket_t *bucket;
        dtrace_provider_id_t provid;
        unsigned int i, later = 0;

        static volatile int in = 0;
        ASSERT(in == 0);
        in = 1;

        lck_mtx_lock(&fasttrap_cleanup_mtx);
        while (fasttrap_cleanup_work) {
                fasttrap_cleanup_work = 0;
                lck_mtx_unlock(&fasttrap_cleanup_mtx);

                later = 0;

                /*
                 * Iterate over all the providers trying to remove the marked
                 * ones. If a provider is marked but not retired, we just
                 * have to take a crack at removing it -- it's no big deal if
                 * we can't.
                 */
                for (i = 0; i < fasttrap_provs.fth_nent; i++) {
                        bucket = &fasttrap_provs.fth_table[i];
                        lck_mtx_lock(&bucket->ftb_mtx);
                        fpp = (fasttrap_provider_t **)&bucket->ftb_data;

                        while ((fp = *fpp) != NULL) {
                                if (!fp->ftp_marked) {
                                        fpp = &fp->ftp_next;
                                        continue;
                                }

                                lck_mtx_lock(&fp->ftp_mtx);

                                /*
                                 * If this provider has consumers actively
                                 * creating probes (ftp_ccount) or is a USDT
                                 * provider (ftp_mcount), we can't unregister
                                 * or even condense.
                                 */
                                if (fp->ftp_ccount != 0 ||
                                    fp->ftp_mcount != 0) {
                                        fp->ftp_marked = 0;
                                        lck_mtx_unlock(&fp->ftp_mtx);
                                        continue;
                                }

                                if (!fp->ftp_retired || fp->ftp_rcount != 0)
                                        fp->ftp_marked = 0;

                                lck_mtx_unlock(&fp->ftp_mtx);

                                /*
                                 * If we successfully unregister this
                                 * provider we can remove it from the hash
                                 * chain and free the memory. If our attempt
                                 * to unregister fails and this is a retired
                                 * provider, increment our flag to try again
                                 * pretty soon. If we've consumed more than
                                 * half of our total permitted number of
                                 * probes, call dtrace_condense() to try to
                                 * clean out the unenabled probes.
                                 */
                                provid = fp->ftp_provid;
                                if (dtrace_unregister(provid) != 0) {
                                        if (fasttrap_total > fasttrap_max / 2)
                                                (void) dtrace_condense(provid);
                                        later += fp->ftp_marked;
                                        fpp = &fp->ftp_next;
                                } else {
                                        *fpp = fp->ftp_next;
                                        fasttrap_provider_free(fp);
                                }
                        }
                        lck_mtx_unlock(&bucket->ftb_mtx);
                }

                lck_mtx_lock(&fasttrap_cleanup_mtx);
        }

        ASSERT(fasttrap_timeout != 0);

        /*
         * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
         */
        if (fasttrap_timeout != (thread_call_t)1)
                thread_call_free(fasttrap_timeout);

        /*
         * If we were unable to remove a retired provider, try again after
         * a second. This situation can occur in certain circumstances where
         * providers cannot be unregistered even though they have no probes
         * enabled because of an execution of dtrace -l or something similar.
         * If the timeout has been disabled (set to 1 because we're trying
         * to detach), we set fasttrap_cleanup_work to ensure that we'll
         * get a chance to do that work if and when the timeout is reenabled
         * (if detach fails).
         */
        if (later > 0 && fasttrap_timeout != (thread_call_t)1)
                /* The time value passed to dtrace_timeout is in nanos */
                fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
        else if (later > 0)
                fasttrap_cleanup_work = 1;
        else
                fasttrap_timeout = 0;

        lck_mtx_unlock(&fasttrap_cleanup_mtx);
        in = 0;
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{
        lck_mtx_lock(&fasttrap_cleanup_mtx);
        fasttrap_cleanup_work = 1;
        if (fasttrap_timeout == 0)
                fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
        lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
        pid_t ppid = p->p_pid;
        unsigned int i;

        ASSERT(current_proc() == p);
        lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
        ASSERT(p->p_dtrace_count > 0);
        ASSERT(cp->p_dtrace_count == 0);

        /*
         * This would be simpler and faster if we maintained per-process
         * hash tables of enabled tracepoints. It could, however, potentially
         * slow down execution of a tracepoint since we'd need to go
         * through two levels of indirection. In the future, we should
         * consider either maintaining per-process ancillary lists of
         * enabled tracepoints or hanging a pointer to a per-process hash
         * table of enabled tracepoints off the proc structure.
         */

        /*
         * We don't have to worry about the child process disappearing
         * because we're in fork().
         */
        if (cp != sprlock(cp->p_pid)) {
                printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
                return;
        }
        proc_unlock(cp);

        /*
         * Iterate over every tracepoint looking for ones that belong to the
         * parent process, and remove each from the child process.
         */
        for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
                fasttrap_tracepoint_t *tp;
                fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

                lck_mtx_lock(&bucket->ftb_mtx);
                for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                        if (tp->ftt_pid == ppid &&
                            tp->ftt_proc->ftpc_acount != 0) {
                                fasttrap_tracepoint_remove(cp, tp);

                                /*
                                 * The count of active providers can only be
                                 * decremented (i.e. to zero) during exec,
                                 * exit, and removal of a meta provider so it
                                 * should be impossible to drop the count
                                 * mid-fork.
                                 */
                                ASSERT(tp->ftt_proc->ftpc_acount != 0);
                        }
                }
                lck_mtx_unlock(&bucket->ftb_mtx);
        }

        /*
         * Free any ptss pages/entries in the child.
         */
        dtrace_ptss_fork(p, cp);

        proc_lock(cp);
        sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
        ASSERT(p == current_proc());
        lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
        lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);

        /* APPLE NOTE: Okay, the locking here is really odd and needs some
         * explaining. This method is always called with the proc_lock held.
         * We must drop the proc_lock before calling fasttrap_provider_retire
         * to avoid a deadlock when it takes the bucket lock.
         *
         * Next, the dtrace_ptss_exec_exit function requires the sprlock
         * be held, but not the proc_lock.
         *
         * Finally, we must re-acquire the proc_lock.
         */
        proc_unlock(p);

        /*
         * We clean up the pid provider for this process here; user-land
         * static probes are handled by the meta-provider remove entry point.
         */
        fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#if defined(__APPLE__)
        /*
         * We also need to remove any aliased providers.
         * XXX optimization: track which provider types are instantiated
         * and only retire as needed.
         */
        fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
        fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
#endif /* __APPLE__ */

        /*
         * This should be called after it is no longer possible for a user
         * thread to execute (potentially dtrace instrumented) instructions.
         */
        lck_mtx_lock(&p->p_dtrace_sprlock);
        dtrace_ptss_exec_exit(p);
        lck_mtx_unlock(&p->p_dtrace_sprlock);

        proc_lock(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
        /*
         * There are no "default" pid probes.
         */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
        fasttrap_tracepoint_t *tp, *new_tp = NULL;
        fasttrap_bucket_t *bucket;
        fasttrap_id_t *id;
        pid_t pid;
        user_addr_t pc;

        ASSERT(index < probe->ftp_ntps);

        pid = probe->ftp_pid;
        pc = probe->ftp_tps[index].fit_tp->ftt_pc;
        id = &probe->ftp_tps[index].fit_id;

        ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

        //ASSERT(!(p->p_flag & SVFORK));

        /*
         * Before we make any modifications, make sure we've imposed a barrier
         * on the generation in which this probe was last modified.
         */
        fasttrap_mod_barrier(probe->ftp_gen);

        bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

        /*
         * If the tracepoint has already been enabled, just add our id to the
         * list of interested probes. This may be our second time through
         * this path in which case we'll have constructed the tracepoint we'd
         * like to install. If we can't find a match, and have an allocated
         * tracepoint ready to go, enable that one now.
         *
         * A tracepoint whose process is defunct is also considered defunct.
         */
again:
        lck_mtx_lock(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                /*
                 * Note that it's safe to access the active count on the
                 * associated proc structure because we know that at least one
                 * provider (this one) will still be around throughout this
                 * operation.
                 */
                if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
                    tp->ftt_proc->ftpc_acount == 0)
                        continue;

                /*
                 * Now that we've found a matching tracepoint, it would be
                 * a decent idea to confirm that the tracepoint is still
                 * enabled and the trap instruction hasn't been overwritten.
                 * Since this is a little hairy, we'll punt for now.
                 */

                /*
                 * This can't be the first interested probe. We don't have
                 * to worry about another thread being in the midst of
                 * deleting this tracepoint (which would be the only valid
                 * reason for a tracepoint to have no interested probes)
                 * since we're holding P_PR_LOCK for this process.
                 */
                ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

                switch (id->fti_ptype) {
                case DTFTP_ENTRY:
                case DTFTP_OFFSETS:
                case DTFTP_IS_ENABLED:
                        id->fti_next = tp->ftt_ids;
                        dtrace_membar_producer();
                        tp->ftt_ids = id;
                        dtrace_membar_producer();
                        break;

                case DTFTP_RETURN:
                case DTFTP_POST_OFFSETS:
                        id->fti_next = tp->ftt_retids;
                        dtrace_membar_producer();
                        tp->ftt_retids = id;
                        dtrace_membar_producer();
                        break;

                default:
                        ASSERT(0);
                }

                lck_mtx_unlock(&bucket->ftb_mtx);

                if (new_tp != NULL) {
                        new_tp->ftt_ids = NULL;
                        new_tp->ftt_retids = NULL;
                }

                return (0);
        }

        /*
         * If we have a good tracepoint ready to go, install it now while
         * we have the lock held and no one can screw with us.
         */
        if (new_tp != NULL) {
                int rc = 0;

                new_tp->ftt_next = bucket->ftb_data;
                dtrace_membar_producer();
                bucket->ftb_data = new_tp;
                dtrace_membar_producer();
                lck_mtx_unlock(&bucket->ftb_mtx);

                /*
                 * Activate the tracepoint in the ISA-specific manner.
                 * If this fails, we need to report the failure, but
                 * indicate that this tracepoint must still be disabled
                 * by calling fasttrap_tracepoint_disable().
                 */
                if (fasttrap_tracepoint_install(p, new_tp) != 0)
                        rc = FASTTRAP_ENABLE_PARTIAL;

                /*
                 * Increment the count of the number of tracepoints active in
                 * the victim process.
                 */
                //ASSERT(p->p_proc_flag & P_PR_LOCK);
                p->p_dtrace_count++;

                return (rc);
        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Initialize the tracepoint that's been preallocated with the probe.
         */
        new_tp = probe->ftp_tps[index].fit_tp;

        ASSERT(new_tp->ftt_pid == pid);
        ASSERT(new_tp->ftt_pc == pc);
        ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
        ASSERT(new_tp->ftt_ids == NULL);
        ASSERT(new_tp->ftt_retids == NULL);

        switch (id->fti_ptype) {
        case DTFTP_ENTRY:
        case DTFTP_OFFSETS:
        case DTFTP_IS_ENABLED:
                id->fti_next = NULL;
                new_tp->ftt_ids = id;
                break;

        case DTFTP_RETURN:
        case DTFTP_POST_OFFSETS:
                id->fti_next = NULL;
                new_tp->ftt_retids = id;
                break;

        default:
                ASSERT(0);
        }

        /*
         * If the ISA-dependent initialization goes to plan, go back to the
         * beginning and try to install this freshly made tracepoint.
         */
        if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
                goto again;

        new_tp->ftt_ids = NULL;
        new_tp->ftt_retids = NULL;

        return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
        fasttrap_bucket_t *bucket;
        fasttrap_provider_t *provider = probe->ftp_prov;
        fasttrap_tracepoint_t **pp, *tp;
        fasttrap_id_t *id, **idp;
        pid_t pid;
        user_addr_t pc;

        ASSERT(index < probe->ftp_ntps);

        pid = probe->ftp_pid;
        pc = probe->ftp_tps[index].fit_tp->ftt_pc;
        id = &probe->ftp_tps[index].fit_id;

        ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

        /*
         * Find the tracepoint and make sure that our id is one of the
         * ones registered with it.
         */
        bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
        lck_mtx_lock(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
                    tp->ftt_proc == provider->ftp_proc)
                        break;
        }

        /*
         * If we somehow lost this tracepoint, we're in a world of hurt.
         */
        ASSERT(tp != NULL);

        switch (id->fti_ptype) {
        case DTFTP_ENTRY:
        case DTFTP_OFFSETS:
        case DTFTP_IS_ENABLED:
                ASSERT(tp->ftt_ids != NULL);
                idp = &tp->ftt_ids;
                break;

        case DTFTP_RETURN:
        case DTFTP_POST_OFFSETS:
                ASSERT(tp->ftt_retids != NULL);
                idp = &tp->ftt_retids;
                break;

        default:
                /* Fix compiler warning... */
                idp = NULL;
                ASSERT(0);
        }

        while ((*idp)->fti_probe != probe) {
                idp = &(*idp)->fti_next;
                ASSERT(*idp != NULL);
        }

        id = *idp;
        *idp = id->fti_next;
        dtrace_membar_producer();

        ASSERT(id->fti_probe == probe);

        /*
         * If there are other registered enablings of this tracepoint, we're
         * all done, but if this was the last probe associated with this
         * tracepoint, we need to remove and free it.
         */
        if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

                /*
                 * If the current probe's tracepoint is in use, swap it
                 * for an unused tracepoint.
                 */
                if (tp == probe->ftp_tps[index].fit_tp) {
                        fasttrap_probe_t *tmp_probe;
                        fasttrap_tracepoint_t **tmp_tp;
                        uint_t tmp_index;

                        if (tp->ftt_ids != NULL) {
                                tmp_probe = tp->ftt_ids->fti_probe;
                                /* LINTED - alignment */
                                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
                                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
                        } else {
                                tmp_probe = tp->ftt_retids->fti_probe;
                                /* LINTED - alignment */
                                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
                                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
                        }

                        ASSERT(*tmp_tp != NULL);
                        ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
                        ASSERT((*tmp_tp)->ftt_ids == NULL);
                        ASSERT((*tmp_tp)->ftt_retids == NULL);

                        probe->ftp_tps[index].fit_tp = *tmp_tp;
                        *tmp_tp = tp;
                }

                lck_mtx_unlock(&bucket->ftb_mtx);

                /*
                 * Tag the modified probe with the generation in which it was
                 * changed.
                 */
                probe->ftp_gen = fasttrap_mod_gen;
                return;
        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * We can't safely remove the tracepoint from the set of active
         * tracepoints until we've actually removed the fasttrap instruction
         * from the process's text. We can, however, operate on this
         * tracepoint secure in the knowledge that no other thread is going to
         * be looking at it since we hold P_PR_LOCK on the process if it's
         * live or we hold the provider lock on the process if it's dead and
         * gone.
         */

        /*
         * We only need to remove the actual instruction if we're looking
         * at an existing process.
         */
        if (p != NULL) {
                /*
                 * If we fail to restore the instruction we need to kill
                 * this process since it's in a completely unrecoverable
                 * state.
                 */
                if (fasttrap_tracepoint_remove(p, tp) != 0)
                        fasttrap_sigtrap(p, NULL, pc);

                /*
                 * Decrement the count of the number of tracepoints active
                 * in the victim process.
                 */
                //ASSERT(p->p_proc_flag & P_PR_LOCK);
                p->p_dtrace_count--;
        }

        /*
         * Remove the probe from the hash table of active tracepoints.
         */
        lck_mtx_lock(&bucket->ftb_mtx);
        pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
        ASSERT(*pp != NULL);
        while (*pp != tp) {
                pp = &(*pp)->ftt_next;
                ASSERT(*pp != NULL);
        }

        *pp = tp->ftt_next;
        dtrace_membar_producer();

        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Tag the modified probe with the generation in which it was changed.
         */
        probe->ftp_gen = fasttrap_mod_gen;
}

static void
fasttrap_enable_callbacks(void)
{
        /*
         * We don't have to play the rw lock game here because we're
         * providing something rather than taking something away --
         * we can be sure that no threads have tried to follow this
         * function pointer yet.
         */
        lck_mtx_lock(&fasttrap_count_mtx);
        if (fasttrap_pid_count == 0) {
                ASSERT(dtrace_pid_probe_ptr == NULL);
                ASSERT(dtrace_return_probe_ptr == NULL);
                dtrace_pid_probe_ptr = &fasttrap_pid_probe;
                dtrace_return_probe_ptr = &fasttrap_return_probe;
        }
        ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
        ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
        fasttrap_pid_count++;
        lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
        //ASSERT(MUTEX_HELD(&cpu_lock));

        lck_mtx_lock(&fasttrap_count_mtx);
        ASSERT(fasttrap_pid_count > 0);
        fasttrap_pid_count--;
        if (fasttrap_pid_count == 0) {
                dtrace_cpu_t *cur, *cpu = CPU;

                /*
                 * APPLE NOTE: This loop seems broken: it touches every CPU
                 * but the one we're actually running on. Need to ask Sun folks
                 * if that is safe. Scenario is this: We're running on CPU A,
                 * and lock all but A. Then we get preempted, and start running
                 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
                 */
                for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
                        lck_rw_lock_exclusive(&cur->cpu_ft_lock);
                        // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
                }

                dtrace_pid_probe_ptr = NULL;
                dtrace_return_probe_ptr = NULL;

                for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
                        lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
                        // rw_exit(&cur->cpu_ft_lock);
                }
        }
        lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
        fasttrap_probe_t *probe = parg;
        proc_t *p;
        int i, rc;

        ASSERT(probe != NULL);
        ASSERT(!probe->ftp_enabled);
        ASSERT(id == probe->ftp_id);
        // ASSERT(MUTEX_HELD(&cpu_lock));

        /*
         * Increment the count of enabled probes on this probe's provider;
         * the provider can't go away while the probe still exists. We
         * must increment this even if we aren't able to properly enable
         * this probe.
         */
        lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
        probe->ftp_prov->ftp_rcount++;
        lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

        /*
         * If this probe's provider is retired (meaning it was valid in a
         * previously exec'ed incarnation of this address space), bail out. The
         * provider can't go away while we're in this code path.
         */
        if (probe->ftp_prov->ftp_retired)
                return (0);

        /*
         * If we can't find the process, it may be that we're in the context of
         * a fork in which the traced process is being born and we're copying
         * USDT probes. Otherwise, the process is gone so bail.
         */
        if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
#if defined(__APPLE__)
                /*
                 * APPLE NOTE: We should never end up here. The Solaris sprlock()
                 * does not return processes with SIDL set, but we always return
                 * the child process.
                 */
                return (0);
#else

                if ((curproc->p_flag & SFORKING) == 0)
                        return (0);

                lck_mtx_lock(&pidlock);
                p = prfind(probe->ftp_pid);

                /*
                 * Confirm that curproc is indeed forking the process in which
                 * we're trying to enable probes.
                 */
                ASSERT(p != NULL);
                //ASSERT(p->p_parent == curproc);
                ASSERT(p->p_stat == SIDL);

                lck_mtx_lock(&p->p_lock);
                lck_mtx_unlock(&pidlock);

                sprlock_proc(p);
#endif
        }

        /*
         * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
         * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
         * To mimic this, we allocate on-demand scratch space. If this is the first
         * time a probe has been enabled in this process, we need to allocate scratch
         * space for each already existing thread. Now is a good time to do this, as
         * the target process is suspended and the proc_lock is held.
         */
        if (p->p_dtrace_ptss_pages == NULL) {
                dtrace_ptss_enable(p);
        }

        // ASSERT(!(p->p_flag & SVFORK));
        proc_unlock(p);

        /*
         * We have to enable the trap entry point before any user threads have
         * the chance to execute the trap instruction we're about to place
         * in their process's text.
         */
        fasttrap_enable_callbacks();

        /*
         * Enable all the tracepoints and add this probe's id to each
         * tracepoint's list of active probes.
         */
        for (i = 0; i < (int)probe->ftp_ntps; i++) {
                if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
                        /*
                         * If enabling the tracepoint failed completely,
                         * we don't have to disable it; if the failure
                         * was only partial we must disable it.
                         */
                        if (rc == FASTTRAP_ENABLE_FAIL)
                                i--;
                        else
                                ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

                        /*
                         * Back up and pull out all the tracepoints we've
                         * created so far for this probe.
                         */
                        while (i >= 0) {
                                fasttrap_tracepoint_disable(p, probe, i);
                                i--;
                        }

                        proc_lock(p);
                        sprunlock(p);

                        /*
                         * Since we're not actually enabling this probe,
                         * drop our reference on the trap table entry.
                         */
                        fasttrap_disable_callbacks();
                        return (0);
                }
        }

        proc_lock(p);
        sprunlock(p);

        probe->ftp_enabled = 1;
        return (0);
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
        fasttrap_probe_t *probe = parg;
        fasttrap_provider_t *provider = probe->ftp_prov;
        proc_t *p;
        int i, whack = 0;

        ASSERT(id == probe->ftp_id);

        /*
         * We won't be able to acquire a /proc-esque lock on the process
         * iff the process is dead and gone. In this case, we rely on the
         * provider lock as a point of mutual exclusion to prevent other
         * DTrace consumers from disabling this probe.
         */
        if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
                // ASSERT(!(p->p_flag & SVFORK));
                proc_unlock(p);
        }

        lck_mtx_lock(&provider->ftp_mtx);

        /*
         * Disable all the associated tracepoints (for fully enabled probes).
         */
        if (probe->ftp_enabled) {
                for (i = 0; i < (int)probe->ftp_ntps; i++) {
                        fasttrap_tracepoint_disable(p, probe, i);
                }
        }

        ASSERT(provider->ftp_rcount > 0);
        provider->ftp_rcount--;

        if (p != NULL) {
                /*
                 * Even though we may not be able to remove it entirely, we
                 * mark this retired provider to get a chance to remove some
                 * of the associated probes.
                 */
                if (provider->ftp_retired && !provider->ftp_marked)
                        whack = provider->ftp_marked = 1;
                lck_mtx_unlock(&provider->ftp_mtx);

                proc_lock(p);
                sprunlock(p);
        } else {
                /*
                 * If the process is dead, we're just waiting for the
                 * last probe to be disabled to be able to free it.
                 */
                if (provider->ftp_rcount == 0 && !provider->ftp_marked)
                        whack = provider->ftp_marked = 1;
                lck_mtx_unlock(&provider->ftp_mtx);
        }

        if (whack)
                fasttrap_pid_cleanup();

        if (!probe->ftp_enabled)
                return;

        probe->ftp_enabled = 0;

        // ASSERT(MUTEX_HELD(&cpu_lock));
        fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
        fasttrap_probe_t *probe = parg;
        char *str;
        int i, ndx;

        desc->dtargd_native[0] = '\0';
        desc->dtargd_xlate[0] = '\0';

        if (probe->ftp_prov->ftp_retired != 0 ||
            desc->dtargd_ndx >= probe->ftp_nargs) {
                desc->dtargd_ndx = DTRACE_ARGNONE;
                return;
        }

        ndx = (probe->ftp_argmap != NULL) ?
            probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

        str = probe->ftp_ntypes;
        for (i = 0; i < ndx; i++) {
                str += strlen(str) + 1;
        }

        (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

        if (probe->ftp_xtypes == NULL)
                return;

        str = probe->ftp_xtypes;
        for (i = 0; i < desc->dtargd_ndx; i++) {
                str += strlen(str) + 1;
        }

        (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
        fasttrap_probe_t *probe = parg;
        unsigned int i;

        ASSERT(probe != NULL);
        ASSERT(!probe->ftp_enabled);
        ASSERT(fasttrap_total >= probe->ftp_ntps);

        atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
#if !defined(__APPLE__)
        size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
#endif

        if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
                fasttrap_mod_barrier(probe->ftp_gen);

        for (i = 0; i < probe->ftp_ntps; i++) {
#if !defined(__APPLE__)
                kmem_free(probe->ftp_tps[i].fit_tp, sizeof (fasttrap_tracepoint_t));
#else
                zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
#endif
        }

#if !defined(__APPLE__)
        kmem_free(probe, size);
#else
        if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
                zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
        } else {
                size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
                kmem_free(probe, size);
        }
#endif
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
        fasttrap_pid_provide,
        NULL,
        fasttrap_pid_enable,
        fasttrap_pid_disable,
        NULL,
        NULL,
        fasttrap_pid_getargdesc,
        fasttrap_pid_getarg,
        NULL,
        fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
        fasttrap_pid_provide,
        NULL,
        fasttrap_pid_enable,
        fasttrap_pid_disable,
        NULL,
        NULL,
        fasttrap_pid_getargdesc,
        fasttrap_usdt_getarg,
        NULL,
        fasttrap_pid_destroy
};

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
        fasttrap_bucket_t *bucket;
        fasttrap_proc_t *fprc, *new_fprc;

        bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
        lck_mtx_lock(&bucket->ftb_mtx);

        for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
                if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
                        lck_mtx_lock(&fprc->ftpc_mtx);
                        lck_mtx_unlock(&bucket->ftb_mtx);
                        fprc->ftpc_rcount++;
                        atomic_add_64(&fprc->ftpc_acount, 1);
                        ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
                        lck_mtx_unlock(&fprc->ftpc_mtx);

                        return (fprc);
                }
        }

        /*
         * Drop the bucket lock so we don't try to perform a sleeping
         * allocation under it.
         */
        lck_mtx_unlock(&bucket->ftb_mtx);

        new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
        ASSERT(new_fprc != NULL);
        new_fprc->ftpc_pid = pid;
        new_fprc->ftpc_rcount = 1;
        new_fprc->ftpc_acount = 1;

        lck_mtx_lock(&bucket->ftb_mtx);

        /*
         * Take another lap through the list to make sure a proc hasn't
         * been created for this pid while we weren't under the bucket lock.
         */
        for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
                if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
                        lck_mtx_lock(&fprc->ftpc_mtx);
                        lck_mtx_unlock(&bucket->ftb_mtx);
                        fprc->ftpc_rcount++;
                        atomic_add_64(&fprc->ftpc_acount, 1);
                        ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
                        lck_mtx_unlock(&fprc->ftpc_mtx);

                        kmem_free(new_fprc, sizeof (fasttrap_proc_t));

                        return (fprc);
                }
        }

#if defined(__APPLE__)
        /*
         * We have to initialize all locks explicitly
         */
        lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif

        new_fprc->ftpc_next = bucket->ftb_data;
        bucket->ftb_data = new_fprc;

        lck_mtx_unlock(&bucket->ftb_mtx);

        return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
        fasttrap_bucket_t *bucket;
        fasttrap_proc_t *fprc, **fprcp;
        pid_t pid = proc->ftpc_pid;

        lck_mtx_lock(&proc->ftpc_mtx);

        ASSERT(proc->ftpc_rcount != 0);
        ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

        if (--proc->ftpc_rcount != 0) {
                lck_mtx_unlock(&proc->ftpc_mtx);
                return;
        }

        lck_mtx_unlock(&proc->ftpc_mtx);

        /*
         * There should definitely be no live providers associated with this
         * process at this point.
         */
        ASSERT(proc->ftpc_acount == 0);

        bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
        lck_mtx_lock(&bucket->ftb_mtx);

        fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
        while ((fprc = *fprcp) != NULL) {
                if (fprc == proc)
                        break;

                fprcp = &fprc->ftpc_next;
        }

        /*
         * Something strange has happened if we can't find the proc.
         */
        ASSERT(fprc != NULL);

        *fprcp = fprc->ftpc_next;

        lck_mtx_unlock(&bucket->ftb_mtx);

#if defined(__APPLE__)
        /*
         * Apple explicit lock management. Not 100% certain we need this; the
         * memory is freed even without the destroy. Maybe accounting cleanup?
         */
        lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
#endif

        kmem_free(fprc, sizeof (fasttrap_proc_t));
}

/*
 * Look up a fasttrap-managed provider based on its name and associated pid.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
#if defined(__APPLE__)
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
#endif /* __APPLE__ */
{
        fasttrap_provider_t *fp, *new_fp = NULL;
        fasttrap_bucket_t *bucket;
        char provname[DTRACE_PROVNAMELEN];
        proc_t *p;
        cred_t *cred;

        ASSERT(strlen(name) < sizeof (fp->ftp_name));
        ASSERT(pattr != NULL);

        bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
        lck_mtx_lock(&bucket->ftb_mtx);

        /*
         * Take a lap through the list and return the match if we find it.
         */
        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid &&
#if defined(__APPLE__)
                    fp->ftp_provider_type == provider_type &&
#endif /* __APPLE__ */
                    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
                    !fp->ftp_retired) {
                        lck_mtx_lock(&fp->ftp_mtx);
                        lck_mtx_unlock(&bucket->ftb_mtx);
                        return (fp);
                }
        }

        /*
         * Drop the bucket lock so we don't try to perform a sleeping
         * allocation under it.
         */
        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Make sure the process exists, isn't a child created as the result
         * of a vfork(2), and isn't a zombie (but may be in fork).
         */
        if ((p = proc_find(pid)) == NULL) {
                return NULL;
        }
        proc_lock(p);
        if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
                proc_unlock(p);
                proc_rele(p);
                return (NULL);
        }

        /*
         * Increment p_dtrace_probes so that the process knows to inform us
         * when it exits or execs. fasttrap_provider_free() decrements this
         * when we're done with this provider.
         */
        p->p_dtrace_probes++;

        /*
         * Grab the credentials for this process so we have
         * something to pass to dtrace_register().
         */
#if !defined(__APPLE__)
        mutex_enter(&p->p_crlock);
        crhold(p->p_cred);
        cred = p->p_cred;
        mutex_exit(&p->p_crlock);
        mutex_exit(&p->p_lock);
#else
        // lck_mtx_lock(&p->p_crlock);
        // Seems like OS X has no equivalent to crhold, even though it has a cr_ref field in ucred
        crhold(p->p_ucred);
        cred = p->p_ucred;
        // lck_mtx_unlock(&p->p_crlock);
        proc_unlock(p);
        proc_rele(p);
#endif /* __APPLE__ */

        new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
        ASSERT(new_fp != NULL);
        new_fp->ftp_pid = pid;
        new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if defined(__APPLE__)
        new_fp->ftp_provider_type = provider_type;

        /*
         * Apple locks require explicit init.
         */
        lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
        lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif /* __APPLE__ */

        ASSERT(new_fp->ftp_proc != NULL);

        lck_mtx_lock(&bucket->ftb_mtx);

        /*
         * Take another lap through the list to make sure a provider hasn't
         * been created for this pid while we weren't under the bucket lock.
         */
        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
                    !fp->ftp_retired) {
                        lck_mtx_lock(&fp->ftp_mtx);
                        lck_mtx_unlock(&bucket->ftb_mtx);
                        fasttrap_provider_free(new_fp);
                        crfree(cred);
                        return (fp);
                }
        }

        (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

        /*
         * Fail and return NULL if either the provider name is too long
         * or we fail to register this new provider with the DTrace
         * framework. Note that this is the only place we ever construct
         * the full provider name -- we keep it in pieces in the provider
         * structure.
         */
        if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
            (int)sizeof (provname) ||
            dtrace_register(provname, pattr,
            DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
            pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
            &new_fp->ftp_provid) != 0) {
                lck_mtx_unlock(&bucket->ftb_mtx);
                fasttrap_provider_free(new_fp);
                crfree(cred);
                return (NULL);
        }

        new_fp->ftp_next = bucket->ftb_data;
        bucket->ftb_data = new_fp;

        lck_mtx_lock(&new_fp->ftp_mtx);
        lck_mtx_unlock(&bucket->ftb_mtx);

        crfree(cred);
        return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
        pid_t pid = provider->ftp_pid;
        proc_t *p;

        /*
         * There need to be no associated enabled probes, no consumers
         * creating probes, and no meta providers referencing this provider.
         */
        ASSERT(provider->ftp_rcount == 0);
        ASSERT(provider->ftp_ccount == 0);
        ASSERT(provider->ftp_mcount == 0);

        /*
         * If this provider hasn't been retired, we need to explicitly drop the
         * count of active providers on the associated process structure.
         */
        if (!provider->ftp_retired) {
                atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
                ASSERT(provider->ftp_proc->ftpc_acount <
                    provider->ftp_proc->ftpc_rcount);
        }

        fasttrap_proc_release(provider->ftp_proc);

#if defined(__APPLE__)
        /*
         * Apple explicit lock management. Not 100% certain we need this; the
         * memory is freed even without the destroy. Maybe accounting cleanup?
         */
        lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
        lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
#endif

        kmem_free(provider, sizeof (fasttrap_provider_t));

        /*
         * Decrement p_dtrace_probes on the process whose provider we're
         * freeing. We don't have to worry about clobbering someone else's
         * modifications to it because we have locked the bucket that
         * corresponds to this process's hash chain in the provider hash
         * table. Don't sweat it if we can't find the process.
         */
        if ((p = proc_find(pid)) == NULL) {
                return;
        }

        proc_lock(p);
        p->p_dtrace_probes--;
        proc_unlock(p);

        proc_rele(p);
}

static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
        fasttrap_provider_t *fp;
        fasttrap_bucket_t *bucket;
        dtrace_provider_id_t provid;

        ASSERT(strlen(name) < sizeof (fp->ftp_name));

        bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
        lck_mtx_lock(&bucket->ftb_mtx);

        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
                    !fp->ftp_retired)
                        break;
        }

        if (fp == NULL) {
                lck_mtx_unlock(&bucket->ftb_mtx);
                return;
        }

        lck_mtx_lock(&fp->ftp_mtx);
        ASSERT(!mprov || fp->ftp_mcount > 0);
        if (mprov && --fp->ftp_mcount != 0) {
                lck_mtx_unlock(&fp->ftp_mtx);
                lck_mtx_unlock(&bucket->ftb_mtx);
                return;
        }

        /*
         * Mark the provider to be removed in our post-processing step, mark it
         * retired, and drop the active count on its proc. Marking it indicates
         * that we should try to remove it; setting the retired flag indicates
         * that we're done with this provider; dropping the active count on the
         * proc releases our hold, and when this reaches zero (as it will
         * during exit or exec) the proc and associated providers become
         * defunct.
         *
         * We obviously need to take the bucket lock before the provider lock
         * to perform the lookup, but we need to drop the provider lock
         * before calling into the DTrace framework since we acquire the
         * provider lock in callbacks invoked from the DTrace framework. The
         * bucket lock therefore protects the integrity of the provider hash
         * table.
         */
        atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
        ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

        fp->ftp_retired = 1;
        fp->ftp_marked = 1;
        provid = fp->ftp_provid;
        lck_mtx_unlock(&fp->ftp_mtx);

        /*
         * We don't have to worry about invalidating the same provider twice
         * since fasttrap_provider_lookup() will ignore providers that have
         * been marked as retired.
         */
        dtrace_invalidate(provid);

        lck_mtx_unlock(&bucket->ftb_mtx);

        fasttrap_pid_cleanup();
}

/*
 * qsort(3)-style comparators. Compare explicitly rather than returning the
 * subtraction: the difference truncated to an int can have the wrong sign
 * when the values differ by 2^31 or more.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
        uint32_t a = *(const uint32_t *)ap;
        uint32_t b = *(const uint32_t *)bp;

        return ((a > b) - (a < b));
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
        uint64_t a = *(const uint64_t *)ap;
        uint64_t b = *(const uint64_t *)bp;

        return ((a > b) - (a < b));
}

1719static int
1720fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1721{
1722 fasttrap_provider_t *provider;
1723 fasttrap_probe_t *pp;
1724 fasttrap_tracepoint_t *tp;
1725 const char *name;
1726 unsigned int i, aframes, whack;
1727
b0d623f7
A
1728 /*
1729 * There needs to be at least one desired trace point.
1730 */
1731 if (pdata->ftps_noffs == 0)
1732 return (EINVAL);
1733
2d21ac55
A
1734#if defined(__APPLE__)
1735 switch (pdata->ftps_probe_type) {
1736#endif
1737 case DTFTP_ENTRY:
1738 name = "entry";
1739 aframes = FASTTRAP_ENTRY_AFRAMES;
1740 break;
1741 case DTFTP_RETURN:
1742 name = "return";
1743 aframes = FASTTRAP_RETURN_AFRAMES;
1744 break;
1745 case DTFTP_OFFSETS:
1746 aframes = 0;
1747 name = NULL;
1748 break;
1749 default:
1750 return (EINVAL);
1751 }
1752
1753#if defined(__APPLE__)
1754 const char* provider_name;
1755 switch (pdata->ftps_provider_type) {
1756 case DTFTP_PROVIDER_PID:
1757 provider_name = FASTTRAP_PID_NAME;
1758 break;
1759 case DTFTP_PROVIDER_OBJC:
1760 provider_name = FASTTRAP_OBJC_NAME;
1761 break;
1762 case DTFTP_PROVIDER_ONESHOT:
1763 provider_name = FASTTRAP_ONESHOT_NAME;
1764 break;
1765 default:
1766 return (EINVAL);
1767 }
1768
1769 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type,
1770 provider_name, &pid_attr)) == NULL)
1771 return (ESRCH);
1772#endif /* __APPLE__ */
1773
1774 /*
1775 * Increment this reference count to indicate that a consumer is
1776 * actively adding a new probe associated with this provider. This
1777 * prevents the provider from being deleted -- we'll need to check
1778 * for pending deletions when we drop this reference count.
1779 */
1780 provider->ftp_ccount++;
1781 lck_mtx_unlock(&provider->ftp_mtx);
1782
1783 /*
1784 * Grab the creation lock to ensure consistency between calls to
1785 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1786 * other threads creating probes. We must drop the provider lock
1787 * before taking this lock to avoid a three-way deadlock with the
1788 * DTrace framework.
1789 */
1790 lck_mtx_lock(&provider->ftp_cmtx);
1791
1792 if (name == NULL) {
1793 for (i = 0; i < pdata->ftps_noffs; i++) {
1794 char name_str[17];
1795
1796 (void) snprintf(name_str, sizeof(name_str), "%llx",
b0d623f7 1797 (uint64_t)pdata->ftps_offs[i]);
2d21ac55
A
1798
1799 if (dtrace_probe_lookup(provider->ftp_provid,
1800 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1801 continue;
1802
1803 atomic_add_32(&fasttrap_total, 1);
1804
1805 if (fasttrap_total > fasttrap_max) {
1806 atomic_add_32(&fasttrap_total, -1);
1807 goto no_mem;
1808 }
1809
1810#if !defined(__APPLE__)
1811 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1812 ASSERT(pp != NULL);
1813#else
1814 pp = zalloc(fasttrap_probe_t_zones[1]);
1815 bzero(pp, sizeof (fasttrap_probe_t));
1816#endif
1817
1818 pp->ftp_prov = provider;
1819 pp->ftp_faddr = pdata->ftps_pc;
1820 pp->ftp_fsize = pdata->ftps_size;
1821 pp->ftp_pid = pdata->ftps_pid;
1822 pp->ftp_ntps = 1;
1823
1824#if !defined(__APPLE__)
1825 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1826#else
1827 tp = zalloc(fasttrap_tracepoint_t_zone);
1828 bzero(tp, sizeof (fasttrap_tracepoint_t));
1829#endif
1830
1831 tp->ftt_proc = provider->ftp_proc;
1832 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1833 tp->ftt_pid = pdata->ftps_pid;
1834
c910b4d9 1835
2d21ac55
A
1836 pp->ftp_tps[0].fit_tp = tp;
1837 pp->ftp_tps[0].fit_id.fti_probe = pp;
1838#if defined(__APPLE__)
1839 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1840#endif
1841 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1842 pdata->ftps_mod, pdata->ftps_func, name_str,
1843 FASTTRAP_OFFSET_AFRAMES, pp);
1844 }
1845
1846 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1847 pdata->ftps_func, name) == 0) {
1848 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1849
1850 if (fasttrap_total > fasttrap_max) {
1851 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1852 goto no_mem;
1853 }
1854
b0d623f7
A
1855 /*
1856 * Make sure all tracepoint program counter values are unique.
1857 * We later assume that each probe has exactly one tracepoint
1858 * for a given pc.
1859 */
1860 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1861 sizeof (uint64_t), fasttrap_uint64_cmp);
1862 for (i = 1; i < pdata->ftps_noffs; i++) {
1863 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1864 continue;
1865
1866 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1867 goto no_mem;
1868 }
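		/*
		 * Example (hypothetical offsets): { 0x10, 0x10, 0x20 } sorts
		 * unchanged, offs[1] <= offs[0] flags the duplicate, and we
		 * bail out before any tracepoints are allocated.
		 */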
1869
1870 ASSERT(pdata->ftps_noffs > 0);
1871#if !defined(__APPLE__)
1872 pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1873 ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1874 ASSERT(pp != NULL);
1875#else
1876 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1877 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
1878 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
1879 } else {
1880 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1881 }
1882#endif
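		/*
		 * Sizing sketch: a probe with three tracepoints occupies
		 * offsetof(fasttrap_probe_t, ftp_tps[3]) bytes and comes from
		 * fasttrap_probe_t_zones[3]; only probes with
		 * FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS or more tracepoints
		 * fall back to kmem_zalloc().
		 */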
1883
1884 pp->ftp_prov = provider;
1885 pp->ftp_faddr = pdata->ftps_pc;
1886 pp->ftp_fsize = pdata->ftps_size;
1887 pp->ftp_pid = pdata->ftps_pid;
1888 pp->ftp_ntps = pdata->ftps_noffs;
1889
1890 for (i = 0; i < pdata->ftps_noffs; i++) {
1891#if !defined(__APPLE__)
1892 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1893#else
1894 tp = zalloc(fasttrap_tracepoint_t_zone);
1895 bzero(tp, sizeof (fasttrap_tracepoint_t));
1896#endif
1897
1898 tp->ftt_proc = provider->ftp_proc;
1899 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1900 tp->ftt_pid = pdata->ftps_pid;
1901
1902 pp->ftp_tps[i].fit_tp = tp;
1903 pp->ftp_tps[i].fit_id.fti_probe = pp;
1904#if defined(__APPLE__)
1905 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
1906#endif
1907 }
1908
1909 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1910 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1911 }
1912
1913 lck_mtx_unlock(&provider->ftp_cmtx);
1914
1915 /*
1916 * We know that the provider is still valid since we incremented the
1917 * creation reference count. If someone tried to clean up this provider
1918 * while we were using it (e.g. because the process called exec(2) or
1919 * exit(2)), take note of that and try to clean it up now.
1920 */
1921 lck_mtx_lock(&provider->ftp_mtx);
1922 provider->ftp_ccount--;
1923 whack = provider->ftp_retired;
1924 lck_mtx_unlock(&provider->ftp_mtx);
1925
1926 if (whack)
1927 fasttrap_pid_cleanup();
1928
1929 return (0);
1930
1931no_mem:
1932 /*
1933 * If we've exhausted the allowable resources, we'll try to remove
1934 * this provider to free some up. This is to cover the case where
1935 * the user has accidentally created many more probes than was
1936 * intended (e.g. pid123:::).
1937 */
1938 lck_mtx_unlock(&provider->ftp_cmtx);
1939 lck_mtx_lock(&provider->ftp_mtx);
1940 provider->ftp_ccount--;
1941 provider->ftp_marked = 1;
1942 lck_mtx_unlock(&provider->ftp_mtx);
1943
1944 fasttrap_pid_cleanup();
1945
1946 return (ENOMEM);
1947}
1948
1949/*ARGSUSED*/
1950static void *
1951fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1952{
1953#pragma unused(arg)
1954 fasttrap_provider_t *provider;
1955
1956 /*
1957 * A 32-bit unsigned integer (like a pid for example) can be
1958 * expressed in 10 or fewer decimal digits. Make sure that we'll
1959 * have enough space for the provider name.
1960 */
1961 if (strlen(dhpv->dthpv_provname) + 10 >=
1962 sizeof (provider->ftp_name)) {
1963 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1964 "name too long to accomodate pid", dhpv->dthpv_provname);
1965 return (NULL);
1966 }
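	/*
	 * For example (names hypothetical): a helper provider "mysql" in
	 * pid 1234 is instantiated as the DTrace provider "mysql1234", so
	 * the check above ensures the base name plus up to 10 pid digits
	 * still fits in ftp_name.
	 */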
1967
1968 /*
1969 * Don't let folks spoof the true pid provider.
1970 */
1971 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
1972 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1973 "%s is an invalid name", dhpv->dthpv_provname,
1974 FASTTRAP_PID_NAME);
1975 return (NULL);
1976 }
1977#if defined(__APPLE__)
1978 /*
1979 * We also need to check the other pid provider types
1980 */
1981 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
1982 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1983 "%s is an invalid name", dhpv->dthpv_provname,
1984 FASTTRAP_OBJC_NAME);
1985 return (NULL);
1986 }
1987 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
1988 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1989 "%s is an invalid name", dhpv->dthpv_provname,
1990 FASTTRAP_ONESHOT_NAME);
1991 return (NULL);
1992 }
1993#endif /* __APPLE__ */
1994
1995 /*
1996 * The highest stability class that fasttrap supports is ISA; cap
1997 * the stability of the new provider accordingly.
1998 */
 1999 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
 2000 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
 2001 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
 2002 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
 2003 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
 2004 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
 2005 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
 2006 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
 2007 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2008 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
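	/*
	 * For example, a helper that declares DTRACE_CLASS_COMMON for any
	 * of these attributes is quietly lowered to DTRACE_CLASS_ISA here.
	 */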
2009
2010#if defined(__APPLE__)
2011 if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2012 &dhpv->dthpv_pattr)) == NULL) {
2013 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2014 "process %u", dhpv->dthpv_provname, (uint_t)pid);
2015 return (NULL);
2016 }
2017
2018 /*
2019 * APPLE NOTE!
2020 *
2021 * USDT probes (fasttrap meta probes) are very expensive to create.
2022 * Profiling has shown that the largest single cost is verifying that
2023 * dtrace hasn't already created a given meta_probe. The reason for
2024 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2025 * each static probe being created. We want to get rid of that check.
2026 * The simplest way of eliminating it is to deny the ability to add
2027 * probes to an existing provider. If the provider already exists, BZZT!
2028 * This still leaves the possibility of intentionally malformed DOF
2029 * having duplicate probes. However, duplicate probes are not fatal,
2030 * and there is no way to get that by accident, so we will not check
2031 * for that case.
2032 *
2033 * UPDATE: It turns out there are several use cases that require adding
2034 * probes to existing providers. Disabling this optimization for now...
 2035 */
2036#endif /* __APPLE__ */
2037
2038 /*
2039 * Up the meta provider count so this provider isn't removed until
2040 * the meta provider has been told to remove it.
2041 */
2042 provider->ftp_mcount++;
2043
2044 lck_mtx_unlock(&provider->ftp_mtx);
2045
2046 return (provider);
2047}
2048
2049/*ARGSUSED*/
2050static void
2051fasttrap_meta_create_probe(void *arg, void *parg,
2052 dtrace_helper_probedesc_t *dhpb)
2053{
2054#pragma unused(arg)
2055 fasttrap_provider_t *provider = parg;
2056 fasttrap_probe_t *pp;
2057 fasttrap_tracepoint_t *tp;
2058 unsigned int i, j;
2059 uint32_t ntps;
2060
2061 /*
2062 * Since the meta provider count is non-zero we don't have to worry
2063 * about this provider disappearing.
2064 */
2065 ASSERT(provider->ftp_mcount > 0);
2066
2067 /*
2068 * The offsets must be unique.
2069 */
2070 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2071 fasttrap_uint32_cmp);
2072 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2073 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2074 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2075 return;
2076 }
2077
2078 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2079 fasttrap_uint32_cmp);
2080 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2081 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2082 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2083 return;
2084 }
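	/*
	 * As with the pid provider above, a duplicated offset (e.g. two
	 * is-enabled offsets that resolve to the same pc) makes this
	 * function silently decline to create the probe.
	 */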
2085
2086 /*
2087 * Grab the creation lock to ensure consistency between calls to
2088 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2089 * other threads creating probes.
2090 */
2091 lck_mtx_lock(&provider->ftp_cmtx);
2092
2093#if !defined(__APPLE__)
2094 /*
2095 * APPLE NOTE: This is hideously expensive. See note in
2096 * fasttrap_meta_provide() for why we can get away without
2097 * checking here.
2098 */
2099 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2100 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2101 lck_mtx_unlock(&provider->ftp_cmtx);
2102 return;
2103 }
2104#endif
2105
2106 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2107 ASSERT(ntps > 0);
2108
2109 atomic_add_32(&fasttrap_total, ntps);
2110
2111 if (fasttrap_total > fasttrap_max) {
2112 atomic_add_32(&fasttrap_total, -ntps);
2113 lck_mtx_unlock(&provider->ftp_cmtx);
2114 return;
2115 }
2116
2117#if !defined(__APPLE__)
2118 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2119 ASSERT(pp != NULL);
2120#else
2121 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2122 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2123 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2124 } else {
2125 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2126 }
2127#endif
2128
2129 pp->ftp_prov = provider;
2130 pp->ftp_pid = provider->ftp_pid;
2131 pp->ftp_ntps = ntps;
2132 pp->ftp_nargs = dhpb->dthpb_xargc;
2133 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2134 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2135
2136 /*
2137 * First create a tracepoint for each actual point of interest.
2138 */
2139 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2140#if !defined(__APPLE__)
2141 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2142#else
2143 tp = zalloc(fasttrap_tracepoint_t_zone);
2144 bzero(tp, sizeof (fasttrap_tracepoint_t));
2145#endif
2146
2147 tp->ftt_proc = provider->ftp_proc;
2148#if defined(__APPLE__)
2149 /*
2150 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2151 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2152 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2153 */
 2154#if defined(__x86_64__)
2155 /*
2156 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2157 */
2158 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2159#else
2160#error "Architecture not supported"
2161#endif
2162
2163#else
2164 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
2165#endif
2166 tp->ftt_pid = provider->ftp_pid;
2167
2168 pp->ftp_tps[i].fit_tp = tp;
2169 pp->ftp_tps[i].fit_id.fti_probe = pp;
2170#ifdef __sparc
2171 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
2172#else
2173 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2174#endif
2175 }
2176
2177 /*
2178 * Then create a tracepoint for each is-enabled point.
2179 */
2180 for (j = 0; i < ntps; i++, j++) {
2181#if !defined(__APPLE__)
2182 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2183#else
2184 tp = zalloc(fasttrap_tracepoint_t_zone);
2185 bzero(tp, sizeof (fasttrap_tracepoint_t));
2186#endif
2187
2188 tp->ftt_proc = provider->ftp_proc;
2189#if defined(__APPLE__)
2190 /*
2191 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2192 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2193 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2194 */
 2195#if defined(__x86_64__)
2196 /*
 2197 * Both 32 & 64 bit want to go forward two bytes, to point at a single-byte NOP.
2198 */
2199 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2200#else
2201#error "Architecture not supported"
2202#endif
2203
2204#else
2205 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
2206#endif
2207 tp->ftt_pid = provider->ftp_pid;
2208
2209 pp->ftp_tps[i].fit_tp = tp;
2210 pp->ftp_tps[i].fit_id.fti_probe = pp;
2211 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2212 }
2213
2214 /*
2215 * If the arguments are shuffled around we set the argument remapping
2216 * table. Later, when the probe fires, we only remap the arguments
2217 * if the table is non-NULL.
2218 */
2219 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2220 if (dhpb->dthpb_args[i] != i) {
2221 pp->ftp_argmap = dhpb->dthpb_args;
2222 break;
2223 }
2224 }
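	/*
	 * Worked example (mapping hypothetical): if the translator emits
	 * the arguments in the order (native arg1, native arg0) then
	 * dthpb_args[] is { 1, 0 }, which differs from the identity map,
	 * so ftp_argmap is set and probe args are remapped at fire time.
	 */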
2225
2226 /*
2227 * The probe is fully constructed -- register it with DTrace.
2228 */
2229 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2230 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2231
2232 lck_mtx_unlock(&provider->ftp_cmtx);
2233}
2234
2235/*ARGSUSED*/
2236static void
2237fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2238{
2239#pragma unused(arg)
2240 /*
 2241 * Clean up the USDT provider. There may be active consumers of the
 2242 * provider busy adding probes; no damage will actually befall the
 2243 * provider until that count has dropped to zero. This just puts
2244 * the provider on death row.
2245 */
2246 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2247}
2248
2249static dtrace_mops_t fasttrap_mops = {
2250 fasttrap_meta_create_probe,
2251 fasttrap_meta_provide,
2252 fasttrap_meta_remove
2253};
2254
2255/*ARGSUSED*/
2256static int
 2257fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2258{
2259#pragma unused(dev, md, rv)
2260 if (!dtrace_attached())
2261 return (EAGAIN);
2262
2263 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2264 fasttrap_probe_spec_t *probe;
2265 uint64_t noffs;
2266 size_t size, i;
2267 int ret;
2268 char *c;
2269
2270 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2271 sizeof (probe->ftps_noffs)))
2272 return (EFAULT);
2273
2274 /*
2275 * Probes must have at least one tracepoint.
2276 */
2277 if (noffs == 0)
2278 return (EINVAL);
2279
2280 /*
2281 * We want to check the number of noffs before doing
2282 * sizing math, to prevent potential buffer overflows.
2283 */
2284 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2285 return (ENOMEM);
2286
2287 size = sizeof (fasttrap_probe_spec_t) +
2288 sizeof (probe->ftps_offs[0]) * (noffs - 1);
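		/*
		 * Size math example: the struct declares one ftps_offs[]
		 * element inline, so noffs == 3 allocates
		 * sizeof (fasttrap_probe_spec_t) + 2 * sizeof (uint64_t);
		 * the check above caps the whole copyin buffer at ~1MB.
		 */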
2289
2290 probe = kmem_alloc(size, KM_SLEEP);
2291
2292 if (copyin(arg, probe, size) != 0 ||
2293 probe->ftps_noffs != noffs) {
2294 kmem_free(probe, size);
2295 return (EFAULT);
2296 }
2297
2298 /*
2299 * Verify that the function and module strings contain no
2300 * funny characters.
2301 */
2302 for (i = 0, c = &probe->ftps_func[0]; i < sizeof(probe->ftps_func) && *c != '\0'; i++, c++) {
2303 if (*c < 0x20 || 0x7f <= *c) {
2304 ret = EINVAL;
2305 goto err;
2306 }
2307 }
2308 if (*c != '\0') {
2309 ret = EINVAL;
2310 goto err;
2311 }
2312
2313 for (i = 0, c = &probe->ftps_mod[0]; i < sizeof(probe->ftps_mod) && *c != '\0'; i++, c++) {
2314 if (*c < 0x20 || 0x7f <= *c) {
2315 ret = EINVAL;
2316 goto err;
2317 }
2318 }
2319 if (*c != '\0') {
2320 ret = EINVAL;
2321 goto err;
2322 }
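		/*
		 * Both loops accept only printable ASCII (0x20 through 0x7e)
		 * and require a NUL inside the fixed-size buffer: "libSystem"
		 * passes, while a name with an embedded control character or
		 * no terminator is rejected with EINVAL.
		 */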
2323
2324 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2325 proc_t *p;
2326 pid_t pid = probe->ftps_pid;
2327
2328 /*
2329 * Report an error if the process doesn't exist
2330 * or is actively being birthed.
2331 */
2332 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2333 if (p != PROC_NULL)
2334 proc_rele(p);
2335 return (ESRCH);
2336 }
2337 // proc_lock(p);
2338 // FIXME! How is this done on OS X?
2339 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2340 // VREAD | VWRITE)) != 0) {
2341 // mutex_exit(&p->p_lock);
2342 // return (ret);
2343 // }
2344 // proc_unlock(p);
2345 proc_rele(p);
2346 }
2347
2348 ret = fasttrap_add_probe(probe);
2349
2350err:
2351 kmem_free(probe, size);
2352
2353 return (ret);
2354
2355 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2356 fasttrap_instr_query_t instr;
2357 fasttrap_tracepoint_t *tp;
2358 uint_t index;
2359 // int ret;
2360
 2361 if (copyin(arg, &instr, sizeof (instr)) != 0)
2362 return (EFAULT);
2363
2364 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2365 proc_t *p;
2366 pid_t pid = instr.ftiq_pid;
2367
2368 /*
2369 * Report an error if the process doesn't exist
2370 * or is actively being birthed.
2371 */
 2372 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2373 if (p != PROC_NULL)
2374 proc_rele(p);
2375 return (ESRCH);
2376 }
2377 //proc_lock(p);
2378 // FIXME! How is this done on OS X?
2379 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2380 // VREAD)) != 0) {
2381 // mutex_exit(&p->p_lock);
2382 // return (ret);
2383 // }
2384 // proc_unlock(p);
2385 proc_rele(p);
2386 }
2387
2388 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2389
2390 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2391 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2392 while (tp != NULL) {
2393 if (instr.ftiq_pid == tp->ftt_pid &&
2394 instr.ftiq_pc == tp->ftt_pc &&
 2395 tp->ftt_proc->ftpc_acount != 0)
2396 break;
2397
2398 tp = tp->ftt_next;
2399 }
2400
2401 if (tp == NULL) {
2402 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2403 return (ENOENT);
2404 }
2405
2406 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2407 sizeof (instr.ftiq_instr));
2408 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2409
 2410 if (copyout(&instr, arg, sizeof (instr)) != 0)
2411 return (EFAULT);
2412
2413 return (0);
2414 }
2415
2416 return (EINVAL);
2417}
2418
2419static int
2420fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
2421{
2422 ulong_t nent;
2423
2424 switch (cmd) {
2425 case DDI_ATTACH:
2426 break;
2427 case DDI_RESUME:
2428 return (DDI_SUCCESS);
2429 default:
2430 return (DDI_FAILURE);
2431 }
2432
2433 ddi_report_dev(devi);
2434 fasttrap_devi = devi;
2435
2436 /*
2437 * Install our hooks into fork(2), exec(2), and exit(2).
2438 */
2439 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2440 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2441 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2442
2443#if !defined(__APPLE__)
2444 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2445 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2446#else
2447 /*
2448 * We're sizing based on system memory. 100k probes per 256M of system memory.
2449 * Yes, this is a WAG.
2450 */
2451 fasttrap_max = (sane_size >> 28) * 100000;
2452 if (fasttrap_max == 0)
2453 fasttrap_max = 50000;
2454#endif
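	/*
	 * Worked example: with 8GB of memory, sane_size >> 28 == 32, so
	 * fasttrap_max == 3,200,000 probes; below 256MB the shift yields 0
	 * and we fall back to the 50,000-probe floor.
	 */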
2455 fasttrap_total = 0;
2456
2457 /*
2458 * Conjure up the tracepoints hashtable...
2459 */
2460 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2461 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2462
2463 if (nent <= 0 || nent > 0x1000000)
2464 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2465
2466 if ((nent & (nent - 1)) == 0)
2467 fasttrap_tpoints.fth_nent = nent;
2468 else
2469 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2470 ASSERT(fasttrap_tpoints.fth_nent > 0);
2471 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
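	/*
	 * Rounding sketch (assuming fasttrap_highbit() matches Solaris
	 * highbit()): an nent of 1000 becomes 1 << 10 == 1024 with
	 * fth_mask == 0x3ff, so bucket indexes reduce to a single AND.
	 */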
2472 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2473 sizeof (fasttrap_bucket_t), KM_SLEEP);
2474 ASSERT(fasttrap_tpoints.fth_table != NULL);
2475#if defined(__APPLE__)
2476 /*
2477 * We have to explicitly initialize all locks...
2478 */
2479 unsigned int i;
2480 for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
2481 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2482 }
2483#endif
2484
2485 /*
2486 * ... and the providers hash table...
2487 */
2488 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2489 if ((nent & (nent - 1)) == 0)
2490 fasttrap_provs.fth_nent = nent;
2491 else
2492 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2493 ASSERT(fasttrap_provs.fth_nent > 0);
2494 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2495 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2496 sizeof (fasttrap_bucket_t), KM_SLEEP);
2497 ASSERT(fasttrap_provs.fth_table != NULL);
2498#if defined(__APPLE__)
2499 /*
2500 * We have to explicitly initialize all locks...
2501 */
2502 for (i=0; i<fasttrap_provs.fth_nent; i++) {
2503 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2504 }
2505#endif
2506
2507 /*
2508 * ... and the procs hash table.
2509 */
2510 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2511 if ((nent & (nent - 1)) == 0)
2512 fasttrap_procs.fth_nent = nent;
2513 else
2514 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2515 ASSERT(fasttrap_procs.fth_nent > 0);
2516 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2517 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2518 sizeof (fasttrap_bucket_t), KM_SLEEP);
2519 ASSERT(fasttrap_procs.fth_table != NULL);
2520#if defined(__APPLE__)
2521 /*
2522 * We have to explicitly initialize all locks...
2523 */
2524 for (i=0; i<fasttrap_procs.fth_nent; i++) {
2525 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2526 }
2527#endif
2528
2529 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2530 &fasttrap_meta_id);
2531
2532 return (DDI_SUCCESS);
2533}
2534
2535static int
2536_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2537{
2538#pragma unused(dev, flags, devtype, p)
2539 return 0;
2540}
2541
2542static int
2543_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2544{
 2545 int err, rv = 0;
 2546 user_addr_t uaddrp;
 2547
2548 if (proc_is64bit(p))
2549 uaddrp = *(user_addr_t *)data;
2550 else
2551 uaddrp = (user_addr_t) *(uint32_t *)data;
2552
2553 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2554
2555 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
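	/* Encoding example: err == 22 comes back to userland as errno 22,
	 * while rv == 5 is returned as errno (5 << 12) == 20480; values
	 * >= 4096 are understood by the consumer as shifted return values. */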
2556 if (err != 0) {
2557 ASSERT( (err & 0xfffff000) == 0 );
2558 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2559 } else if (rv != 0) {
2560 ASSERT( (rv & 0xfff00000) == 0 );
2561 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2562 } else
2563 return 0;
2564}
2565
2566static int gFasttrapInited = 0;
2567
2568#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2569
2570/*
2571 * A struct describing which functions will get invoked for certain
2572 * actions.
2573 */
2574
2575static struct cdevsw fasttrap_cdevsw =
2576{
2577 _fasttrap_open, /* open */
2578 eno_opcl, /* close */
2579 eno_rdwrt, /* read */
2580 eno_rdwrt, /* write */
2581 _fasttrap_ioctl, /* ioctl */
2582 (stop_fcn_t *)nulldev, /* stop */
2583 (reset_fcn_t *)nulldev, /* reset */
2584 NULL, /* tty's */
2585 eno_select, /* select */
2586 eno_mmap, /* mmap */
2587 eno_strat, /* strategy */
2588 eno_getc, /* getc */
2589 eno_putc, /* putc */
2590 0 /* type */
2591};
2592
2593void fasttrap_init(void);
2594
2595void
2596fasttrap_init( void )
2597{
2598 /*
 2599 * This method is now invoked from multiple places: any open of /dev/dtrace,
 2600 * and also from dtrace_init() if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
 2601 *
 2602 * The reason is to delay allocating these (rather large) resources for as long as possible.
2603 */
2604 if (0 == gFasttrapInited) {
2605 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2606
2607 if (majdevno < 0) {
2608 // FIX ME! What kind of error reporting to do here?
2609 printf("fasttrap_init: failed to allocate a major number!\n");
2610 return;
2611 }
2612
2613 dev_t device = makedev( (uint32_t)majdevno, 0 );
2614 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2615 return;
2616 }
2617
2618 /*
2619 * Allocate the fasttrap_tracepoint_t zone
2620 */
2621 fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2622 1024 * sizeof(fasttrap_tracepoint_t),
2623 sizeof(fasttrap_tracepoint_t),
2624 "dtrace.fasttrap_tracepoint_t");
2625
2626 /*
2627 * fasttrap_probe_t's are variable in size. We use an array of zones to
2628 * cover the most common sizes.
2629 */
2630 int i;
2631 for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2632 size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2633 fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2634 1024 * zone_element_size,
2635 zone_element_size,
2636 fasttrap_probe_t_zone_names[i]);
2637 }
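		/*
		 * e.g. fasttrap_probe_t_zones[4] serves probes with exactly
		 * four tracepoints, each element being
		 * offsetof(fasttrap_probe_t, ftp_tps[4]) bytes; index 0 is
		 * never used since every probe has at least one tracepoint.
		 */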
2638
2639
2640 /*
2641 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2642 */
2643 fasttrap_lck_attr = lck_attr_alloc_init();
2644 fasttrap_lck_grp_attr= lck_grp_attr_alloc_init();
2645 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2646
2647 /*
2648 * Initialize global locks
2649 */
2650 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2651 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2652
 2653 if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
2654 // FIX ME! Do we remove the devfs node here?
2655 // What kind of error reporting?
2656 printf("fasttrap_init: Call to fasttrap_attach failed.\n");
2657 return;
2658 }
2659
2660 gFasttrapInited = 1;
2661 }
2662}
2663
2664#undef FASTTRAP_MAJOR