/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/zalloc.h>

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

__private_extern__
void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to prevent
 * the provider from disappearing, drop the provider lock, and acquire the
 * creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
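
/*
 * Illustrative sketch (not part of the original source): the ordering
 * dance described above, as it plays out in fasttrap_add_probe() below.
 * Code that holds the provider lock but needs the creation lock first
 * pins the provider with a reference count, then trades locks:
 */
#if 0
static void
fasttrap_lock_order_sketch(fasttrap_provider_t *fp)
{
    fp->ftp_ccount++;                /* pin the provider */
    lck_mtx_unlock(&fp->ftp_mtx);    /* never hold ftp_mtx with ftp_cmtx */

    lck_mtx_lock(&fp->ftp_cmtx);     /* safe to call into DTrace from here */
    /* ... dtrace_probe_lookup() / dtrace_probe_create() ... */
    lck_mtx_unlock(&fp->ftp_cmtx);

    lck_mtx_lock(&fp->ftp_mtx);      /* drop the pin */
    fp->ftp_ccount--;
    lck_mtx_unlock(&fp->ftp_mtx);
}
#endif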

static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_call_t fasttrap_timeout;
static lck_mtx_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

#if !defined(__APPLE__)
/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define FASTTRAP_MAX_DEFAULT 2500000
#endif

static uint32_t fasttrap_max;
static uint32_t fasttrap_total;


#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100

fasttrap_hash_t fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;
static fasttrap_hash_t fasttrap_procs;

static uint64_t fasttrap_pid_count; /* pid ref count */
static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */

#define FASTTRAP_ENABLE_FAIL 1
#define FASTTRAP_ENABLE_PARTIAL 2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

#if defined(__APPLE__)
static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
#endif
static void fasttrap_provider_retire(pid_t, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
    ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

#if defined(__APPLE__)

/*
 * To save memory, some common memory allocations are given a
 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

struct zone *fasttrap_tracepoint_t_zone;

/*
 * fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
    "",
    "dtrace.fasttrap_probe_t[1]",
    "dtrace.fasttrap_probe_t[2]",
    "dtrace.fasttrap_probe_t[3]"
};
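
/*
 * Sketch (an assumption about the setup code, which lives in the driver
 * attach path rather than in this excerpt): the zones above would be
 * carved out with zinit() so a fasttrap_tracepoint_t allocation doesn't
 * round up to the next kalloc bucket. The max/alloc sizes here are
 * made-up tuning values for illustration only.
 */
#if 0
fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
    1024 * sizeof(fasttrap_tracepoint_t),   /* hypothetical cap */
    sizeof(fasttrap_tracepoint_t),          /* alloc chunk */
    "dtrace.fasttrap_tracepoint_t");
#endif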

/*
 * We have to manage locks explicitly
 */
lck_grp_t* fasttrap_lck_grp;
lck_grp_attr_t* fasttrap_lck_grp_attr;
lck_attr_t* fasttrap_lck_attr;
#endif

/*
 * Return the 1-based index of the highest set bit in i (0 if i == 0).
 */
static int
fasttrap_highbit(ulong_t i)
{
    int h = 1;

    if (i == 0)
        return (0);
#ifdef _LP64
    if (i & 0xffffffff00000000ul) {
        h += 32; i >>= 32;
    }
#endif
    if (i & 0xffff0000) {
        h += 16; i >>= 16;
    }
    if (i & 0xff00) {
        h += 8; i >>= 8;
    }
    if (i & 0xf0) {
        h += 4; i >>= 4;
    }
    if (i & 0xc) {
        h += 2; i >>= 2;
    }
    if (i & 0x2) {
        h += 1;
    }
    return (h);
}

/*
 * Hash a string (the classic PJW/ELF-style string hash).
 */
static uint_t
fasttrap_hash_str(const char *p)
{
    unsigned int g;
    uint_t hval = 0;

    while (*p) {
        hval = (hval << 4) + *p++;
        if ((g = (hval & 0xf0000000)) != 0)
            hval ^= g >> 24;
        hval &= ~g;
    }
    return (hval);
}
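
/*
 * Usage sketch (illustrative values only; pid 123 and the "pid" name are
 * made up): fasttrap_hash_str() feeds the FASTTRAP_PROVS_INDEX() macro
 * above to pick a provider hash bucket.
 */
#if 0
fasttrap_bucket_t *bucket =
    &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(123, "pid")];
#endif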

/*
 * FIXME - needs implementation
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if 0
    sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

    sqp->sq_info.si_signo = SIGTRAP;
    sqp->sq_info.si_code = TRAP_DTRACE;
    sqp->sq_info.si_addr = (caddr_t)pc;

    mutex_enter(&p->p_lock);
    sigaddqa(p, t, sqp);
    mutex_exit(&p->p_lock);

    if (t != NULL)
        aston(t);
#endif

    printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
    unsigned int i;

    if (gen < fasttrap_mod_gen)
        return;

    fasttrap_mod_gen++;

    for (i = 0; i < NCPU; i++) {
        lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
        lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
    }
}
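
/*
 * Caller-side sketch of the barrier (mirrors fasttrap_pid_destroy() and
 * fasttrap_tracepoint_disable() below): a probe is tagged with the
 * generation in which it was last modified, and the tag is replayed to
 * fasttrap_mod_barrier() before the probe's memory is freed.
 */
#if 0
probe->ftp_gen = fasttrap_mod_gen;     /* at modification time */
/* ... later, before freeing ... */
fasttrap_mod_barrier(probe->ftp_gen);
#endif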

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *ignored, void* ignored2)
{
#pragma unused(ignored, ignored2)
    fasttrap_provider_t **fpp, *fp;
    fasttrap_bucket_t *bucket;
    dtrace_provider_id_t provid;
    unsigned int i, later = 0;

    static volatile int in = 0;
    ASSERT(in == 0);
    in = 1;

    lck_mtx_lock(&fasttrap_cleanup_mtx);
    while (fasttrap_cleanup_work) {
        fasttrap_cleanup_work = 0;
        lck_mtx_unlock(&fasttrap_cleanup_mtx);

        later = 0;

        /*
         * Iterate over all the providers trying to remove the marked
         * ones. If a provider is marked but not retired, we just
         * have to take a crack at removing it -- it's no big deal if
         * we can't.
         */
        for (i = 0; i < fasttrap_provs.fth_nent; i++) {
            bucket = &fasttrap_provs.fth_table[i];
            lck_mtx_lock(&bucket->ftb_mtx);
            fpp = (fasttrap_provider_t **)&bucket->ftb_data;

            while ((fp = *fpp) != NULL) {
                if (!fp->ftp_marked) {
                    fpp = &fp->ftp_next;
                    continue;
                }

                lck_mtx_lock(&fp->ftp_mtx);

                /*
                 * If this provider has consumers actively
                 * creating probes (ftp_ccount) or is a USDT
                 * provider (ftp_mcount), we can't unregister
                 * or even condense.
                 */
                if (fp->ftp_ccount != 0 ||
                    fp->ftp_mcount != 0) {
                    fp->ftp_marked = 0;
                    lck_mtx_unlock(&fp->ftp_mtx);
                    continue;
                }

                if (!fp->ftp_retired || fp->ftp_rcount != 0)
                    fp->ftp_marked = 0;

                lck_mtx_unlock(&fp->ftp_mtx);

                /*
                 * If we successfully unregister this
                 * provider we can remove it from the hash
                 * chain and free the memory. If our attempt
                 * to unregister fails and this is a retired
                 * provider, increment our flag to try again
                 * pretty soon. If we've consumed more than
                 * half of our total permitted number of
                 * probes call dtrace_condense() to try to
                 * clean out the unenabled probes.
                 */
                provid = fp->ftp_provid;
                if (dtrace_unregister(provid) != 0) {
                    if (fasttrap_total > fasttrap_max / 2)
                        (void) dtrace_condense(provid);
                    later += fp->ftp_marked;
                    fpp = &fp->ftp_next;
                } else {
                    *fpp = fp->ftp_next;
                    fasttrap_provider_free(fp);
                }
            }
            lck_mtx_unlock(&bucket->ftb_mtx);
        }

        lck_mtx_lock(&fasttrap_cleanup_mtx);
    }

    ASSERT(fasttrap_timeout != 0);

    /*
     * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
     */
    if (fasttrap_timeout != (thread_call_t)1)
        thread_call_free(fasttrap_timeout);

    /*
     * If we were unable to remove a retired provider, try again after
     * a second. This situation can occur in certain circumstances where
     * providers cannot be unregistered even though they have no probes
     * enabled because of an execution of dtrace -l or something similar.
     * If the timeout has been disabled (set to 1 because we're trying
     * to detach), we set fasttrap_cleanup_work to ensure that we'll
     * get a chance to do that work if and when the timeout is reenabled
     * (if detach fails).
     */
    if (later > 0 && fasttrap_timeout != (thread_call_t)1)
        /* The time value passed to dtrace_timeout is in nanos */
        fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
    else if (later > 0)
        fasttrap_cleanup_work = 1;
    else
        fasttrap_timeout = 0;

    lck_mtx_unlock(&fasttrap_cleanup_mtx);
    in = 0;
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{
    lck_mtx_lock(&fasttrap_cleanup_mtx);
    fasttrap_cleanup_work = 1;
    if (fasttrap_timeout == 0)
        fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
    lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
    pid_t ppid = p->p_pid;
    unsigned int i;

    ASSERT(current_proc() == p);
    lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
    ASSERT(p->p_dtrace_count > 0);
    ASSERT(cp->p_dtrace_count == 0);

    /*
     * This would be simpler and faster if we maintained per-process
     * hash tables of enabled tracepoints. It could, however, potentially
     * slow down execution of a tracepoint since we'd need to go
     * through two levels of indirection. In the future, we should
     * consider either maintaining per-process ancillary lists of
     * enabled tracepoints or hanging a pointer to a per-process hash
     * table of enabled tracepoints off the proc structure.
     */

    /*
     * We don't have to worry about the child process disappearing
     * because we're in fork().
     */
    if (cp != sprlock(cp->p_pid)) {
488 printf("fasttrap_fork: sprlock(%d) returned a differt proc\n", cp->p_pid);
        return;
    }
    proc_unlock(cp);

    /*
     * Iterate over every tracepoint looking for ones that belong to the
     * parent process, and remove each from the child process.
     */
    for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
        fasttrap_tracepoint_t *tp;
        fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

        lck_mtx_lock(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
            if (tp->ftt_pid == ppid &&
                tp->ftt_proc->ftpc_acount != 0) {
                fasttrap_tracepoint_remove(cp, tp);

                /*
                 * The count of active providers can only be
                 * decremented (i.e. to zero) during exec,
                 * exit, and removal of a meta provider so it
                 * should be impossible to drop the count
                 * mid-fork.
                 */
                ASSERT(tp->ftt_proc->ftpc_acount != 0);
            }
        }
        lck_mtx_unlock(&bucket->ftb_mtx);
    }

    /*
     * Free any ptss pages/entries in the child.
     */
    dtrace_ptss_fork(p, cp);

    proc_lock(cp);
    sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
    ASSERT(p == current_proc());
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);


    /* APPLE NOTE: Okay, the locking here is really odd and needs some
     * explaining. This method is always called with the proc_lock held.
     * We must drop the proc_lock before calling fasttrap_provider_retire
     * to avoid a deadlock when it takes the bucket lock.
     *
     * Next, the dtrace_ptss_exec_exit function requires the sprlock
     * be held, but not the proc_lock.
     *
     * Finally, we must re-acquire the proc_lock
     */
    proc_unlock(p);

    /*
     * We clean up the pid provider for this process here; user-land
     * static probes are handled by the meta-provider remove entry point.
     */
    fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#if defined(__APPLE__)
    /*
     * We also need to remove any aliased providers.
     * XXX optimization: track which provider types are instantiated
     * and only retire as needed.
     */
    fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
    fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
#endif /* __APPLE__ */

    /*
     * This should be called after it is no longer possible for a user
     * thread to execute (potentially dtrace instrumented) instructions.
     */
    lck_mtx_lock(&p->p_dtrace_sprlock);
    dtrace_ptss_exec_exit(p);
    lck_mtx_unlock(&p->p_dtrace_sprlock);

    proc_lock(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
    /*
     * There are no "default" pid probes.
     */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
    fasttrap_tracepoint_t *tp, *new_tp = NULL;
    fasttrap_bucket_t *bucket;
    fasttrap_id_t *id;
    pid_t pid;
    user_addr_t pc;

    ASSERT(index < probe->ftp_ntps);

    pid = probe->ftp_pid;
    pc = probe->ftp_tps[index].fit_tp->ftt_pc;
    id = &probe->ftp_tps[index].fit_id;

    ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

    //ASSERT(!(p->p_flag & SVFORK));

    /*
     * Before we make any modifications, make sure we've imposed a barrier
     * on the generation in which this probe was last modified.
     */
    fasttrap_mod_barrier(probe->ftp_gen);

    bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

    /*
     * If the tracepoint has already been enabled, just add our id to the
     * list of interested probes. This may be our second time through
     * this path in which case we'll have constructed the tracepoint we'd
     * like to install. If we can't find a match, and have an allocated
     * tracepoint ready to go, enable that one now.
     *
     * A tracepoint whose process is defunct is also considered defunct.
     */
again:
    lck_mtx_lock(&bucket->ftb_mtx);
    for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
        /*
         * Note that it's safe to access the active count on the
         * associated proc structure because we know that at least one
         * provider (this one) will still be around throughout this
         * operation.
         */
        if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
            tp->ftt_proc->ftpc_acount == 0)
            continue;

        /*
         * Now that we've found a matching tracepoint, it would be
         * a decent idea to confirm that the tracepoint is still
         * enabled and the trap instruction hasn't been overwritten.
         * Since this is a little hairy, we'll punt for now.
         */

        /*
         * This can't be the first interested probe. We don't have
         * to worry about another thread being in the midst of
         * deleting this tracepoint (which would be the only valid
         * reason for a tracepoint to have no interested probes)
         * since we're holding P_PR_LOCK for this process.
         */
        ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

        switch (id->fti_ptype) {
        case DTFTP_ENTRY:
        case DTFTP_OFFSETS:
        case DTFTP_IS_ENABLED:
            id->fti_next = tp->ftt_ids;
            dtrace_membar_producer();
            tp->ftt_ids = id;
            dtrace_membar_producer();
            break;

        case DTFTP_RETURN:
        case DTFTP_POST_OFFSETS:
            id->fti_next = tp->ftt_retids;
            dtrace_membar_producer();
            tp->ftt_retids = id;
            dtrace_membar_producer();
            break;

        default:
            ASSERT(0);
        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        if (new_tp != NULL) {
            new_tp->ftt_ids = NULL;
            new_tp->ftt_retids = NULL;
        }

        return (0);
    }

    /*
     * If we have a good tracepoint ready to go, install it now while
     * we have the lock held and no one can screw with us.
     */
    if (new_tp != NULL) {
        int rc = 0;

        new_tp->ftt_next = bucket->ftb_data;
        dtrace_membar_producer();	/* publish only after the node is fully built */
        bucket->ftb_data = new_tp;
        dtrace_membar_producer();
        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Activate the tracepoint in the ISA-specific manner.
         * If this fails, we need to report the failure, but
         * indicate that this tracepoint must still be disabled
         * by calling fasttrap_tracepoint_disable().
         */
        if (fasttrap_tracepoint_install(p, new_tp) != 0)
            rc = FASTTRAP_ENABLE_PARTIAL;

        /*
         * Increment the count of the number of tracepoints active in
         * the victim process.
         */
        //ASSERT(p->p_proc_flag & P_PR_LOCK);
        p->p_dtrace_count++;

        return (rc);
    }

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Initialize the tracepoint that's been preallocated with the probe.
     */
    new_tp = probe->ftp_tps[index].fit_tp;

    ASSERT(new_tp->ftt_pid == pid);
    ASSERT(new_tp->ftt_pc == pc);
    ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
    ASSERT(new_tp->ftt_ids == NULL);
    ASSERT(new_tp->ftt_retids == NULL);

    switch (id->fti_ptype) {
    case DTFTP_ENTRY:
    case DTFTP_OFFSETS:
    case DTFTP_IS_ENABLED:
        id->fti_next = NULL;
        new_tp->ftt_ids = id;
        break;

    case DTFTP_RETURN:
    case DTFTP_POST_OFFSETS:
        id->fti_next = NULL;
        new_tp->ftt_retids = id;
        break;

    default:
        ASSERT(0);
    }

    /*
     * If the ISA-dependent initialization goes to plan, go back to the
     * beginning and try to install this freshly made tracepoint.
     */
    if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
        goto again;

    new_tp->ftt_ids = NULL;
    new_tp->ftt_retids = NULL;

    return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
    fasttrap_bucket_t *bucket;
    fasttrap_provider_t *provider = probe->ftp_prov;
    fasttrap_tracepoint_t **pp, *tp;
    fasttrap_id_t *id, **idp;
    pid_t pid;
    user_addr_t pc;

    ASSERT(index < probe->ftp_ntps);

    pid = probe->ftp_pid;
    pc = probe->ftp_tps[index].fit_tp->ftt_pc;
    id = &probe->ftp_tps[index].fit_id;

    ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

    /*
     * Find the tracepoint and make sure that our id is one of the
     * ones registered with it.
     */
    bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
    lck_mtx_lock(&bucket->ftb_mtx);
    for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
        if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
            tp->ftt_proc == provider->ftp_proc)
            break;
    }

    /*
     * If we somehow lost this tracepoint, we're in a world of hurt.
     */
    ASSERT(tp != NULL);

    switch (id->fti_ptype) {
    case DTFTP_ENTRY:
    case DTFTP_OFFSETS:
    case DTFTP_IS_ENABLED:
        ASSERT(tp->ftt_ids != NULL);
        idp = &tp->ftt_ids;
        break;

    case DTFTP_RETURN:
    case DTFTP_POST_OFFSETS:
        ASSERT(tp->ftt_retids != NULL);
        idp = &tp->ftt_retids;
        break;

    default:
        /* Fix compiler warning... */
        idp = NULL;
        ASSERT(0);
    }

    while ((*idp)->fti_probe != probe) {
        idp = &(*idp)->fti_next;
        ASSERT(*idp != NULL);
    }

    id = *idp;
    *idp = id->fti_next;
    dtrace_membar_producer();

    ASSERT(id->fti_probe == probe);

    /*
     * If there are other registered enablings of this tracepoint, we're
     * all done, but if this was the last probe associated with this
     * tracepoint, we need to remove and free it.
     */
    if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

        /*
         * If the current probe's tracepoint is in use, swap it
         * for an unused tracepoint.
         */
        if (tp == probe->ftp_tps[index].fit_tp) {
            fasttrap_probe_t *tmp_probe;
            fasttrap_tracepoint_t **tmp_tp;
            uint_t tmp_index;

            if (tp->ftt_ids != NULL) {
                tmp_probe = tp->ftt_ids->fti_probe;
                /* LINTED - alignment */
                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
            } else {
                tmp_probe = tp->ftt_retids->fti_probe;
                /* LINTED - alignment */
                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
            }

            ASSERT(*tmp_tp != NULL);
            ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
            ASSERT((*tmp_tp)->ftt_ids == NULL);
            ASSERT((*tmp_tp)->ftt_retids == NULL);

            probe->ftp_tps[index].fit_tp = *tmp_tp;
            *tmp_tp = tp;

        }

        lck_mtx_unlock(&bucket->ftb_mtx);

        /*
         * Tag the modified probe with the generation in which it was
         * changed.
         */
        probe->ftp_gen = fasttrap_mod_gen;
        return;
    }

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * We can't safely remove the tracepoint from the set of active
     * tracepoints until we've actually removed the fasttrap instruction
     * from the process's text. We can, however, operate on this
     * tracepoint secure in the knowledge that no other thread is going to
     * be looking at it since we hold P_PR_LOCK on the process if it's
     * live or we hold the provider lock on the process if it's dead and
     * gone.
     */

    /*
     * We only need to remove the actual instruction if we're looking
     * at an existing process
     */
    if (p != NULL) {
        /*
         * If we fail to restore the instruction we need to kill
         * this process since it's in a completely unrecoverable
         * state.
         */
        if (fasttrap_tracepoint_remove(p, tp) != 0)
            fasttrap_sigtrap(p, NULL, pc);

        /*
         * Decrement the count of the number of tracepoints active
         * in the victim process.
         */
        //ASSERT(p->p_proc_flag & P_PR_LOCK);
        p->p_dtrace_count--;
    }

    /*
     * Remove the probe from the hash table of active tracepoints.
     */
    lck_mtx_lock(&bucket->ftb_mtx);
    pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
    ASSERT(*pp != NULL);
    while (*pp != tp) {
        pp = &(*pp)->ftt_next;
        ASSERT(*pp != NULL);
    }

    *pp = tp->ftt_next;
    dtrace_membar_producer();

    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Tag the modified probe with the generation in which it was changed.
     */
    probe->ftp_gen = fasttrap_mod_gen;
}

static void
fasttrap_enable_callbacks(void)
{
    /*
     * We don't have to play the rw lock game here because we're
     * providing something rather than taking something away --
     * we can be sure that no threads have tried to follow this
     * function pointer yet.
     */
    lck_mtx_lock(&fasttrap_count_mtx);
    if (fasttrap_pid_count == 0) {
        ASSERT(dtrace_pid_probe_ptr == NULL);
        ASSERT(dtrace_return_probe_ptr == NULL);
        dtrace_pid_probe_ptr = &fasttrap_pid_probe;
        dtrace_return_probe_ptr = &fasttrap_return_probe;
    }
    ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
    ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
    fasttrap_pid_count++;
    lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
    //ASSERT(MUTEX_HELD(&cpu_lock));

    lck_mtx_lock(&fasttrap_count_mtx);
    ASSERT(fasttrap_pid_count > 0);
    fasttrap_pid_count--;
    if (fasttrap_pid_count == 0) {
        cpu_t *cur, *cpu = CPU;

        /*
         * APPLE NOTE: This loop seems broken, it touches every CPU
         * but the one we're actually running on. Need to ask Sun folks
         * if that is safe. Scenario is this: We're running on CPU A,
         * and lock all but A. Then we get preempted, and start running
         * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
         */
        for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
            lck_rw_lock_exclusive(&cur->cpu_ft_lock);
            // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
        }

        dtrace_pid_probe_ptr = NULL;
        dtrace_return_probe_ptr = NULL;

        for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
            lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
            // rw_exit(&cur->cpu_ft_lock);
        }
    }
    lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;
    proc_t *p;
    int i, rc;

    ASSERT(probe != NULL);
    ASSERT(!probe->ftp_enabled);
    ASSERT(id == probe->ftp_id);
    // ASSERT(MUTEX_HELD(&cpu_lock));

    /*
     * Increment the count of enabled probes on this probe's provider;
     * the provider can't go away while the probe still exists. We
     * must increment this even if we aren't able to properly enable
     * this probe.
     */
    lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
    probe->ftp_prov->ftp_rcount++;
    lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

    /*
     * If this probe's provider is retired (meaning it was valid in a
     * previously exec'ed incarnation of this address space), bail out. The
     * provider can't go away while we're in this code path.
     */
    if (probe->ftp_prov->ftp_retired)
        return;

    /*
     * If we can't find the process, it may be that we're in the context of
     * a fork in which the traced process is being born and we're copying
     * USDT probes. Otherwise, the process is gone so bail.
     */
    if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
#if defined(__APPLE__)
        /*
         * APPLE NOTE: We should never end up here. The Solaris sprlock()
         * does not return processes with SIDL set, but we always return
         * the child process.
         */
        return;
#else

        if ((curproc->p_flag & SFORKING) == 0)
            return;

        lck_mtx_lock(&pidlock);
        p = prfind(probe->ftp_pid);

        /*
         * Confirm that curproc is indeed forking the process in which
         * we're trying to enable probes.
         */
        ASSERT(p != NULL);
        //ASSERT(p->p_parent == curproc);
        ASSERT(p->p_stat == SIDL);

        lck_mtx_lock(&p->p_lock);
        lck_mtx_unlock(&pidlock);

        sprlock_proc(p);
#endif
    }

    /*
     * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
     * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
     * To mimic this, we allocate on demand scratch space. If this is the first
     * time a probe has been enabled in this process, we need to allocate scratch
     * space for each already existing thread. Now is a good time to do this, as
     * the target process is suspended and the proc_lock is held.
     */
    if (p->p_dtrace_ptss_pages == NULL) {
        dtrace_ptss_enable(p);
    }

    // ASSERT(!(p->p_flag & SVFORK));
    proc_unlock(p);

    /*
     * We have to enable the trap entry point before any user threads have
     * the chance to execute the trap instruction we're about to place
     * in their process's text.
     */
    fasttrap_enable_callbacks();

    /*
     * Enable all the tracepoints and add this probe's id to each
     * tracepoint's list of active probes.
     */
    for (i = 0; i < (int)probe->ftp_ntps; i++) {
        if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
            /*
             * If enabling the tracepoint failed completely,
             * we don't have to disable it; if the failure
             * was only partial we must disable it.
             */
            if (rc == FASTTRAP_ENABLE_FAIL)
                i--;
            else
                ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

            /*
             * Back up and pull out all the tracepoints we've
             * created so far for this probe.
             */
            while (i >= 0) {
                fasttrap_tracepoint_disable(p, probe, i);
                i--;
            }

            proc_lock(p);
            sprunlock(p);

            /*
             * Since we're not actually enabling this probe,
             * drop our reference on the trap table entry.
             */
            fasttrap_disable_callbacks();
            return;
        }
    }

    proc_lock(p);
    sprunlock(p);

    probe->ftp_enabled = 1;
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;
    fasttrap_provider_t *provider = probe->ftp_prov;
    proc_t *p;
    int i, whack = 0;

    ASSERT(id == probe->ftp_id);

    /*
     * We won't be able to acquire a /proc-esque lock on the process
     * iff the process is dead and gone. In this case, we rely on the
     * provider lock as a point of mutual exclusion to prevent other
     * DTrace consumers from disabling this probe.
     */
    if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
        // ASSERT(!(p->p_flag & SVFORK));
        proc_unlock(p);
    }

    lck_mtx_lock(&provider->ftp_mtx);

    /*
     * Disable all the associated tracepoints (for fully enabled probes).
     */
    if (probe->ftp_enabled) {
        for (i = 0; i < (int)probe->ftp_ntps; i++) {
            fasttrap_tracepoint_disable(p, probe, i);
        }
    }

    ASSERT(provider->ftp_rcount > 0);
    provider->ftp_rcount--;

    if (p != NULL) {
        /*
         * Even though we may not be able to remove it entirely, we
         * mark this retired provider to get a chance to remove some
         * of the associated probes.
         */
        if (provider->ftp_retired && !provider->ftp_marked)
            whack = provider->ftp_marked = 1;
        lck_mtx_unlock(&provider->ftp_mtx);

        proc_lock(p);
        sprunlock(p);
    } else {
        /*
         * If the process is dead, we're just waiting for the
         * last probe to be disabled to be able to free it.
         */
        if (provider->ftp_rcount == 0 && !provider->ftp_marked)
            whack = provider->ftp_marked = 1;
        lck_mtx_unlock(&provider->ftp_mtx);
    }

    if (whack)
        fasttrap_pid_cleanup();

    if (!probe->ftp_enabled)
        return;

    probe->ftp_enabled = 0;

    // ASSERT(MUTEX_HELD(&cpu_lock));
    fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;
    char *str;
    int i, ndx;

    desc->dtargd_native[0] = '\0';
    desc->dtargd_xlate[0] = '\0';

    if (probe->ftp_prov->ftp_retired != 0 ||
        desc->dtargd_ndx >= probe->ftp_nargs) {
        desc->dtargd_ndx = DTRACE_ARGNONE;
        return;
    }

    ndx = (probe->ftp_argmap != NULL) ?
        probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

    str = probe->ftp_ntypes;
    for (i = 0; i < ndx; i++) {
        str += strlen(str) + 1;
    }

    (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

    if (probe->ftp_xtypes == NULL)
        return;

    str = probe->ftp_xtypes;
    for (i = 0; i < desc->dtargd_ndx; i++) {
        str += strlen(str) + 1;
    }

    (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}
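
/*
 * Layout note (illustrative, not from the original source): ftp_ntypes
 * and ftp_xtypes pack argument type names as consecutive NUL-terminated
 * strings, e.g.
 *
 *     "int\0char *\0size_t\0"
 *
 * which is why the loops above step with str += strlen(str) + 1 to reach
 * the desired type.
 */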

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
    fasttrap_probe_t *probe = parg;
    unsigned int i;

    ASSERT(probe != NULL);
    ASSERT(!probe->ftp_enabled);
    ASSERT(fasttrap_total >= probe->ftp_ntps);

    atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
#if !defined(__APPLE__)
    size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
#endif

    if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
        fasttrap_mod_barrier(probe->ftp_gen);

    for (i = 0; i < probe->ftp_ntps; i++) {
#if !defined(__APPLE__)
        kmem_free(probe->ftp_tps[i].fit_tp, sizeof (fasttrap_tracepoint_t));
#else
        zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
#endif
    }

#if !defined(__APPLE__)
    kmem_free(probe, size);
#else
    if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
        zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
    } else {
        size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
        kmem_free(probe, size);
    }
#endif
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
    fasttrap_pid_provide,
    NULL,
    fasttrap_pid_enable,
    fasttrap_pid_disable,
    NULL,
    NULL,
    fasttrap_pid_getargdesc,
    fasttrap_pid_getarg,
    NULL,
    fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
    fasttrap_pid_provide,
    NULL,
    fasttrap_pid_enable,
    fasttrap_pid_disable,
    NULL,
    NULL,
    fasttrap_pid_getargdesc,
    fasttrap_usdt_getarg,
    NULL,
    fasttrap_pid_destroy
};
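
/*
 * Note (editorial): the pid and usdt ops vectors above differ only in
 * their getarg entry points; fasttrap_provider_lookup() below selects
 * between them based on which attribute table the caller passed in:
 *
 *     pattr == &pid_attr ? &pid_pops : &usdt_pops
 */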

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
    fasttrap_bucket_t *bucket;
    fasttrap_proc_t *fprc, *new_fprc;

    bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
    lck_mtx_lock(&bucket->ftb_mtx);

    for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
        if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
            lck_mtx_lock(&fprc->ftpc_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fprc->ftpc_rcount++;
            atomic_add_64(&fprc->ftpc_acount, 1);
            ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
            lck_mtx_unlock(&fprc->ftpc_mtx);

            return (fprc);
        }
    }

    /*
     * Drop the bucket lock so we don't try to perform a sleeping
     * allocation under it.
     */
    lck_mtx_unlock(&bucket->ftb_mtx);

    new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
    ASSERT(new_fprc != NULL);
    new_fprc->ftpc_pid = pid;
    new_fprc->ftpc_rcount = 1;
    new_fprc->ftpc_acount = 1;

    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take another lap through the list to make sure a proc hasn't
     * been created for this pid while we weren't under the bucket lock.
     */
    for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
        if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
            lck_mtx_lock(&fprc->ftpc_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fprc->ftpc_rcount++;
            atomic_add_64(&fprc->ftpc_acount, 1);
            ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
            lck_mtx_unlock(&fprc->ftpc_mtx);

            kmem_free(new_fprc, sizeof (fasttrap_proc_t));

            return (fprc);
        }
    }

#if defined(__APPLE__)
    /*
     * We have to initialize all locks explicitly
     */
    lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif

    new_fprc->ftpc_next = bucket->ftb_data;
    bucket->ftb_data = new_fprc;

    lck_mtx_unlock(&bucket->ftb_mtx);

    return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
    fasttrap_bucket_t *bucket;
    fasttrap_proc_t *fprc, **fprcp;
    pid_t pid = proc->ftpc_pid;

    lck_mtx_lock(&proc->ftpc_mtx);

    ASSERT(proc->ftpc_rcount != 0);
    ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

    if (--proc->ftpc_rcount != 0) {
        lck_mtx_unlock(&proc->ftpc_mtx);
        return;
    }

    lck_mtx_unlock(&proc->ftpc_mtx);

    /*
     * There should definitely be no live providers associated with this
     * process at this point.
     */
    ASSERT(proc->ftpc_acount == 0);

    bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
    lck_mtx_lock(&bucket->ftb_mtx);

    fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
    while ((fprc = *fprcp) != NULL) {
        if (fprc == proc)
            break;

        fprcp = &fprc->ftpc_next;
    }

    /*
     * Something strange has happened if we can't find the proc.
     */
    ASSERT(fprc != NULL);

    *fprcp = fprc->ftpc_next;

    lck_mtx_unlock(&bucket->ftb_mtx);

#if defined(__APPLE__)
    /*
     * Apple explicit lock management. Not 100% certain we need this, the
     * memory is freed even without the destroy. Maybe accounting cleanup?
     */
    lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
#endif

    kmem_free(fprc, sizeof (fasttrap_proc_t));
}

/*
 * Lookup a fasttrap-managed provider based on its name and associated pid.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
#if defined(__APPLE__)
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
#endif /* __APPLE__ */
{
    fasttrap_provider_t *fp, *new_fp = NULL;
    fasttrap_bucket_t *bucket;
    char provname[DTRACE_PROVNAMELEN];
    proc_t *p;
    cred_t *cred;

    ASSERT(strlen(name) < sizeof (fp->ftp_name));
    ASSERT(pattr != NULL);

    bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take a lap through the list and return the match if we find it.
     */
    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == pid &&
#if defined(__APPLE__)
            fp->ftp_provider_type == provider_type &&
#endif /* __APPLE__ */
            strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired) {
            lck_mtx_lock(&fp->ftp_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            return (fp);
        }
    }

    /*
     * Drop the bucket lock so we don't try to perform a sleeping
     * allocation under it.
     */
    lck_mtx_unlock(&bucket->ftb_mtx);

    /*
     * Make sure the process exists, isn't a child created as the result
     * of a vfork(2), and isn't a zombie (but may be in fork).
     */
    if ((p = proc_find(pid)) == NULL) {
        return NULL;
    }
    proc_lock(p);
    if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
        proc_unlock(p);
        proc_rele(p);
        return (NULL);
    }

    /*
     * Increment p_dtrace_probes so that the process knows to inform us
     * when it exits or execs. fasttrap_provider_free() decrements this
     * when we're done with this provider.
     */
    p->p_dtrace_probes++;

    /*
     * Grab the credentials for this process so we have
     * something to pass to dtrace_register().
     */
#if !defined(__APPLE__)
    mutex_enter(&p->p_crlock);
    crhold(p->p_cred);
    cred = p->p_cred;
    mutex_exit(&p->p_crlock);
    mutex_exit(&p->p_lock);
#else
    // lck_mtx_lock(&p->p_crlock);
    // Seems like OS X has no equivalent to crhold, even though it has a cr_ref field in ucred
    crhold(p->p_ucred);
    cred = p->p_ucred;
    // lck_mtx_unlock(&p->p_crlock);
    proc_unlock(p);
    proc_rele(p);
#endif /* __APPLE__ */

    new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
    ASSERT(new_fp != NULL);
    new_fp->ftp_pid = pid;
    new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if defined(__APPLE__)
    new_fp->ftp_provider_type = provider_type;

    /*
     * Apple locks require explicit init.
     */
    lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
    lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif /* __APPLE__ */

    ASSERT(new_fp->ftp_proc != NULL);

    lck_mtx_lock(&bucket->ftb_mtx);

    /*
     * Take another lap through the list to make sure a provider hasn't
     * been created for this pid while we weren't under the bucket lock.
     */
    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired) {
            lck_mtx_lock(&fp->ftp_mtx);
            lck_mtx_unlock(&bucket->ftb_mtx);
            fasttrap_provider_free(new_fp);
            crfree(cred);
            return (fp);
        }
    }

    (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

    /*
     * Fail and return NULL if either the provider name is too long
     * or we fail to register this new provider with the DTrace
     * framework. Note that this is the only place we ever construct
     * the full provider name -- we keep it in pieces in the provider
     * structure.
     */
    if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
        (int)sizeof (provname) ||
        dtrace_register(provname, pattr,
        DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
        pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
        &new_fp->ftp_provid) != 0) {
        lck_mtx_unlock(&bucket->ftb_mtx);
        fasttrap_provider_free(new_fp);
        crfree(cred);
        return (NULL);
    }

    new_fp->ftp_next = bucket->ftb_data;
    bucket->ftb_data = new_fp;

    lck_mtx_lock(&new_fp->ftp_mtx);
    lck_mtx_unlock(&bucket->ftb_mtx);

    crfree(cred);
    return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
    pid_t pid = provider->ftp_pid;
    proc_t *p;

    /*
     * There need to be no associated enabled probes, no consumers
     * creating probes, and no meta providers referencing this provider.
     */
    ASSERT(provider->ftp_rcount == 0);
    ASSERT(provider->ftp_ccount == 0);
    ASSERT(provider->ftp_mcount == 0);

    /*
     * If this provider hasn't been retired, we need to explicitly drop the
     * count of active providers on the associated process structure.
     */
    if (!provider->ftp_retired) {
        atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
        ASSERT(provider->ftp_proc->ftpc_acount <
            provider->ftp_proc->ftpc_rcount);
    }

    fasttrap_proc_release(provider->ftp_proc);

#if defined(__APPLE__)
    /*
     * Apple explicit lock management. Not 100% certain we need this, the
     * memory is freed even without the destroy. Maybe accounting cleanup?
     */
    lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
    lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
#endif

    kmem_free(provider, sizeof (fasttrap_provider_t));

    /*
     * Decrement p_dtrace_probes on the process whose provider we're
     * freeing. We don't have to worry about clobbering someone else's
     * modifications to it because we have locked the bucket that
     * corresponds to this process's hash chain in the provider hash
     * table. Don't sweat it if we can't find the process.
     */
    if ((p = proc_find(pid)) == NULL) {
        return;
    }

    proc_lock(p);
    p->p_dtrace_probes--;
    proc_unlock(p);

    proc_rele(p);
}

static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
    fasttrap_provider_t *fp;
    fasttrap_bucket_t *bucket;
    dtrace_provider_id_t provid;

    ASSERT(strlen(name) < sizeof (fp->ftp_name));

    bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
    lck_mtx_lock(&bucket->ftb_mtx);

    for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
        if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
            !fp->ftp_retired)
            break;
    }

    if (fp == NULL) {
        lck_mtx_unlock(&bucket->ftb_mtx);
        return;
    }

    lck_mtx_lock(&fp->ftp_mtx);
    ASSERT(!mprov || fp->ftp_mcount > 0);
    if (mprov && --fp->ftp_mcount != 0) {
        lck_mtx_unlock(&fp->ftp_mtx);
        lck_mtx_unlock(&bucket->ftb_mtx);
        return;
    }

    /*
     * Mark the provider to be removed in our post-processing step, mark it
     * retired, and drop the active count on its proc. Marking it indicates
     * that we should try to remove it; setting the retired flag indicates
     * that we're done with this provider; dropping the active count on the proc
     * releases our hold, and when this reaches zero (as it will during
     * exit or exec) the proc and associated providers become defunct.
     *
     * We obviously need to take the bucket lock before the provider lock
     * to perform the lookup, but we need to drop the provider lock
     * before calling into the DTrace framework since we acquire the
     * provider lock in callbacks invoked from the DTrace framework. The
     * bucket lock therefore protects the integrity of the provider hash
     * table.
     */
    atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
    ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

    fp->ftp_retired = 1;
    fp->ftp_marked = 1;
    provid = fp->ftp_provid;
    lck_mtx_unlock(&fp->ftp_mtx);

    /*
     * We don't have to worry about invalidating the same provider twice
     * since fasttrap_provider_lookup() will ignore providers that have
     * been marked as retired.
     */
    dtrace_invalidate(provid);

    lck_mtx_unlock(&bucket->ftb_mtx);

    fasttrap_pid_cleanup();
}

/*
 * qsort(3)-style comparators. Compare explicitly rather than returning
 * a difference: for 32-bit operands the subtraction can wrap, and for
 * 64-bit operands the difference doesn't fit in an int.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
    uint32_t a = *(const uint32_t *)ap;
    uint32_t b = *(const uint32_t *)bp;

    return ((a < b) ? -1 : ((a > b) ? 1 : 0));
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
    uint64_t a = *(const uint64_t *)ap;
    uint64_t b = *(const uint64_t *)bp;

    return ((a < b) ? -1 : ((a > b) ? 1 : 0));
}
1717
1718 static int
1719 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1720 {
1721 fasttrap_provider_t *provider;
1722 fasttrap_probe_t *pp;
1723 fasttrap_tracepoint_t *tp;
1724 const char *name;
1725 unsigned int i, aframes, whack;
1726
1727 /*
1728 * There needs to be at least one desired trace point.
1729 */
1730 if (pdata->ftps_noffs == 0)
1731 return (EINVAL);
1732
1733 #if defined(__APPLE__)
1734 switch (pdata->ftps_probe_type) {
1735 #endif
1736 case DTFTP_ENTRY:
1737 name = "entry";
1738 aframes = FASTTRAP_ENTRY_AFRAMES;
1739 break;
1740 case DTFTP_RETURN:
1741 name = "return";
1742 aframes = FASTTRAP_RETURN_AFRAMES;
1743 break;
1744 case DTFTP_OFFSETS:
1745 aframes = 0;
1746 name = NULL;
1747 break;
1748 default:
1749 return (EINVAL);
1750 }
1751
1752 #if defined(__APPLE__)
1753 const char* provider_name;
1754 switch (pdata->ftps_provider_type) {
1755 case DTFTP_PROVIDER_PID:
1756 provider_name = FASTTRAP_PID_NAME;
1757 break;
1758 case DTFTP_PROVIDER_OBJC:
1759 provider_name = FASTTRAP_OBJC_NAME;
1760 break;
1761 case DTFTP_PROVIDER_ONESHOT:
1762 provider_name = FASTTRAP_ONESHOT_NAME;
1763 break;
1764 default:
1765 return (EINVAL);
1766 }
1767
1768 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type,
1769 provider_name, &pid_attr)) == NULL)
1770 return (ESRCH);
1771 #endif /* __APPLE__ */
1772
1773 /*
1774 * Increment this reference count to indicate that a consumer is
1775 * actively adding a new probe associated with this provider. This
1776 * prevents the provider from being deleted -- we'll need to check
1777 * for pending deletions when we drop this reference count.
1778 */
1779 provider->ftp_ccount++;
1780 lck_mtx_unlock(&provider->ftp_mtx);
1781
1782 /*
1783 * Grab the creation lock to ensure consistency between calls to
1784 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1785 * other threads creating probes. We must drop the provider lock
1786 * before taking this lock to avoid a three-way deadlock with the
1787 * DTrace framework.
1788 */
1789 lck_mtx_lock(&provider->ftp_cmtx);
1790
1791 if (name == NULL) {
1792 for (i = 0; i < pdata->ftps_noffs; i++) {
1793 char name_str[17];
1794
1795 (void) snprintf(name_str, sizeof(name_str), "%llx",
1796 (uint64_t)pdata->ftps_offs[i]);
1797
1798 if (dtrace_probe_lookup(provider->ftp_provid,
1799 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1800 continue;
1801
1802 atomic_add_32(&fasttrap_total, 1);
1803
1804 if (fasttrap_total > fasttrap_max) {
1805 atomic_add_32(&fasttrap_total, -1);
1806 goto no_mem;
1807 }
1808
1809 #if !defined(__APPLE__)
1810 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1811 ASSERT(pp != NULL);
1812 #else
1813 pp = zalloc(fasttrap_probe_t_zones[1]);
1814 bzero(pp, sizeof (fasttrap_probe_t));
1815 #endif
1816
1817 pp->ftp_prov = provider;
1818 pp->ftp_faddr = pdata->ftps_pc;
1819 pp->ftp_fsize = pdata->ftps_size;
1820 pp->ftp_pid = pdata->ftps_pid;
1821 pp->ftp_ntps = 1;
1822
1823 #if !defined(__APPLE__)
1824 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1825 #else
1826 tp = zalloc(fasttrap_tracepoint_t_zone);
1827 bzero(tp, sizeof (fasttrap_tracepoint_t));
1828 #endif
1829
1830 tp->ftt_proc = provider->ftp_proc;
1831 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1832 tp->ftt_pid = pdata->ftps_pid;
1833
1834
1835 pp->ftp_tps[0].fit_tp = tp;
1836 pp->ftp_tps[0].fit_id.fti_probe = pp;
1837 #if defined(__APPLE__)
1838 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1839 #endif
1840 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1841 pdata->ftps_mod, pdata->ftps_func, name_str,
1842 FASTTRAP_OFFSET_AFRAMES, pp);
1843 }
1844
1845 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1846 pdata->ftps_func, name) == 0) {
1847 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1848
1849 if (fasttrap_total > fasttrap_max) {
1850 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1851 goto no_mem;
1852 }
1853
1854 /*
1855 * Make sure all tracepoint program counter values are unique.
1856 * We later assume that each probe has exactly one tracepoint
1857 * for a given pc.
1858 */
1859 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1860 sizeof (uint64_t), fasttrap_uint64_cmp);
1861 for (i = 1; i < pdata->ftps_noffs; i++) {
1862 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1863 continue;
1864
1865 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1866 goto no_mem;
1867 }
1868
1869 ASSERT(pdata->ftps_noffs > 0);
1870 #if !defined(__APPLE__)
1871 pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1872 ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1873 ASSERT(pp != NULL);
1874 #else
1875 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1876 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
1877 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
1878 } else {
1879 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1880 }
1881 #endif
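/*
 * Probes with fewer than FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
 * tracepoints are carved from the fixed-size zalloc zones created in
 * fasttrap_init(); larger ones fall back to kmem_zalloc(). zalloc()
 * does not zero its memory, hence the explicit bzero(), while
 * kmem_zalloc() returns zeroed memory already.
 */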
1882
1883 pp->ftp_prov = provider;
1884 pp->ftp_faddr = pdata->ftps_pc;
1885 pp->ftp_fsize = pdata->ftps_size;
1886 pp->ftp_pid = pdata->ftps_pid;
1887 pp->ftp_ntps = pdata->ftps_noffs;
1888
1889 for (i = 0; i < pdata->ftps_noffs; i++) {
1890 #if !defined(__APPLE__)
1891 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1892 #else
1893 tp = zalloc(fasttrap_tracepoint_t_zone);
1894 bzero(tp, sizeof (fasttrap_tracepoint_t));
1895 #endif
1896
1897 tp->ftt_proc = provider->ftp_proc;
1898 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1899 tp->ftt_pid = pdata->ftps_pid;
1900
1901 pp->ftp_tps[i].fit_tp = tp;
1902 pp->ftp_tps[i].fit_id.fti_probe = pp;
1903 #if defined(__APPLE__)
1904 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
1905 #endif
1906 }
1907
1908 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1909 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1910 }
1911
1912 lck_mtx_unlock(&provider->ftp_cmtx);
1913
1914 /*
1915 * We know that the provider is still valid since we incremented the
1916 * creation reference count. If someone tried to clean up this provider
1917 * while we were using it (e.g. because the process called exec(2) or
1918 * exit(2)), take note of that and try to clean it up now.
1919 */
1920 lck_mtx_lock(&provider->ftp_mtx);
1921 provider->ftp_ccount--;
1922 whack = provider->ftp_retired;
1923 lck_mtx_unlock(&provider->ftp_mtx);
1924
1925 if (whack)
1926 fasttrap_pid_cleanup();
1927
1928 return (0);
1929
1930 no_mem:
1931 /*
1932 * If we've exhausted the allowable resources, we'll try to remove
1933 * this provider to free some up. This is to cover the case where
1934 * the user has accidentally created many more probes than was
1935 * intended (e.g. pid123:::).
1936 */
1937 lck_mtx_unlock(&provider->ftp_cmtx);
1938 lck_mtx_lock(&provider->ftp_mtx);
1939 provider->ftp_ccount--;
1940 provider->ftp_marked = 1;
1941 lck_mtx_unlock(&provider->ftp_mtx);
1942
1943 fasttrap_pid_cleanup();
1944
1945 return (ENOMEM);
1946 }
1947
1948 /*ARGSUSED*/
1949 static void *
1950 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1951 {
1952 #pragma unused(arg)
1953 fasttrap_provider_t *provider;
1954
1955 /*
1956 * A 32-bit unsigned integer (like a pid for example) can be
1957 * expressed in 10 or fewer decimal digits. Make sure that we'll
1958 * have enough space for the provider name.
1959 */
1960 if (strlen(dhpv->dthpv_provname) + 10 >=
1961 sizeof (provider->ftp_name)) {
1962 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1963 "name too long to accomodate pid", dhpv->dthpv_provname);
1964 return (NULL);
1965 }
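/*
 * (UINT32_MAX is 4294967295 -- exactly 10 digits. The pid is appended
 * to the provider name, so a helper provider "app" in pid 1234 would
 * register as "app1234"; the check above guarantees that worst case
 * still fits, NUL included, in ftp_name.)
 */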
1966
1967 /*
1968 * Don't let folks spoof the true pid provider.
1969 */
1970 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
1971 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1972 "%s is an invalid name", dhpv->dthpv_provname,
1973 FASTTRAP_PID_NAME);
1974 return (NULL);
1975 }
1976 #if defined(__APPLE__)
1977 /*
1978 * We also need to check the other pid provider types
1979 */
1980 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
1981 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1982 "%s is an invalid name", dhpv->dthpv_provname,
1983 FASTTRAP_OBJC_NAME);
1984 return (NULL);
1985 }
1986 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
1987 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1988 "%s is an invalid name", dhpv->dthpv_provname,
1989 FASTTRAP_ONESHOT_NAME);
1990 return (NULL);
1991 }
1992 #endif /* __APPLE__ */
1993
1994 /*
1995 * The highest stability class that fasttrap supports is ISA; cap
1996 * the stability of the new provider accordingly.
1997 */
1998 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
1999 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2000 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2001 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2002 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2003 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2004 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2005 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2006 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2007 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2008
2009 #if defined(__APPLE__)
2010 if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2011 &dhpv->dthpv_pattr)) == NULL) {
2012 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2013 "process %u", dhpv->dthpv_provname, (uint_t)pid);
2014 return (NULL);
2015 }
2016
2017 /*
2018 * APPLE NOTE!
2019 *
2020 * USDT probes (fasttrap meta probes) are very expensive to create.
2021 * Profiling has shown that the largest single cost is verifying that
2022 * dtrace hasn't already created a given meta_probe. The reason for
2023 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2024 * each static probe being created. We want to get rid of that check.
2025 * The simplest way of eliminating it is to deny the ability to add
2026 * probes to an existing provider. If the provider already exists, BZZT!
2027 * This still leaves the possibility of intentionally malformed DOF
2028 * having duplicate probes. However, duplicate probes are not fatal,
2029 * and there is no way to get that by accident, so we will not check
2030 * for that case.
2031 *
2032 * UPDATE: It turns out there are several use cases that require adding
2033 * probes to existing providers. Disabling this optimization for now...
2034 */
2035 #endif /* __APPLE__ */
2036
2037 /*
2038 * Up the meta provider count so this provider isn't removed until
2039 * the meta provider has been told to remove it.
2040 */
2041 provider->ftp_mcount++;
2042
2043 lck_mtx_unlock(&provider->ftp_mtx);
2044
2045 return (provider);
2046 }
2047
2048 /*ARGSUSED*/
2049 static void
2050 fasttrap_meta_create_probe(void *arg, void *parg,
2051 dtrace_helper_probedesc_t *dhpb)
2052 {
2053 #pragma unused(arg)
2054 fasttrap_provider_t *provider = parg;
2055 fasttrap_probe_t *pp;
2056 fasttrap_tracepoint_t *tp;
2057 unsigned int i, j;
2058 uint32_t ntps;
2059
2060 /*
2061 * Since the meta provider count is non-zero we don't have to worry
2062 * about this provider disappearing.
2063 */
2064 ASSERT(provider->ftp_mcount > 0);
2065
2066 /*
2067 * The offsets must be unique.
2068 */
2069 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2070 fasttrap_uint32_cmp);
2071 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2072 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2073 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2074 return;
2075 }
2076
2077 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2078 fasttrap_uint32_cmp);
2079 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2080 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2081 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2082 return;
2083 }
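/*
 * Since both arrays were just sorted, a non-increasing adjacent pair
 * can only mean a duplicate offset, so we silently decline to create
 * a probe that would need two tracepoints at one pc.
 */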
2084
2085 /*
2086 * Grab the creation lock to ensure consistency between calls to
2087 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2088 * other threads creating probes.
2089 */
2090 lck_mtx_lock(&provider->ftp_cmtx);
2091
2092 #if !defined(__APPLE__)
2093 /*
2094 * APPLE NOTE: This is hideously expensive. See note in
2095 * fasttrap_meta_provide() for why we can get away without
2096 * checking here.
2097 */
2098 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2099 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2100 lck_mtx_unlock(&provider->ftp_cmtx);
2101 return;
2102 }
2103 #endif
2104
2105 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2106 ASSERT(ntps > 0);
2107
2108 atomic_add_32(&fasttrap_total, ntps);
2109
2110 if (fasttrap_total > fasttrap_max) {
2111 atomic_add_32(&fasttrap_total, -ntps);
2112 lck_mtx_unlock(&provider->ftp_cmtx);
2113 return;
2114 }
2115
2116 #if !defined(__APPLE__)
2117 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2118 ASSERT(pp != NULL);
2119 #else
2120 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2121 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2122 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2123 } else {
2124 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2125 }
2126 #endif
2127
2128 pp->ftp_prov = provider;
2129 pp->ftp_pid = provider->ftp_pid;
2130 pp->ftp_ntps = ntps;
2131 pp->ftp_nargs = dhpb->dthpb_xargc;
2132 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2133 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2134
2135 /*
2136 * First create a tracepoint for each actual point of interest.
2137 */
2138 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2139 #if !defined(__APPLE__)
2140 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2141 #else
2142 tp = zalloc(fasttrap_tracepoint_t_zone);
2143 bzero(tp, sizeof (fasttrap_tracepoint_t));
2144 #endif
2145
2146 tp->ftt_proc = provider->ftp_proc;
2147 #if defined(__APPLE__)
2148 /*
2149 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2150 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2151 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2152 */
2153 #if defined(__i386__) || defined(__x86_64__)
2154 /*
2155 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2156 */
2157 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2158 #elif defined(__ppc__)
2159 /* All PPC probes are zero offset. */
2160 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i];
2161 #else
2162 #error "Architecture not supported"
2163 #endif
2164
2165 #else
2166 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
2167 #endif
2168 tp->ftt_pid = provider->ftp_pid;
2169
2170 pp->ftp_tps[i].fit_tp = tp;
2171 pp->ftp_tps[i].fit_id.fti_probe = pp;
2172 #ifdef __sparc
2173 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
2174 #else
2175 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2176 #endif
2177 }
2178
2179 /*
2180 * Then create a tracepoint for each is-enabled point.
2181 */
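/*
 * Note that i deliberately carries over from the loop above: the
 * is-enabled tracepoints occupy ftp_tps[dthpb_noffs .. ntps - 1],
 * while j indexes dthpb_enoffs from zero.
 */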
2182 for (j = 0; i < ntps; i++, j++) {
2183 #if !defined(__APPLE__)
2184 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2185 #else
2186 tp = zalloc(fasttrap_tracepoint_t_zone);
2187 bzero(tp, sizeof (fasttrap_tracepoint_t));
2188 #endif
2189
2190 tp->ftt_proc = provider->ftp_proc;
2191 #if defined(__APPLE__)
2192 /*
2193 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2194 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2195 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2196 */
2197 #if defined(__i386__) || defined(__x86_64__)
2198 /*
2199 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2200 */
2201 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2202 #elif defined(__ppc__)
2203 /* All PPC is-enabled probes are zero offset. */
2204 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j];
2205 #else
2206 #error "Architecture not supported"
2207 #endif
2208
2209 #else
2210 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
2211 #endif
2212 tp->ftt_pid = provider->ftp_pid;
2213
2214 pp->ftp_tps[i].fit_tp = tp;
2215 pp->ftp_tps[i].fit_id.fti_probe = pp;
2216 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2217 }
2218
2219 /*
2220 * If the arguments are shuffled around we set the argument remapping
2221 * table. Later, when the probe fires, we only remap the arguments
2222 * if the table is non-NULL.
2223 */
2224 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2225 if (dhpb->dthpb_args[i] != i) {
2226 pp->ftp_argmap = dhpb->dthpb_args;
2227 break;
2228 }
2229 }
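/*
 * For example, a translated argument order of {0, 2, 1} installs the
 * table, whereas the identity mapping {0, 1, 2} leaves ftp_argmap
 * NULL and probe firing pays no remapping cost.
 */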
2230
2231 /*
2232 * The probe is fully constructed -- register it with DTrace.
2233 */
2234 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2235 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2236
2237 lck_mtx_unlock(&provider->ftp_cmtx);
2238 }
2239
2240 /*ARGSUSED*/
2241 static void
2242 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2243 {
2244 #pragma unused(arg)
2245 /*
2246 * Clean up the USDT provider. There may be active consumers of the
2247 * provider busy adding probes, but no damage will actually befall the
2248 * provider until that count has dropped to zero. This just puts
2249 * the provider on death row.
2250 */
2251 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2252 }
2253
2254 static dtrace_mops_t fasttrap_mops = {
2255 fasttrap_meta_create_probe,
2256 fasttrap_meta_provide,
2257 fasttrap_meta_remove
2258 };
2259
2260 /*ARGSUSED*/
2261 static int
2262 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2263 {
2264 #pragma unused(dev, md, rv)
2265 if (!dtrace_attached())
2266 return (EAGAIN);
2267
2268 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2269 fasttrap_probe_spec_t *probe;
2270 uint64_t noffs;
2271 size_t size, i;
2272 int ret;
2273 char *c;
2274
2275 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2276 sizeof (probe->ftps_noffs)))
2277 return (EFAULT);
2278
2279 /*
2280 * Probes must have at least one tracepoint.
2281 */
2282 if (noffs == 0)
2283 return (EINVAL);
2284
2285 /*
2286 * We want to check the number of noffs before doing
2287 * sizing math, to prevent potential buffer overflows.
2288 */
2289 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2290 return (ENOMEM);
2291
2292 size = sizeof (fasttrap_probe_spec_t) +
2293 sizeof (probe->ftps_offs[0]) * (noffs - 1);
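/*
 * The (noffs - 1) term suggests ftps_offs is declared as a one-element
 * trailing array whose first slot is already counted by the sizeof.
 * Thanks to the bound on noffs above, size stays under 1MB and the
 * multiplication cannot overflow.
 */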
2294
2295 probe = kmem_alloc(size, KM_SLEEP);
2296
2297 if (copyin(arg, probe, size) != 0) {
2298 kmem_free(probe, size);
2299 return (EFAULT);
2300 }
2301
2302 /*
2303 * Verify that the function and module strings contain no
2304 * funny characters.
2305 */
2306 for (i = 0, c = &probe->ftps_func[0]; i < sizeof(probe->ftps_func) && *c != '\0'; i++, c++) {
2307 if (*c < 0x20 || 0x7f <= *c) {
2308 ret = EINVAL;
2309 goto err;
2310 }
2311 }
2312 if (*c != '\0') {
2313 ret = EINVAL;
2314 goto err;
2315 }
2316
2317 for (i = 0, c = &probe->ftps_mod[0]; i < sizeof(probe->ftps_mod) && *c != '\0'; i++, c++) {
2318 if (*c < 0x20 || 0x7f <= *c) {
2319 ret = EINVAL;
2320 goto err;
2321 }
2322 }
2323 if (*c != '\0') {
2324 ret = EINVAL;
2325 goto err;
2326 }
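/*
 * Each pair of checks enforces two properties: every byte must be
 * printable ASCII (0x20 through 0x7e), and a NUL terminator must
 * appear within the fixed-size buffer; a string that fills the buffer
 * without a NUL is rejected by the trailing *c != '\0' test.
 */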
2327
2328 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2329 proc_t *p;
2330 pid_t pid = probe->ftps_pid;
2331
2332 /*
2333 * Report an error if the process doesn't exist
2334 * or is actively being birthed.
2335 */
2336 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2337 if (p != PROC_NULL)
2338 proc_rele(p);
2339 return (ESRCH);
2340 }
2341 // proc_lock(p);
2342 // FIXME! How is this done on OS X?
2343 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2344 // VREAD | VWRITE)) != 0) {
2345 // mutex_exit(&p->p_lock);
2346 // return (ret);
2347 // }
2348 // proc_unlock(p);
2349 proc_rele(p);
2350 }
2351
2352 ret = fasttrap_add_probe(probe);
2353
2354 err:
2355 kmem_free(probe, size);
2356
2357 return (ret);
2358
2359 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2360 fasttrap_instr_query_t instr;
2361 fasttrap_tracepoint_t *tp;
2362 uint_t index;
2363 // int ret;
2364
2365 if (copyin(arg, &instr, sizeof (instr)) != 0)
2366 return (EFAULT);
2367
2368 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2369 proc_t *p;
2370 pid_t pid = instr.ftiq_pid;
2371
2372 /*
2373 * Report an error if the process doesn't exist
2374 * or is actively being birthed.
2375 */
2376 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2377 if (p != PROC_NULL)
2378 proc_rele(p);
2379 return (ESRCH);
2380 }
2381 //proc_lock(p);
2382 // FIXME! How is this done on OS X?
2383 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2384 // VREAD)) != 0) {
2385 // mutex_exit(&p->p_lock);
2386 // return (ret);
2387 // }
2388 // proc_unlock(p);
2389 proc_rele(p);
2390 }
2391
2392 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2393
2394 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2395 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2396 while (tp != NULL) {
2397 if (instr.ftiq_pid == tp->ftt_pid &&
2398 instr.ftiq_pc == tp->ftt_pc &&
2399 tp->ftt_proc->ftpc_acount != 0)
2400 break;
2401
2402 tp = tp->ftt_next;
2403 }
2404
2405 if (tp == NULL) {
2406 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2407 return (ENOENT);
2408 }
2409
2410 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2411 sizeof (instr.ftiq_instr));
2412 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2413
2414 if (copyout(&instr, arg, sizeof (instr)) != 0)
2415 return (EFAULT);
2416
2417 return (0);
2418 }
2419
2420 return (EINVAL);
2421 }
2422
2423 static int
2424 fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
2425 {
2426 ulong_t nent;
2427
2428 switch (cmd) {
2429 case DDI_ATTACH:
2430 break;
2431 case DDI_RESUME:
2432 return (DDI_SUCCESS);
2433 default:
2434 return (DDI_FAILURE);
2435 }
2436
2437 ddi_report_dev(devi);
2438 fasttrap_devi = devi;
2439
2440 /*
2441 * Install our hooks into fork(2), exec(2), and exit(2).
2442 */
2443 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2444 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2445 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2446
2447 #if !defined(__APPLE__)
2448 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2449 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2450 #else
2451 /*
2452 * We're sizing based on system memory. 100k probes per 256M of system memory.
2453 * Yes, this is a WAG.
2454 */
2455 fasttrap_max = (sane_size >> 28) * 100000;
2456 if (fasttrap_max == 0)
2457 fasttrap_max = 50000;
2458 #endif
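/*
 * For example, a 1GB machine computes (1 << 30) >> 28 = 4, giving
 * 400,000 probes; under 256MB the shift yields zero and the 50,000
 * floor applies instead.
 */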
2459 fasttrap_total = 0;
2460
2461 /*
2462 * Conjure up the tracepoints hashtable...
2463 */
2464 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2465 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2466
2467 if (nent == 0 || nent > 0x1000000)
2468 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2469
2470 if ((nent & (nent - 1)) == 0)
2471 fasttrap_tpoints.fth_nent = nent;
2472 else
2473 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
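/*
 * The table size must be a power of two so fth_mask below can act as
 * a cheap modulus. Assuming fasttrap_highbit() matches Solaris
 * highbit() (the 1-based index of the most significant set bit), a
 * non-power-of-two request like 1000 rounds up to 1 << 10 = 1024.
 */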
2474 ASSERT(fasttrap_tpoints.fth_nent > 0);
2475 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2476 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2477 sizeof (fasttrap_bucket_t), KM_SLEEP);
2478 ASSERT(fasttrap_tpoints.fth_table != NULL);
2479 #if defined(__APPLE__)
2480 /*
2481 * We have to explicitly initialize all locks...
2482 */
2483 unsigned int i;
2484 for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
2485 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2486 }
2487 #endif
2488
2489 /*
2490 * ... and the providers hash table...
2491 */
2492 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2493 if ((nent & (nent - 1)) == 0)
2494 fasttrap_provs.fth_nent = nent;
2495 else
2496 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2497 ASSERT(fasttrap_provs.fth_nent > 0);
2498 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2499 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2500 sizeof (fasttrap_bucket_t), KM_SLEEP);
2501 ASSERT(fasttrap_provs.fth_table != NULL);
2502 #if defined(__APPLE__)
2503 /*
2504 * We have to explicitly initialize all locks...
2505 */
2506 for (i=0; i<fasttrap_provs.fth_nent; i++) {
2507 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2508 }
2509 #endif
2510
2511 /*
2512 * ... and the procs hash table.
2513 */
2514 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2515 if ((nent & (nent - 1)) == 0)
2516 fasttrap_procs.fth_nent = nent;
2517 else
2518 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2519 ASSERT(fasttrap_procs.fth_nent > 0);
2520 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2521 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2522 sizeof (fasttrap_bucket_t), KM_SLEEP);
2523 ASSERT(fasttrap_procs.fth_table != NULL);
2524 #if defined(__APPLE__)
2525 /*
2526 * We have to explicitly initialize all locks...
2527 */
2528 for (i=0; i<fasttrap_procs.fth_nent; i++) {
2529 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2530 }
2531 #endif
2532
2533 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2534 &fasttrap_meta_id);
2535
2536 return (DDI_SUCCESS);
2537 }
2538
2539 static int
2540 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2541 {
2542 #pragma unused(dev, flags, devtype, p)
2543 return 0;
2544 }
2545
2546 static int
2547 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2548 {
2549 int err, rv = 0;
2550 user_addr_t uaddrp;
2551
2552 if (proc_is64bit(p))
2553 uaddrp = *(user_addr_t *)data;
2554 else
2555 uaddrp = (user_addr_t) *(uint32_t *)data;
2556
2557 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2558
2559 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2560 if (err != 0) {
2561 ASSERT( (err & 0xfffff000) == 0 );
2562 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2563 } else if (rv != 0) {
2564 ASSERT( (rv & 0xfff00000) == 0 );
2565 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2566 } else
2567 return 0;
2568 }
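/*
 * Worked example of the encoding above: a copyin failure surfaces as
 * err == EFAULT (14) and is returned directly, so the consumer sees
 * errno 14. A Solaris-style out-parameter of rv == 3 instead returns
 * 3 << 12 == 12288; any errno >= 4096 therefore tells the consumer to
 * recover the real value as errno >> 12.
 */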
2569
2570 static int gFasttrapInited = 0;
2571
2572 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2573
2574 /*
2575 * A struct describing which functions will get invoked for certain
2576 * actions.
2577 */
2578
2579 static struct cdevsw fasttrap_cdevsw =
2580 {
2581 _fasttrap_open, /* open */
2582 eno_opcl, /* close */
2583 eno_rdwrt, /* read */
2584 eno_rdwrt, /* write */
2585 _fasttrap_ioctl, /* ioctl */
2586 (stop_fcn_t *)nulldev, /* stop */
2587 (reset_fcn_t *)nulldev, /* reset */
2588 NULL, /* tty's */
2589 eno_select, /* select */
2590 eno_mmap, /* mmap */
2591 eno_strat, /* strategy */
2592 eno_getc, /* getc */
2593 eno_putc, /* putc */
2594 0 /* type */
2595 };
2596
2597 void fasttrap_init(void);
2598
2599 void
2600 fasttrap_init( void )
2601 {
2602 /*
2603 * This method is now invoked from multiple places: any open of /dev/dtrace,
2604 * and also from dtrace_init() if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2605 *
2606 * The reason is to delay allocating the (rather large) resources as late as possible.
2607 */
2608 if (0 == gFasttrapInited) {
2609 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2610
2611 if (majdevno < 0) {
2612 // FIX ME! What kind of error reporting to do here?
2613 printf("fasttrap_init: failed to allocate a major number!\n");
2614 return;
2615 }
2616
2617 dev_t device = makedev( (uint32_t)majdevno, 0 );
2618 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2619 return;
2620 }
2621
2622 /*
2623 * Allocate the fasttrap_tracepoint_t zone
2624 */
2625 fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2626 1024 * sizeof(fasttrap_tracepoint_t),
2627 sizeof(fasttrap_tracepoint_t),
2628 "dtrace.fasttrap_tracepoint_t");
2629
2630 /*
2631 * fasttrap_probe_t's are variable in size. We use an array of zones to
2632 * cover the most common sizes.
2633 */
2634 int i;
2635 for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2636 size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2637 fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2638 1024 * zone_element_size,
2639 zone_element_size,
2640 fasttrap_probe_t_zone_names[i]);
2641 }
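/*
 * Zone i hands out probes with exactly i tracepoints, which is why the
 * allocation sites above index fasttrap_probe_t_zones[] directly by
 * tracepoint count and why slot 0 is never populated.
 */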
2642
2643
2644 /*
2645 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2646 */
2647 fasttrap_lck_attr = lck_attr_alloc_init();
2648 fasttrap_lck_grp_attr= lck_grp_attr_alloc_init();
2649 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2650
2651 /*
2652 * Initialize global locks
2653 */
2654 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2655 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2656
2657 if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
2658 // FIX ME! Do we remove the devfs node here?
2659 // What kind of error reporting?
2660 printf("fasttrap_init: Call to fasttrap_attach failed.\n");
2661 return;
2662 }
2663
2664 gFasttrapInited = 1;
2665 }
2666 }
2667
2668 #undef FASTTRAP_MAJOR