bsd/dev/dtrace/fasttrap.c (xnu-3789.51.2)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
29 */
30
31 #include <sys/types.h>
32 #include <sys/time.h>
33
34 #include <sys/errno.h>
35 #include <sys/stat.h>
36 #include <sys/conf.h>
37 #include <sys/systm.h>
38 #include <sys/kauth.h>
39 #include <sys/utfconv.h>
40
41 #include <sys/fasttrap.h>
42 #include <sys/fasttrap_impl.h>
43 #include <sys/fasttrap_isa.h>
44 #include <sys/dtrace.h>
45 #include <sys/dtrace_impl.h>
46 #include <sys/proc.h>
47
48 #include <miscfs/devfs/devfs.h>
49 #include <sys/proc_internal.h>
50 #include <sys/dtrace_glue.h>
51 #include <sys/dtrace_ptss.h>
52
53 #include <kern/zalloc.h>
54
55 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
56 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
57
58 __private_extern__
59 void
60 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
61
62 /*
63 * User-Land Trap-Based Tracing
64 * ----------------------------
65 *
66 * The fasttrap provider allows DTrace consumers to instrument any user-level
67 * instruction to gather data; this includes probes with semantic
68 * significance like entry and return as well as simple offsets into the
69 * function. While the specific techniques used are very ISA specific, the
70 * methodology is generalizable to any architecture.
71 *
72 *
73 * The General Methodology
74 * -----------------------
75 *
76 * With the primary goal of tracing every user-land instruction and the
77 * limitation that we can't trust user space so don't want to rely on much
78 * information there, we begin by replacing the instructions we want to trace
79 * with trap instructions. Each instruction we overwrite is saved into a hash
80 * table keyed by process ID and pc address. When we enter the kernel due to
81 * this trap instruction, we need the effects of the replaced instruction to
82 * appear to have occurred before we proceed with the user thread's
83 * execution.
84 *
85 * Each user level thread is represented by a ulwp_t structure which is
86 * always easily accessible through a register. The most basic way to produce
87 * the effects of the instruction we replaced is to copy that instruction out
88 * to a bit of scratch space reserved in the user thread's ulwp_t structure
89 * (a sort of kernel-private thread local storage), set the PC to that
90 * scratch space and single step. When we reenter the kernel after single
91 * stepping the instruction we must then adjust the PC to point to what would
92 * normally be the next instruction. Of course, special care must be taken
93 * for branches and jumps, but these represent such a small fraction of any
94 * instruction set that writing the code to emulate these in the kernel is
95 * not too difficult.
96 *
97 * Return probes may require several tracepoints to trace every return site,
98 * and, conversely, each tracepoint may activate several probes (the entry
99 * and offset 0 probes, for example). To solve this multiplexing problem,
100 * tracepoints contain lists of probes to activate and probes contain lists
101 * of tracepoints to enable. If a probe is activated, it adds its ID to
102 * existing tracepoints or creates new ones as necessary.
103 *
104 * Most probes are activated _before_ the instruction is executed, but return
105 * probes are activated _after_ the effects of the last instruction of the
106 * function are visible. Return probes must be fired _after_ we have
107 * single-stepped the instruction whereas all other probes are fired
108 * beforehand.
109 *
110 *
111 * Lock Ordering
112 * -------------
113 *
114 * The lock ordering below -- both internally and with respect to the DTrace
115 * framework -- is a little tricky and bears some explanation. Each provider
116 * has a lock (ftp_mtx) that protects its members including reference counts
117 * for enabled probes (ftp_rcount), consumers actively creating probes
118 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
119 * from being freed. A provider is looked up by taking the bucket lock for the
120 * provider hash table, and is returned with its lock held. The provider lock
121 * may be taken in functions invoked by the DTrace framework, but may not be
122 * held while calling functions in the DTrace framework.
123 *
124 * To ensure consistency over multiple calls to the DTrace framework, the
125 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
126 * not be taken when holding the provider lock as that would create a cyclic
127 * lock ordering. In situations where one would naturally take the provider
128 * lock and then the creation lock, we instead bump a reference count to prevent
129 * the provider from disappearing, drop the provider lock, and acquire the
130 * creation lock.
131 *
132 * Briefly:
133 * bucket lock before provider lock
134 * DTrace before provider lock
135 * creation lock before DTrace
136 * never hold the provider lock and creation lock simultaneously
137 */
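/*
 * Illustrative sketch (editor's note, not compiled): the reference-count
 * dance described above, in this file's own vocabulary. A thread that
 * holds the provider lock (ftp_mtx) and needs the creation lock
 * (ftp_cmtx) pins the provider and reorders the locks:
 *
 *	lck_mtx_lock(&fp->ftp_mtx);
 *	fp->ftp_ccount++;		<- pin: provider can't be freed
 *	lck_mtx_unlock(&fp->ftp_mtx);	<- drop before taking ftp_cmtx
 *	lck_mtx_lock(&fp->ftp_cmtx);
 *	... calls into the DTrace framework ...
 *	lck_mtx_unlock(&fp->ftp_cmtx);
 *	lck_mtx_lock(&fp->ftp_mtx);
 *	fp->ftp_ccount--;		<- unpin; then consult ftp_retired
 *	lck_mtx_unlock(&fp->ftp_mtx);
 *
 * fasttrap_add_probe() below follows exactly this shape.
 */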
138
139 static dev_info_t *fasttrap_devi;
140 static dtrace_meta_provider_id_t fasttrap_meta_id;
141
142 static thread_call_t fasttrap_timeout;
143 static lck_mtx_t fasttrap_cleanup_mtx;
144 static uint_t fasttrap_cleanup_work;
145
146 /*
147 * Generation count on modifications to the global tracepoint lookup table.
148 */
149 static volatile uint64_t fasttrap_mod_gen;
150
151 /*
152 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
153 * based on system memory. Each time a probe is created, fasttrap_total is
154 * incremented by the number of tracepoints that may be associated with that
155 * probe; fasttrap_total is capped at fasttrap_max.
156 */
157
158 static uint32_t fasttrap_max;
159 static uint32_t fasttrap_total;
160
161
162 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
163 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
164 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
165
166 fasttrap_hash_t fasttrap_tpoints;
167 static fasttrap_hash_t fasttrap_provs;
168 static fasttrap_hash_t fasttrap_procs;
169
170 static uint64_t fasttrap_pid_count; /* pid ref count */
171 static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */
172
173 #define FASTTRAP_ENABLE_FAIL 1
174 #define FASTTRAP_ENABLE_PARTIAL 2
175
176 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
177 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
178
179 static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
180 const dtrace_pattr_t *);
181 static void fasttrap_provider_retire(proc_t*, const char *, int);
182 static void fasttrap_provider_free(fasttrap_provider_t *);
183
184 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
185 static void fasttrap_proc_release(fasttrap_proc_t *);
186
187 #define FASTTRAP_PROVS_INDEX(pid, name) \
188 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
189
190 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
191
192 /*
193 * APPLE NOTE: To save memory, some common memory allocations are given
194 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
195 * which means it would fall into the kalloc.128 bucket. With
196 * 20k elements allocated, the space saved is substantial.
197 */
198
199 struct zone *fasttrap_tracepoint_t_zone;
200
201 /*
202 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
203 * that the sweet spot for reducing memory footprint is covering the first
204 * three sizes. Everything larger goes into the common pool.
205 */
206 #define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4
207
208 struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];
209
210 static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
211 "",
212 "dtrace.fasttrap_probe_t[1]",
213 "dtrace.fasttrap_probe_t[2]",
214 "dtrace.fasttrap_probe_t[3]"
215 };
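/*
 * Editor's sketch of the allocation rule these zones imply (it mirrors
 * the logic in fasttrap_add_probe() and fasttrap_pid_destroy() below):
 *
 *	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS)
 *		pp = zalloc(fasttrap_probe_t_zones[ntps]);
 *	else
 *		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]),
 *		    KM_SLEEP);
 *
 * Index 0 has the empty name above because a probe always has at least
 * one tracepoint, so that zone is never created.
 */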
216
217 /*
218 * APPLE NOTE: We have to manage locks explicitly
219 */
220 lck_grp_t* fasttrap_lck_grp;
221 lck_grp_attr_t* fasttrap_lck_grp_attr;
222 lck_attr_t* fasttrap_lck_attr;
223
224 static int
225 fasttrap_highbit(ulong_t i)
226 {
227 int h = 1;
228
229 if (i == 0)
230 return (0);
231 #ifdef _LP64
232 if (i & 0xffffffff00000000ul) {
233 h += 32; i >>= 32;
234 }
235 #endif
236 if (i & 0xffff0000) {
237 h += 16; i >>= 16;
238 }
239 if (i & 0xff00) {
240 h += 8; i >>= 8;
241 }
242 if (i & 0xf0) {
243 h += 4; i >>= 4;
244 }
245 if (i & 0xc) {
246 h += 2; i >>= 2;
247 }
248 if (i & 0x2) {
249 h += 1;
250 }
251 return (h);
252 }
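/*
 * fasttrap_highbit() is a binary bit scan: it returns the 1-based index
 * of the highest set bit, i.e. floor(log2(i)) + 1, and 0 for i == 0.
 * For example, fasttrap_highbit(1) == 1 and fasttrap_highbit(0x4000) == 15,
 * which lets a caller round a hash-table size up to a power of two so
 * the "& fth_mask" indexing used throughout this file stays valid.
 */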
253
254 static uint_t
255 fasttrap_hash_str(const char *p)
256 {
257 unsigned int g;
258 uint_t hval = 0;
259
260 while (*p) {
261 hval = (hval << 4) + *p++;
262 if ((g = (hval & 0xf0000000)) != 0)
263 hval ^= g >> 24;
264 hval &= ~g;
265 }
266 return (hval);
267 }
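/*
 * This is the classic PJW/ELF string hash. Combined with the macros
 * above, the provider bucket for, say, the pid provider of process 123
 * is chosen roughly as:
 *
 *	i = (fasttrap_hash_str("pid") + 123) & fasttrap_provs.fth_mask;
 *
 * The masking only distributes well because the default table sizes
 * above are powers of two.
 */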
268
269 /*
270 * APPLE NOTE: fasttrap_sigtrap not implemented
271 */
272 void
273 fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
274 {
275 #pragma unused(p, t, pc)
276
277 #if !defined(__APPLE__)
278 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
279
280 sqp->sq_info.si_signo = SIGTRAP;
281 sqp->sq_info.si_code = TRAP_DTRACE;
282 sqp->sq_info.si_addr = (caddr_t)pc;
283
284 mutex_enter(&p->p_lock);
285 sigaddqa(p, t, sqp);
286 mutex_exit(&p->p_lock);
287
288 if (t != NULL)
289 aston(t);
290 #endif /* __APPLE__ */
291
292 printf("fasttrap_sigtrap called with no implementation.\n");
293 }
294
295 /*
296 * This function ensures that no threads are actively using the memory
297 * associated with probes that were formerly live.
298 */
299 static void
300 fasttrap_mod_barrier(uint64_t gen)
301 {
302 unsigned int i;
303
304 if (gen < fasttrap_mod_gen)
305 return;
306
307 fasttrap_mod_gen++;
308
309 for (i = 0; i < NCPU; i++) {
310 lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
311 lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
312 }
313 }
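/*
 * Editor's note: the lock-cycling above acts as a barrier because probe
 * context in the ISA-specific code (fasttrap_pid_probe()) holds its
 * CPU's cpuc_pid_lock while it walks tracepoint structures. Taking and
 * releasing every CPU's lock therefore cannot finish until any
 * probe-context critical section that began before the generation bump
 * has drained.
 */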
314
315 /*
316 * This is the timeout's callback for cleaning up the providers and their
317 * probes.
318 */
319 /*ARGSUSED*/
320 static void
321 fasttrap_pid_cleanup_cb(void *ignored, void* ignored2)
322 {
323 #pragma unused(ignored, ignored2)
324 fasttrap_provider_t **fpp, *fp;
325 fasttrap_bucket_t *bucket;
326 dtrace_provider_id_t provid;
327 unsigned int i, later = 0;
328
329 static volatile int in = 0;
330 ASSERT(in == 0);
331 in = 1;
332
333 lck_mtx_lock(&fasttrap_cleanup_mtx);
334 while (fasttrap_cleanup_work) {
335 fasttrap_cleanup_work = 0;
336 lck_mtx_unlock(&fasttrap_cleanup_mtx);
337
338 later = 0;
339
340 /*
341 * Iterate over all the providers trying to remove the marked
342 * ones. If a provider is marked but not retired, we just
343 * have to take a crack at removing it -- it's no big deal if
344 * we can't.
345 */
346 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
347 bucket = &fasttrap_provs.fth_table[i];
348 lck_mtx_lock(&bucket->ftb_mtx);
349 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
350
351 while ((fp = *fpp) != NULL) {
352 if (!fp->ftp_marked) {
353 fpp = &fp->ftp_next;
354 continue;
355 }
356
357 lck_mtx_lock(&fp->ftp_mtx);
358
359 /*
360 * If this provider has consumers actively
361 * creating probes (ftp_ccount) or is a USDT
362 * provider (ftp_mcount), we can't unregister
363 * or even condense.
364 */
365 if (fp->ftp_ccount != 0 ||
366 fp->ftp_mcount != 0) {
367 fp->ftp_marked = 0;
368 lck_mtx_unlock(&fp->ftp_mtx);
369 continue;
370 }
371
372 if (!fp->ftp_retired || fp->ftp_rcount != 0)
373 fp->ftp_marked = 0;
374
375 lck_mtx_unlock(&fp->ftp_mtx);
376
377 /*
378 * If we successfully unregister this
379 * provider we can remove it from the hash
380 * chain and free the memory. If our attempt
381 * to unregister fails and this is a retired
382 * provider, increment our flag to try again
383 * pretty soon. If we've consumed more than
384 * half of our total permitted number of
385 * probes call dtrace_condense() to try to
386 * clean out the unenabled probes.
387 */
388 provid = fp->ftp_provid;
389 if (dtrace_unregister(provid) != 0) {
390 if (fasttrap_total > fasttrap_max / 2)
391 (void) dtrace_condense(provid);
392 later += fp->ftp_marked;
393 fpp = &fp->ftp_next;
394 } else {
395 *fpp = fp->ftp_next;
396 fasttrap_provider_free(fp);
397 }
398 }
399 lck_mtx_unlock(&bucket->ftb_mtx);
400 }
401
402 lck_mtx_lock(&fasttrap_cleanup_mtx);
403 }
404
405 ASSERT(fasttrap_timeout != 0);
406
407 /*
408 * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
409 */
410 if (fasttrap_timeout != (thread_call_t)1)
411 thread_call_free(fasttrap_timeout);
412
413 /*
414 * If we were unable to remove a retired provider, try again after
415 * a second. This situation can occur in certain circumstances where
416 * providers cannot be unregistered even though they have no probes
417 * enabled because of an execution of dtrace -l or something similar.
418 * If the timeout has been disabled (set to 1 because we're trying
419 * to detach), we set fasttrap_cleanup_work to ensure that we'll
420 * get a chance to do that work if and when the timeout is reenabled
421 * (if detach fails).
422 */
423 if (later > 0 && fasttrap_timeout != (thread_call_t)1)
424 /* The time value passed to dtrace_timeout is in nanos */
425 fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
426 else if (later > 0)
427 fasttrap_cleanup_work = 1;
428 else
429 fasttrap_timeout = 0;
430
431 lck_mtx_unlock(&fasttrap_cleanup_mtx);
432 in = 0;
433 }
434
435 /*
436 * Activates the asynchronous cleanup mechanism.
437 */
438 static void
439 fasttrap_pid_cleanup(void)
440 {
441 lck_mtx_lock(&fasttrap_cleanup_mtx);
442 fasttrap_cleanup_work = 1;
443 if (fasttrap_timeout == 0)
444 fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
445 lck_mtx_unlock(&fasttrap_cleanup_mtx);
446 }
447
448 /*
449 * This is called from cfork() via dtrace_fasttrap_fork(). The child
450 * process's address space is (roughly) a copy of the parent process's, so
451 * we have to remove all the instrumentation we had previously enabled in the
452 * parent.
453 */
454 static void
455 fasttrap_fork(proc_t *p, proc_t *cp)
456 {
457 pid_t ppid = p->p_pid;
458 unsigned int i;
459
460 ASSERT(current_proc() == p);
461 lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
462 ASSERT(p->p_dtrace_count > 0);
463 ASSERT(cp->p_dtrace_count == 0);
464
465 /*
466 * This would be simpler and faster if we maintained per-process
467 * hash tables of enabled tracepoints. It could, however, potentially
468 * slow down execution of a tracepoint since we'd need to go
469 * through two levels of indirection. In the future, we should
470 * consider either maintaining per-process ancillary lists of
471 * enabled tracepoints or hanging a pointer to a per-process hash
472 * table of enabled tracepoints off the proc structure.
473 */
474
475 /*
476 * We don't have to worry about the child process disappearing
477 * because we're in fork().
478 */
479 if (cp != sprlock(cp->p_pid)) {
480 printf("fasttrap_fork: sprlock(%d) returned a differt proc\n", cp->p_pid);
481 return;
482 }
483 proc_unlock(cp);
484
485 /*
486 * Iterate over every tracepoint looking for ones that belong to the
487 * parent process, and remove each from the child process.
488 */
489 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
490 fasttrap_tracepoint_t *tp;
491 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
492
493 lck_mtx_lock(&bucket->ftb_mtx);
494 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
495 if (tp->ftt_pid == ppid &&
496 tp->ftt_proc->ftpc_acount != 0) {
497 fasttrap_tracepoint_remove(cp, tp);
498
499 /*
500 * The count of active providers can only be
501 * decremented (i.e. to zero) during exec,
502 * exit, and removal of a meta provider so it
503 * should be impossible to drop the count
504 * mid-fork.
505 */
506 ASSERT(tp->ftt_proc->ftpc_acount != 0);
507 }
508 }
509 lck_mtx_unlock(&bucket->ftb_mtx);
510 }
511
512 /*
513 * Free any ptss pages/entries in the child.
514 */
515 dtrace_ptss_fork(p, cp);
516
517 proc_lock(cp);
518 sprunlock(cp);
519 }
520
521 /*
522 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
523 * is set on the proc structure to indicate that there is a pid provider
524 * associated with this process.
525 */
526 static void
527 fasttrap_exec_exit(proc_t *p)
528 {
529 ASSERT(p == current_proc());
530 lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
531 lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
532
533
534 /* APPLE NOTE: Okay, the locking here is really odd and needs some
535 * explaining. This method is always called with the proc_lock held.
536 * We must drop the proc_lock before calling fasttrap_provider_retire
537 * to avoid a deadlock when it takes the bucket lock.
538 *
539 * Next, the dtrace_ptss_exec_exit function requires the sprlock
540 * be held, but not the proc_lock.
541 *
542 * Finally, we must re-acquire the proc_lock
543 */
544 proc_unlock(p);
545
546 /*
547 * We clean up the pid provider for this process here; user-land
548 * static probes are handled by the meta-provider remove entry point.
549 */
550 fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);
551
552 /*
553 * APPLE NOTE: We also need to remove any aliased providers.
554 * XXX optimization: track which provider types are instantiated
555 * and only retire as needed.
556 */
557 fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
558 fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);
559
560 /*
561 * This should be called after it is no longer possible for a user
562 * thread to execute (potentially dtrace instrumented) instructions.
563 */
564 lck_mtx_lock(&p->p_dtrace_sprlock);
565 dtrace_ptss_exec_exit(p);
566 lck_mtx_unlock(&p->p_dtrace_sprlock);
567
568 proc_lock(p);
569 }
570
571
572 /*ARGSUSED*/
573 static void
574 fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
575 {
576 #pragma unused(arg, desc)
577 /*
578 * There are no "default" pid probes.
579 */
580 }
581
582 static int
583 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
584 {
585 fasttrap_tracepoint_t *tp, *new_tp = NULL;
586 fasttrap_bucket_t *bucket;
587 fasttrap_id_t *id;
588 pid_t pid;
589 user_addr_t pc;
590
591 ASSERT(index < probe->ftp_ntps);
592
593 pid = probe->ftp_pid;
594 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
595 id = &probe->ftp_tps[index].fit_id;
596
597 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
598
599 //ASSERT(!(p->p_flag & SVFORK));
600
601 /*
602 * Before we make any modifications, make sure we've imposed a barrier
603 * on the generation in which this probe was last modified.
604 */
605 fasttrap_mod_barrier(probe->ftp_gen);
606
607 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
608
609 /*
610 * If the tracepoint has already been enabled, just add our id to the
611 * list of interested probes. This may be our second time through
612 * this path in which case we'll have constructed the tracepoint we'd
613 * like to install. If we can't find a match, and have an allocated
614 * tracepoint ready to go, enable that one now.
615 *
616 * A tracepoint whose process is defunct is also considered defunct.
617 */
618 again:
619 lck_mtx_lock(&bucket->ftb_mtx);
620 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
621 /*
622 * Note that it's safe to access the active count on the
623 * associated proc structure because we know that at least one
624 * provider (this one) will still be around throughout this
625 * operation.
626 */
627 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
628 tp->ftt_proc->ftpc_acount == 0)
629 continue;
630
631 /*
632 * Now that we've found a matching tracepoint, it would be
633 * a decent idea to confirm that the tracepoint is still
634 * enabled and the trap instruction hasn't been overwritten.
635 * Since this is a little hairy, we'll punt for now.
636 */
637
638 /*
639 * This can't be the first interested probe. We don't have
640 * to worry about another thread being in the midst of
641 * deleting this tracepoint (which would be the only valid
642 * reason for a tracepoint to have no interested probes)
643 * since we're holding P_PR_LOCK for this process.
644 */
645 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
646
647 switch (id->fti_ptype) {
648 case DTFTP_ENTRY:
649 case DTFTP_OFFSETS:
650 case DTFTP_IS_ENABLED:
651 id->fti_next = tp->ftt_ids;
652 dtrace_membar_producer();
653 tp->ftt_ids = id;
654 dtrace_membar_producer();
655 break;
656
657 case DTFTP_RETURN:
658 case DTFTP_POST_OFFSETS:
659 id->fti_next = tp->ftt_retids;
660 dtrace_membar_producer();
661 tp->ftt_retids = id;
662 dtrace_membar_producer();
663 break;
664
665 default:
666 ASSERT(0);
667 }
668
669 lck_mtx_unlock(&bucket->ftb_mtx);
670
671 if (new_tp != NULL) {
672 new_tp->ftt_ids = NULL;
673 new_tp->ftt_retids = NULL;
674 }
675
676 return (0);
677 }
678
679 /*
680 * If we have a good tracepoint ready to go, install it now while
681 * we have the lock held and no one can screw with us.
682 */
683 if (new_tp != NULL) {
684 int rc = 0;
685
686 new_tp->ftt_next = bucket->ftb_data;
687 dtrace_membar_producer();
688 bucket->ftb_data = new_tp;
689 dtrace_membar_producer();
690 lck_mtx_unlock(&bucket->ftb_mtx);
691
692 /*
693 * Activate the tracepoint in the ISA-specific manner.
694 * If this fails, we need to report the failure, but
695 * indicate that this tracepoint must still be disabled
696 * by calling fasttrap_tracepoint_disable().
697 */
698 if (fasttrap_tracepoint_install(p, new_tp) != 0)
699 rc = FASTTRAP_ENABLE_PARTIAL;
700
701 /*
702 * Increment the count of the number of tracepoints active in
703 * the victim process.
704 */
705 //ASSERT(p->p_proc_flag & P_PR_LOCK);
706 p->p_dtrace_count++;
707
708 return (rc);
709 }
710
711 lck_mtx_unlock(&bucket->ftb_mtx);
712
713 /*
714 * Initialize the tracepoint that's been preallocated with the probe.
715 */
716 new_tp = probe->ftp_tps[index].fit_tp;
717
718 ASSERT(new_tp->ftt_pid == pid);
719 ASSERT(new_tp->ftt_pc == pc);
720 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
721 ASSERT(new_tp->ftt_ids == NULL);
722 ASSERT(new_tp->ftt_retids == NULL);
723
724 switch (id->fti_ptype) {
725 case DTFTP_ENTRY:
726 case DTFTP_OFFSETS:
727 case DTFTP_IS_ENABLED:
728 id->fti_next = NULL;
729 new_tp->ftt_ids = id;
730 break;
731
732 case DTFTP_RETURN:
733 case DTFTP_POST_OFFSETS:
734 id->fti_next = NULL;
735 new_tp->ftt_retids = id;
736 break;
737
738 default:
739 ASSERT(0);
740 }
741
742 /*
743 * If the ISA-dependent initialization goes to plan, go back to the
744 * beginning and try to install this freshly made tracepoint.
745 */
746 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
747 goto again;
748
749 new_tp->ftt_ids = NULL;
750 new_tp->ftt_retids = NULL;
751
752 return (FASTTRAP_ENABLE_FAIL);
753 }
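/*
 * Editor's summary of the flow above: enabling is a two-pass loop.
 * Pass one looks up a live tracepoint at (pid, pc) and simply chains
 * this probe's id onto ftt_ids or ftt_retids. Failing that, the probe's
 * preallocated tracepoint is initialized with fasttrap_tracepoint_init()
 * and we jump back to "again", where it is either installed or, if
 * another thread raced us at the same (pid, pc), left unused with its
 * id lists cleared.
 */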
754
755 static void
756 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
757 {
758 fasttrap_bucket_t *bucket;
759 fasttrap_provider_t *provider = probe->ftp_prov;
760 fasttrap_tracepoint_t **pp, *tp;
761 fasttrap_id_t *id, **idp;
762 pid_t pid;
763 user_addr_t pc;
764
765 ASSERT(index < probe->ftp_ntps);
766
767 pid = probe->ftp_pid;
768 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
769 id = &probe->ftp_tps[index].fit_id;
770
771 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
772
773 /*
774 * Find the tracepoint and make sure that our id is one of the
775 * ones registered with it.
776 */
777 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
778 lck_mtx_lock(&bucket->ftb_mtx);
779 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
780 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
781 tp->ftt_proc == provider->ftp_proc)
782 break;
783 }
784
785 /*
786 * If we somehow lost this tracepoint, we're in a world of hurt.
787 */
788 ASSERT(tp != NULL);
789
790 switch (id->fti_ptype) {
791 case DTFTP_ENTRY:
792 case DTFTP_OFFSETS:
793 case DTFTP_IS_ENABLED:
794 ASSERT(tp->ftt_ids != NULL);
795 idp = &tp->ftt_ids;
796 break;
797
798 case DTFTP_RETURN:
799 case DTFTP_POST_OFFSETS:
800 ASSERT(tp->ftt_retids != NULL);
801 idp = &tp->ftt_retids;
802 break;
803
804 default:
805 /* Fix compiler warning... */
806 idp = NULL;
807 ASSERT(0);
808 }
809
810 while ((*idp)->fti_probe != probe) {
811 idp = &(*idp)->fti_next;
812 ASSERT(*idp != NULL);
813 }
814
815 id = *idp;
816 *idp = id->fti_next;
817 dtrace_membar_producer();
818
819 ASSERT(id->fti_probe == probe);
820
821 /*
822 * If there are other registered enablings of this tracepoint, we're
823 * all done, but if this was the last probe associated with this
824 * tracepoint, we need to remove and free it.
825 */
826 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
827
828 /*
829 * If the current probe's tracepoint is in use, swap it
830 * for an unused tracepoint.
831 */
832 if (tp == probe->ftp_tps[index].fit_tp) {
833 fasttrap_probe_t *tmp_probe;
834 fasttrap_tracepoint_t **tmp_tp;
835 uint_t tmp_index;
836
837 if (tp->ftt_ids != NULL) {
838 tmp_probe = tp->ftt_ids->fti_probe;
839 /* LINTED - alignment */
840 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
841 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
842 } else {
843 tmp_probe = tp->ftt_retids->fti_probe;
844 /* LINTED - alignment */
845 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
846 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
847 }
848
849 ASSERT(*tmp_tp != NULL);
850 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
851 ASSERT((*tmp_tp)->ftt_ids == NULL);
852 ASSERT((*tmp_tp)->ftt_retids == NULL);
853
854 probe->ftp_tps[index].fit_tp = *tmp_tp;
855 *tmp_tp = tp;
856
857 }
858
859 lck_mtx_unlock(&bucket->ftb_mtx);
860
861 /*
862 * Tag the modified probe with the generation in which it was
863 * changed.
864 */
865 probe->ftp_gen = fasttrap_mod_gen;
866 return;
867 }
868
869 lck_mtx_unlock(&bucket->ftb_mtx);
870
871 /*
872 * We can't safely remove the tracepoint from the set of active
873 * tracepoints until we've actually removed the fasttrap instruction
874 * from the process's text. We can, however, operate on this
875 * tracepoint secure in the knowledge that no other thread is going to
876 * be looking at it since we hold P_PR_LOCK on the process if it's
877 * live or we hold the provider lock on the process if it's dead and
878 * gone.
879 */
880
881 /*
882 * We only need to remove the actual instruction if we're looking
883 * at an existing process
884 */
885 if (p != NULL) {
886 /*
887 * If we fail to restore the instruction we need to kill
888 * this process since it's in a completely unrecoverable
889 * state.
890 */
891 if (fasttrap_tracepoint_remove(p, tp) != 0)
892 fasttrap_sigtrap(p, NULL, pc);
893
894 /*
895 * Decrement the count of the number of tracepoints active
896 * in the victim process.
897 */
898 //ASSERT(p->p_proc_flag & P_PR_LOCK);
899 p->p_dtrace_count--;
900 }
901
902 /*
903 * Remove the probe from the hash table of active tracepoints.
904 */
905 lck_mtx_lock(&bucket->ftb_mtx);
906 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
907 ASSERT(*pp != NULL);
908 while (*pp != tp) {
909 pp = &(*pp)->ftt_next;
910 ASSERT(*pp != NULL);
911 }
912
913 *pp = tp->ftt_next;
914 dtrace_membar_producer();
915
916 lck_mtx_unlock(&bucket->ftb_mtx);
917
918 /*
919 * Tag the modified probe with the generation in which it was changed.
920 */
921 probe->ftp_gen = fasttrap_mod_gen;
922 }
923
924 static void
925 fasttrap_enable_callbacks(void)
926 {
927 /*
928 * We don't have to play the rw lock game here because we're
929 * providing something rather than taking something away --
930 * we can be sure that no threads have tried to follow this
931 * function pointer yet.
932 */
933 lck_mtx_lock(&fasttrap_count_mtx);
934 if (fasttrap_pid_count == 0) {
935 ASSERT(dtrace_pid_probe_ptr == NULL);
936 ASSERT(dtrace_return_probe_ptr == NULL);
937 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
938 dtrace_return_probe_ptr = &fasttrap_return_probe;
939 }
940 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
941 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
942 fasttrap_pid_count++;
943 lck_mtx_unlock(&fasttrap_count_mtx);
944 }
945
946 static void
947 fasttrap_disable_callbacks(void)
948 {
949 //ASSERT(MUTEX_HELD(&cpu_lock));
950
951 lck_mtx_lock(&fasttrap_count_mtx);
952 ASSERT(fasttrap_pid_count > 0);
953 fasttrap_pid_count--;
954 if (fasttrap_pid_count == 0) {
955 dtrace_cpu_t *cur, *cpu = CPU;
956
957 /*
958 * APPLE NOTE: This loop seems broken, it touches every CPU
959 * but the one we're actually running on. Need to ask Sun folks
960 * if that is safe. Scenario is this: We're running on CPU A,
961 * and lock all but A. Then we get preempted, and start running
962 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
963 */
964 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
965 lck_rw_lock_exclusive(&cur->cpu_ft_lock);
966 // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
967 }
968
969 dtrace_pid_probe_ptr = NULL;
970 dtrace_return_probe_ptr = NULL;
971
972 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
973 lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
974 // rw_exit(&cur->cpu_ft_lock);
975 }
976 }
977 lck_mtx_unlock(&fasttrap_count_mtx);
978 }
979
980 /*ARGSUSED*/
981 static int
982 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
983 {
984 #pragma unused(arg, id)
985 fasttrap_probe_t *probe = parg;
986 proc_t *p;
987 int i, rc;
988
989 ASSERT(probe != NULL);
990 ASSERT(!probe->ftp_enabled);
991 ASSERT(id == probe->ftp_id);
992 // ASSERT(MUTEX_HELD(&cpu_lock));
993
994 /*
995 * Increment the count of enabled probes on this probe's provider;
996 * the provider can't go away while the probe still exists. We
997 * must increment this even if we aren't able to properly enable
998 * this probe.
999 */
1000 lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
1001 probe->ftp_prov->ftp_rcount++;
1002 lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);
1003
1004 /*
1005 * If this probe's provider is retired (meaning it was valid in a
1006 * previously exec'ed incarnation of this address space), bail out. The
1007 * provider can't go away while we're in this code path.
1008 */
1009 if (probe->ftp_prov->ftp_retired)
1010 return(0);
1011
1012 /*
1013 * If we can't find the process, it may be that we're in the context of
1014 * a fork in which the traced process is being born and we're copying
1015 * USDT probes. Otherwise, the process is gone so bail.
1016 */
1017 if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
1018 /*
1019 * APPLE NOTE: We should never end up here. The Solaris sprlock()
1020 * does not return processes with SIDL set, but we always return
1021 * the child process.
1022 */
1023 return(0);
1024 }
1025
1026 /*
1027 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
1028 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
1029 * To mimic this, we allocate on demand scratch space. If this is the first
1030 * time a probe has been enabled in this process, we need to allocate scratch
1031 * space for each already existing thread. Now is a good time to do this, as
1032 * the target process is suspended and the proc_lock is held.
1033 */
1034 if (p->p_dtrace_ptss_pages == NULL) {
1035 dtrace_ptss_enable(p);
1036 }
1037
1038 // ASSERT(!(p->p_flag & SVFORK));
1039 proc_unlock(p);
1040
1041 /*
1042 * We have to enable the trap entry point before any user threads have
1043 * the chance to execute the trap instruction we're about to place
1044 * in their process's text.
1045 */
1046 fasttrap_enable_callbacks();
1047
1048 /*
1049 * Enable all the tracepoints and add this probe's id to each
1050 * tracepoint's list of active probes.
1051 */
1052 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1053 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1054 /*
1055 * If enabling the tracepoint failed completely,
1056 * we don't have to disable it; if the failure
1057 * was only partial we must disable it.
1058 */
1059 if (rc == FASTTRAP_ENABLE_FAIL)
1060 i--;
1061 else
1062 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1063
1064 /*
1065 * Back up and pull out all the tracepoints we've
1066 * created so far for this probe.
1067 */
1068 while (i >= 0) {
1069 fasttrap_tracepoint_disable(p, probe, i);
1070 i--;
1071 }
1072
1073 proc_lock(p);
1074 sprunlock(p);
1075
1076 /*
1077 * Since we're not actually enabling this probe,
1078 * drop our reference on the trap table entry.
1079 */
1080 fasttrap_disable_callbacks();
1081 return(0);
1082 }
1083 }
1084
1085 proc_lock(p);
1086 sprunlock(p);
1087
1088 probe->ftp_enabled = 1;
1089 return (0);
1090 }
1091
1092 /*ARGSUSED*/
1093 static void
1094 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1095 {
1096 #pragma unused(arg, id)
1097 fasttrap_probe_t *probe = parg;
1098 fasttrap_provider_t *provider = probe->ftp_prov;
1099 proc_t *p;
1100 int i, whack = 0;
1101
1102 ASSERT(id == probe->ftp_id);
1103
1104 /*
1105 * We won't be able to acquire a /proc-esque lock on the process
1106 * iff the process is dead and gone. In this case, we rely on the
1107 * provider lock as a point of mutual exclusion to prevent other
1108 * DTrace consumers from disabling this probe.
1109 */
1110 if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
1111 // ASSERT(!(p->p_flag & SVFORK));
1112 proc_unlock(p);
1113 }
1114
1115 lck_mtx_lock(&provider->ftp_mtx);
1116
1117 /*
1118 * Disable all the associated tracepoints (for fully enabled probes).
1119 */
1120 if (probe->ftp_enabled) {
1121 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1122 fasttrap_tracepoint_disable(p, probe, i);
1123 }
1124 }
1125
1126 ASSERT(provider->ftp_rcount > 0);
1127 provider->ftp_rcount--;
1128
1129 if (p != NULL) {
1130 /*
1131 * Even though we may not be able to remove it entirely, we
1132 * mark this retired provider to get a chance to remove some
1133 * of the associated probes.
1134 */
1135 if (provider->ftp_retired && !provider->ftp_marked)
1136 whack = provider->ftp_marked = 1;
1137 lck_mtx_unlock(&provider->ftp_mtx);
1138
1139 proc_lock(p);
1140 sprunlock(p);
1141 } else {
1142 /*
1143 * If the process is dead, we're just waiting for the
1144 * last probe to be disabled to be able to free it.
1145 */
1146 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1147 whack = provider->ftp_marked = 1;
1148 lck_mtx_unlock(&provider->ftp_mtx);
1149 }
1150
1151 if (whack)
1152 fasttrap_pid_cleanup();
1153
1154 if (!probe->ftp_enabled)
1155 return;
1156
1157 probe->ftp_enabled = 0;
1158
1159 // ASSERT(MUTEX_HELD(&cpu_lock));
1160 fasttrap_disable_callbacks();
1161 }
1162
1163 /*ARGSUSED*/
1164 static void
1165 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1166 dtrace_argdesc_t *desc)
1167 {
1168 #pragma unused(arg, id)
1169 fasttrap_probe_t *probe = parg;
1170 char *str;
1171 int i, ndx;
1172
1173 desc->dtargd_native[0] = '\0';
1174 desc->dtargd_xlate[0] = '\0';
1175
1176 if (probe->ftp_prov->ftp_retired != 0 ||
1177 desc->dtargd_ndx >= probe->ftp_nargs) {
1178 desc->dtargd_ndx = DTRACE_ARGNONE;
1179 return;
1180 }
1181
1182 ndx = (probe->ftp_argmap != NULL) ?
1183 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1184
1185 str = probe->ftp_ntypes;
1186 for (i = 0; i < ndx; i++) {
1187 str += strlen(str) + 1;
1188 }
1189
1190 (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));
1191
1192 if (probe->ftp_xtypes == NULL)
1193 return;
1194
1195 str = probe->ftp_xtypes;
1196 for (i = 0; i < desc->dtargd_ndx; i++) {
1197 str += strlen(str) + 1;
1198 }
1199
1200 (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
1201 }
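/*
 * Editor's note: ftp_ntypes and ftp_xtypes are packed, NUL-separated
 * string tables (e.g. "int\0char *\0size_t\0" -- example types are
 * hypothetical); the strlen()+1 loops above simply skip forward to the
 * ndx'th entry.
 */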
1202
1203 /*ARGSUSED*/
1204 static void
1205 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1206 {
1207 #pragma unused(arg, id)
1208 fasttrap_probe_t *probe = parg;
1209 unsigned int i;
1210
1211 ASSERT(probe != NULL);
1212 ASSERT(!probe->ftp_enabled);
1213 ASSERT(fasttrap_total >= probe->ftp_ntps);
1214
1215 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1216
1217 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1218 fasttrap_mod_barrier(probe->ftp_gen);
1219
1220 for (i = 0; i < probe->ftp_ntps; i++) {
1221 zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
1222 }
1223
1224 if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1225 zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
1226 } else {
1227 size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1228 kmem_free(probe, size);
1229 }
1230 }
1231
1232
1233 static const dtrace_pattr_t pid_attr = {
1234 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1235 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1236 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1237 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1239 };
1240
1241 static dtrace_pops_t pid_pops = {
1242 fasttrap_pid_provide,
1243 NULL,
1244 fasttrap_pid_enable,
1245 fasttrap_pid_disable,
1246 NULL,
1247 NULL,
1248 fasttrap_pid_getargdesc,
1249 fasttrap_pid_getarg,
1250 NULL,
1251 fasttrap_pid_destroy
1252 };
1253
1254 static dtrace_pops_t usdt_pops = {
1255 fasttrap_pid_provide,
1256 NULL,
1257 fasttrap_pid_enable,
1258 fasttrap_pid_disable,
1259 NULL,
1260 NULL,
1261 fasttrap_pid_getargdesc,
1262 fasttrap_usdt_getarg,
1263 NULL,
1264 fasttrap_pid_destroy
1265 };
1266
1267 static fasttrap_proc_t *
1268 fasttrap_proc_lookup(pid_t pid)
1269 {
1270 fasttrap_bucket_t *bucket;
1271 fasttrap_proc_t *fprc, *new_fprc;
1272
1273 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1274 lck_mtx_lock(&bucket->ftb_mtx);
1275
1276 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1277 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1278 lck_mtx_lock(&fprc->ftpc_mtx);
1279 lck_mtx_unlock(&bucket->ftb_mtx);
1280 fprc->ftpc_rcount++;
1281 atomic_add_64(&fprc->ftpc_acount, 1);
1282 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1283 lck_mtx_unlock(&fprc->ftpc_mtx);
1284
1285 return (fprc);
1286 }
1287 }
1288
1289 /*
1290 * Drop the bucket lock so we don't try to perform a sleeping
1291 * allocation under it.
1292 */
1293 lck_mtx_unlock(&bucket->ftb_mtx);
1294
1295 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1296 ASSERT(new_fprc != NULL);
1297 new_fprc->ftpc_pid = pid;
1298 new_fprc->ftpc_rcount = 1;
1299 new_fprc->ftpc_acount = 1;
1300
1301 lck_mtx_lock(&bucket->ftb_mtx);
1302
1303 /*
1304 * Take another lap through the list to make sure a proc hasn't
1305 * been created for this pid while we weren't under the bucket lock.
1306 */
1307 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1308 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1309 lck_mtx_lock(&fprc->ftpc_mtx);
1310 lck_mtx_unlock(&bucket->ftb_mtx);
1311 fprc->ftpc_rcount++;
1312 atomic_add_64(&fprc->ftpc_acount, 1);
1313 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1314 lck_mtx_unlock(&fprc->ftpc_mtx);
1315
1316 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1317
1318 return (fprc);
1319 }
1320 }
1321
1322 /*
1323 * APPLE NOTE: We have to initialize all locks explicitly
1324 */
1325 lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1326
1327 new_fprc->ftpc_next = bucket->ftb_data;
1328 bucket->ftb_data = new_fprc;
1329
1330 lck_mtx_unlock(&bucket->ftb_mtx);
1331
1332 return (new_fprc);
1333 }
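/*
 * Editor's note: this function (and fasttrap_provider_lookup() below)
 * uses the same optimistic double-checked pattern, sketched here:
 *
 *	lck_mtx_lock(&bucket->ftb_mtx);
 *	if (found) { take ref; unlock; return it; }
 *	lck_mtx_unlock(&bucket->ftb_mtx);	<- can't sleep in the
 *						   allocator while held
 *	new = kmem_zalloc(..., KM_SLEEP);
 *	lck_mtx_lock(&bucket->ftb_mtx);
 *	if (found) { unlock; kmem_free(new); return it; }  <- lost race
 *	insert new; lck_mtx_unlock(&bucket->ftb_mtx);
 */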
1334
1335 static void
1336 fasttrap_proc_release(fasttrap_proc_t *proc)
1337 {
1338 fasttrap_bucket_t *bucket;
1339 fasttrap_proc_t *fprc, **fprcp;
1340 pid_t pid = proc->ftpc_pid;
1341
1342 lck_mtx_lock(&proc->ftpc_mtx);
1343
1344 ASSERT(proc->ftpc_rcount != 0);
1345 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1346
1347 if (--proc->ftpc_rcount != 0) {
1348 lck_mtx_unlock(&proc->ftpc_mtx);
1349 return;
1350 }
1351
1352 lck_mtx_unlock(&proc->ftpc_mtx);
1353
1354 /*
1355 * There should definitely be no live providers associated with this
1356 * process at this point.
1357 */
1358 ASSERT(proc->ftpc_acount == 0);
1359
1360 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1361 lck_mtx_lock(&bucket->ftb_mtx);
1362
1363 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1364 while ((fprc = *fprcp) != NULL) {
1365 if (fprc == proc)
1366 break;
1367
1368 fprcp = &fprc->ftpc_next;
1369 }
1370
1371 /*
1372 * Something strange has happened if we can't find the proc.
1373 */
1374 ASSERT(fprc != NULL);
1375
1376 *fprcp = fprc->ftpc_next;
1377
1378 lck_mtx_unlock(&bucket->ftb_mtx);
1379
1380 /*
1381 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1382 * memory is freed even without the destroy. Maybe accounting cleanup?
1383 */
1384 lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
1385
1386 kmem_free(fprc, sizeof (fasttrap_proc_t));
1387 }
1388
1389 /*
1390 * Lookup a fasttrap-managed provider based on its name and associated proc.
1391 * A reference to the proc must be held for the duration of the call.
1392 * If the pattr argument is non-NULL, this function instantiates the provider
1393 * if it doesn't exist; otherwise it returns NULL. The provider is returned
1394 * with its lock held.
1395 */
1396 static fasttrap_provider_t *
1397 fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
1398 const dtrace_pattr_t *pattr)
1399 {
1400 pid_t pid = p->p_pid;
1401 fasttrap_provider_t *fp, *new_fp = NULL;
1402 fasttrap_bucket_t *bucket;
1403 char provname[DTRACE_PROVNAMELEN];
1404 cred_t *cred;
1405
1406 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1407 ASSERT(pattr != NULL);
1408
1409 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1410 lck_mtx_lock(&bucket->ftb_mtx);
1411
1412 /*
1413 * Take a lap through the list and return the match if we find it.
1414 */
1415 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1416 if (fp->ftp_pid == pid &&
1417 fp->ftp_provider_type == provider_type &&
1418 strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1419 !fp->ftp_retired) {
1420 lck_mtx_lock(&fp->ftp_mtx);
1421 lck_mtx_unlock(&bucket->ftb_mtx);
1422 return (fp);
1423 }
1424 }
1425
1426 /*
1427 * Drop the bucket lock so we don't try to perform a sleeping
1428 * allocation under it.
1429 */
1430 lck_mtx_unlock(&bucket->ftb_mtx);
1431
1432 /*
1433 * Make sure the process isn't a child created as the result
1434 * of a vfork(2), and isn't a zombie (but may be in fork).
1435 */
1436 proc_lock(p);
1437 if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
1438 proc_unlock(p);
1439 return (NULL);
1440 }
1441
1442 /*
1443 * Increment p_dtrace_probes so that the process knows to inform us
1444 * when it exits or execs. fasttrap_provider_free() decrements this
1445 * when we're done with this provider.
1446 */
1447 p->p_dtrace_probes++;
1448
1449 /*
1450 * Grab the credentials for this process so we have
1451 * something to pass to dtrace_register().
1452 * APPLE NOTE: We have no equivalent to crhold,
1453 * even though there is a cr_ref field in ucred.
1454 */
1455 // lck_mtx_lock(&p->p_crlock);
1456 crhold(p->p_ucred);
1457 cred = p->p_ucred;
1458 // lck_mtx_unlock(&p->p_crlock);
1459 proc_unlock(p);
1460
1461 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1462 ASSERT(new_fp != NULL);
1463 new_fp->ftp_pid = p->p_pid;
1464 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1465 new_fp->ftp_provider_type = provider_type;
1466
1467 /*
1468 * APPLE NOTE: locks require explicit init
1469 */
1470 lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1471 lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
1472
1473 ASSERT(new_fp->ftp_proc != NULL);
1474
1475 lck_mtx_lock(&bucket->ftb_mtx);
1476
1477 /*
1478 * Take another lap through the list to make sure a provider hasn't
1479 * been created for this pid while we weren't under the bucket lock.
1480 */
1481 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1482 if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1483 !fp->ftp_retired) {
1484 lck_mtx_lock(&fp->ftp_mtx);
1485 lck_mtx_unlock(&bucket->ftb_mtx);
1486 fasttrap_provider_free(new_fp);
1487 crfree(cred);
1488 return (fp);
1489 }
1490 }
1491
1492 (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));
1493
1494 /*
1495 * Fail and return NULL if either the provider name is too long
1496 * or we fail to register this new provider with the DTrace
1497 * framework. Note that this is the only place we ever construct
1498 * the full provider name -- we keep it in pieces in the provider
1499 * structure.
1500 */
1501 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1502 (int)sizeof (provname) ||
1503 dtrace_register(provname, pattr,
1504 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1505 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1506 &new_fp->ftp_provid) != 0) {
1507 lck_mtx_unlock(&bucket->ftb_mtx);
1508 fasttrap_provider_free(new_fp);
1509 crfree(cred);
1510 return (NULL);
1511 }
1512
1513 new_fp->ftp_next = bucket->ftb_data;
1514 bucket->ftb_data = new_fp;
1515
1516 lck_mtx_lock(&new_fp->ftp_mtx);
1517 lck_mtx_unlock(&bucket->ftb_mtx);
1518
1519 crfree(cred);
1520 return (new_fp);
1521 }
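/*
 * Editor's note: the snprintf("%s%u", name, pid) above is the only
 * place the full provider name exists; e.g. process 123's pid provider
 * registers with DTrace as "pid123", while the provider struct keeps
 * only the two pieces.
 */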
1522
1523 static void
1524 fasttrap_provider_free(fasttrap_provider_t *provider)
1525 {
1526 pid_t pid = provider->ftp_pid;
1527 proc_t *p;
1528
1529 /*
1530 * There need to be no associated enabled probes, no consumers
1531 * creating probes, and no meta providers referencing this provider.
1532 */
1533 ASSERT(provider->ftp_rcount == 0);
1534 ASSERT(provider->ftp_ccount == 0);
1535 ASSERT(provider->ftp_mcount == 0);
1536
1537 /*
1538 * If this provider hasn't been retired, we need to explicitly drop the
1539 * count of active providers on the associated process structure.
1540 */
1541 if (!provider->ftp_retired) {
1542 atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
1543 ASSERT(provider->ftp_proc->ftpc_acount <
1544 provider->ftp_proc->ftpc_rcount);
1545 }
1546
1547 fasttrap_proc_release(provider->ftp_proc);
1548
1549 /*
1550 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1551 * memory is freed even without the destroy. Maybe accounting cleanup?
1552 */
1553 lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
1554 lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
1555
1556 kmem_free(provider, sizeof (fasttrap_provider_t));
1557
1558 /*
1559 * Decrement p_dtrace_probes on the process whose provider we're
1560 * freeing. We don't have to worry about clobbering someone else's
1561 * modifications to it because we have locked the bucket that
1562 * corresponds to this process's hash chain in the provider hash
1563 * table. Don't sweat it if we can't find the process.
1564 */
1565 if ((p = proc_find(pid)) == NULL) {
1566 return;
1567 }
1568
1569 proc_lock(p);
1570 p->p_dtrace_probes--;
1571 proc_unlock(p);
1572
1573 proc_rele(p);
1574 }
1575
1576 static void
1577 fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
1578 {
1579 fasttrap_provider_t *fp;
1580 fasttrap_bucket_t *bucket;
1581 dtrace_provider_id_t provid;
1582
1583 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1584
1585 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
1586 lck_mtx_lock(&bucket->ftb_mtx);
1587
1588 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1589 if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1590 !fp->ftp_retired)
1591 break;
1592 }
1593
1594 if (fp == NULL) {
1595 lck_mtx_unlock(&bucket->ftb_mtx);
1596 return;
1597 }
1598
1599 lck_mtx_lock(&fp->ftp_mtx);
1600 ASSERT(!mprov || fp->ftp_mcount > 0);
1601 if (mprov && --fp->ftp_mcount != 0) {
1602 lck_mtx_unlock(&fp->ftp_mtx);
1603 lck_mtx_unlock(&bucket->ftb_mtx);
1604 return;
1605 }
1606
1607 /*
1608 * Mark the provider to be removed in our post-processing step, mark it
1609 * retired, and drop the active count on its proc. Marking it indicates
1610 * that we should try to remove it; setting the retired flag indicates
1611 * that we're done with this provider; dropping the active count on the proc
1612 * releases our hold, and when this reaches zero (as it will during
1613 * exit or exec) the proc and associated providers become defunct.
1614 *
1615 * We obviously need to take the bucket lock before the provider lock
1616 * to perform the lookup, but we need to drop the provider lock
1617 * before calling into the DTrace framework since we acquire the
1618 * provider lock in callbacks invoked from the DTrace framework. The
1619 * bucket lock therefore protects the integrity of the provider hash
1620 * table.
1621 */
1622 atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
1623 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1624
1625 fp->ftp_retired = 1;
1626 fp->ftp_marked = 1;
1627 provid = fp->ftp_provid;
1628 lck_mtx_unlock(&fp->ftp_mtx);
1629
1630 /*
1631 * We don't have to worry about invalidating the same provider twice
1632 * since fasttrap_provider_lookup() will ignore providers that have
1633 * been marked as retired.
1634 */
1635 dtrace_invalidate(provid);
1636
1637 lck_mtx_unlock(&bucket->ftb_mtx);
1638
1639 fasttrap_pid_cleanup();
1640 }
1641
1642 static int
1643 fasttrap_uint32_cmp(const void *ap, const void *bp)
1644 {
1645 return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1646 }
1647
1648 static int
1649 fasttrap_uint64_cmp(const void *ap, const void *bp)
1650 {
1651 return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1652 }
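/*
 * Editor's note: the comparators return the sign of the comparison as
 * (a > b) - (a < b) rather than by subtraction, since unsigned
 * subtraction can wrap (and, for uint64_t, truncate to int) and hand
 * qsort() an inconsistent ordering.
 */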
1653
1654 static int
1655 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1656 {
1657 proc_t *p;
1658 fasttrap_provider_t *provider;
1659 fasttrap_probe_t *pp;
1660 fasttrap_tracepoint_t *tp;
1661 const char *name;
1662 unsigned int i, aframes, whack;
1663
1664 /*
1665 * There needs to be at least one desired trace point.
1666 */
1667 if (pdata->ftps_noffs == 0)
1668 return (EINVAL);
1669
1670 switch (pdata->ftps_probe_type) {
1671 case DTFTP_ENTRY:
1672 name = "entry";
1673 aframes = FASTTRAP_ENTRY_AFRAMES;
1674 break;
1675 case DTFTP_RETURN:
1676 name = "return";
1677 aframes = FASTTRAP_RETURN_AFRAMES;
1678 break;
1679 case DTFTP_OFFSETS:
1680 aframes = 0;
1681 name = NULL;
1682 break;
1683 default:
1684 return (EINVAL);
1685 }
1686
1687 const char* provider_name;
1688 switch (pdata->ftps_provider_type) {
1689 case DTFTP_PROVIDER_PID:
1690 provider_name = FASTTRAP_PID_NAME;
1691 break;
1692 case DTFTP_PROVIDER_OBJC:
1693 provider_name = FASTTRAP_OBJC_NAME;
1694 break;
1695 case DTFTP_PROVIDER_ONESHOT:
1696 provider_name = FASTTRAP_ONESHOT_NAME;
1697 break;
1698 default:
1699 return (EINVAL);
1700 }
1701
1702 p = proc_find(pdata->ftps_pid);
1703 if (p == PROC_NULL)
1704 return (ESRCH);
1705
1706 if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
1707 provider_name, &pid_attr)) == NULL)
1708 return (ESRCH);
1709
1710 proc_rele(p);
1711 /*
1712 * Increment this reference count to indicate that a consumer is
1713 * actively adding a new probe associated with this provider. This
1714 * prevents the provider from being deleted -- we'll need to check
1715 * for pending deletions when we drop this reference count.
1716 */
1717 provider->ftp_ccount++;
1718 lck_mtx_unlock(&provider->ftp_mtx);
1719
1720 /*
1721 * Grab the creation lock to ensure consistency between calls to
1722 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1723 * other threads creating probes. We must drop the provider lock
1724 * before taking this lock to avoid a three-way deadlock with the
1725 * DTrace framework.
1726 */
1727 lck_mtx_lock(&provider->ftp_cmtx);
1728
1729 if (name == NULL) {
1730 for (i = 0; i < pdata->ftps_noffs; i++) {
1731 char name_str[17];
1732
1733 (void) snprintf(name_str, sizeof(name_str), "%llx",
1734 (uint64_t)pdata->ftps_offs[i]);
1735
1736 if (dtrace_probe_lookup(provider->ftp_provid,
1737 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1738 continue;
1739
1740 atomic_add_32(&fasttrap_total, 1);
1741
1742 if (fasttrap_total > fasttrap_max) {
1743 atomic_add_32(&fasttrap_total, -1);
1744 goto no_mem;
1745 }
1746
1747 pp = zalloc(fasttrap_probe_t_zones[1]);
1748 bzero(pp, sizeof (fasttrap_probe_t));
1749
1750 pp->ftp_prov = provider;
1751 pp->ftp_faddr = pdata->ftps_pc;
1752 pp->ftp_fsize = pdata->ftps_size;
1753 pp->ftp_pid = pdata->ftps_pid;
1754 pp->ftp_ntps = 1;
1755
1756 tp = zalloc(fasttrap_tracepoint_t_zone);
1757 bzero(tp, sizeof (fasttrap_tracepoint_t));
1758
1759 tp->ftt_proc = provider->ftp_proc;
1760 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1761 tp->ftt_pid = pdata->ftps_pid;
1762
1763
1764 pp->ftp_tps[0].fit_tp = tp;
1765 pp->ftp_tps[0].fit_id.fti_probe = pp;
1766 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1767 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1768 pdata->ftps_mod, pdata->ftps_func, name_str,
1769 FASTTRAP_OFFSET_AFRAMES, pp);
1770 }
1771
1772 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1773 pdata->ftps_func, name) == 0) {
1774 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1775
1776 if (fasttrap_total > fasttrap_max) {
1777 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1778 goto no_mem;
1779 }
1780
1781 /*
1782 * Make sure all tracepoint program counter values are unique.
1783 * We later assume that each probe has exactly one tracepoint
1784 * for a given pc.
1785 */
1786 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1787 sizeof (uint64_t), fasttrap_uint64_cmp);
1788 for (i = 1; i < pdata->ftps_noffs; i++) {
1789 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1790 continue;
1791
1792 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1793 goto no_mem;
1794 }
1795
1796 ASSERT(pdata->ftps_noffs > 0);
1797 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1798 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
1799 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
1800 } else {
1801 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1802 }
1803
1804 pp->ftp_prov = provider;
1805 pp->ftp_faddr = pdata->ftps_pc;
1806 pp->ftp_fsize = pdata->ftps_size;
1807 pp->ftp_pid = pdata->ftps_pid;
1808 pp->ftp_ntps = pdata->ftps_noffs;
1809
1810 for (i = 0; i < pdata->ftps_noffs; i++) {
1811 tp = zalloc(fasttrap_tracepoint_t_zone);
1812 bzero(tp, sizeof (fasttrap_tracepoint_t));
1813 tp->ftt_proc = provider->ftp_proc;
1814 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1815 tp->ftt_pid = pdata->ftps_pid;
1816
1817 pp->ftp_tps[i].fit_tp = tp;
1818 pp->ftp_tps[i].fit_id.fti_probe = pp;
1819 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
1820 }
1821
1822 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1823 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1824 }
1825
1826 lck_mtx_unlock(&provider->ftp_cmtx);
1827
1828 /*
1829 * We know that the provider is still valid since we incremented the
1830 * creation reference count. If someone tried to clean up this provider
1831 * while we were using it (e.g. because the process called exec(2) or
1832 * exit(2)), take note of that and try to clean it up now.
1833 */
1834 lck_mtx_lock(&provider->ftp_mtx);
1835 provider->ftp_ccount--;
1836 whack = provider->ftp_retired;
1837 lck_mtx_unlock(&provider->ftp_mtx);
1838
1839 if (whack)
1840 fasttrap_pid_cleanup();
1841
1842 return (0);
1843
1844 no_mem:
1845 /*
1846 * If we've exhausted the allowable resources, we'll try to remove
1847 * this provider to free some up. This is to cover the case where
1848 * the user has accidentally created many more probes than was
1849 * intended (e.g. pid123:::).
1850 */
1851 lck_mtx_unlock(&provider->ftp_cmtx);
1852 lck_mtx_lock(&provider->ftp_mtx);
1853 provider->ftp_ccount--;
1854 provider->ftp_marked = 1;
1855 lck_mtx_unlock(&provider->ftp_mtx);
1856
1857 fasttrap_pid_cleanup();
1858
1859 return (ENOMEM);
1860 }
1861
1862 /*ARGSUSED*/
1863 static void *
1864 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
1865 {
1866 #pragma unused(arg)
1867 fasttrap_provider_t *provider;
1868
1869 /*
1870 * A 32-bit unsigned integer (like a pid for example) can be
1871 * expressed in 10 or fewer decimal digits. Make sure that we'll
1872 * have enough space for the provider name.
1873 */
1874 if (strlen(dhpv->dthpv_provname) + 10 >=
1875 sizeof (provider->ftp_name)) {
1876 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1877 "name too long to accomodate pid", dhpv->dthpv_provname);
1878 return (NULL);
1879 }
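/*
 * Worked example (added for clarity): UINT32_MAX is 4294967295, ten
 * decimal digits, so a provider name such as "mysql" plus a pid renders
 * as at most "mysql4294967295". The check above guarantees that this
 * concatenation, including its terminating NUL, fits in ftp_name.
 */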
1880
1881 /*
1882 * Don't let folks spoof the true pid provider.
1883 */
1884 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
1885 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1886 "%s is an invalid name", dhpv->dthpv_provname,
1887 FASTTRAP_PID_NAME);
1888 return (NULL);
1889 }
1890
1891 /*
1892 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
1893 */
1894 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
1895 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1896 "%s is an invalid name", dhpv->dthpv_provname,
1897 FASTTRAP_OBJC_NAME);
1898 return (NULL);
1899 }
1900 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
1901 cmn_err(CE_WARN, "failed to instantiate provider %s: "
1902 "%s is an invalid name", dhpv->dthpv_provname,
1903 FASTTRAP_ONESHOT_NAME);
1904 return (NULL);
1905 }
1906
1907 /*
1908 * The highest stability class that fasttrap supports is ISA; cap
1909 * the stability of the new provider accordingly.
1910 */
1911 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
1912 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
1913 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
1914 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
1915 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
1916 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
1917 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
1918 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
1919 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
1920 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
1921
1922 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
1923 &dhpv->dthpv_pattr)) == NULL) {
1924 cmn_err(CE_WARN, "failed to instantiate provider %s for "
1925 "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
1926 return (NULL);
1927 }
1928
1929 /*
1930 * APPLE NOTE!
1931 *
1932 * USDT probes (fasttrap meta probes) are very expensive to create.
1933 * Profiling has shown that the largest single cost is verifying that
1934 * dtrace hasn't already created a given meta_probe. The reason for
1935 * this is dtrace_match() often has to strcmp ~100 hashed entries for
1936 * each static probe being created. We want to get rid of that check.
1937 * The simplest way of eliminating it is to deny the ability to add
1938 * probes to an existing provider. If the provider already exists, BZZT!
1939 * This still leaves the possibility of intentionally malformed DOF
1940 * having duplicate probes. However, duplicate probes are not fatal,
1941 * and there is no way to get that by accident, so we will not check
1942 * for that case.
1943 *
1944 * UPDATE: It turns out there are several use cases that require adding
1945 * probes to existing providers. Disabling the dtrace_probe_lookup()
1946 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
1947 */
1948
1949 /*
1950 * Up the meta provider count so this provider isn't removed until
1951 * the meta provider has been told to remove it.
1952 */
1953 provider->ftp_mcount++;
1954
1955 lck_mtx_unlock(&provider->ftp_mtx);
1956
1957 return (provider);
1958 }
1959
1960 /*ARGSUSED*/
1961 static void
1962 fasttrap_meta_create_probe(void *arg, void *parg,
1963 dtrace_helper_probedesc_t *dhpb)
1964 {
1965 #pragma unused(arg)
1966 fasttrap_provider_t *provider = parg;
1967 fasttrap_probe_t *pp;
1968 fasttrap_tracepoint_t *tp;
1969 unsigned int i, j;
1970 uint32_t ntps;
1971
1972 /*
1973 * Since the meta provider count is non-zero we don't have to worry
1974 * about this provider disappearing.
1975 */
1976 ASSERT(provider->ftp_mcount > 0);
1977
1978 /*
1979 * The offsets must be unique.
1980 */
1981 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
1982 fasttrap_uint32_cmp);
1983 for (i = 1; i < dhpb->dthpb_noffs; i++) {
1984 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
1985 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
1986 return;
1987 }
1988
1989 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
1990 fasttrap_uint32_cmp);
1991 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
1992 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
1993 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
1994 return;
1995 }
1996
1997 /*
1998 * Grab the creation lock to ensure consistency between calls to
1999 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2000 * other threads creating probes.
2001 */
2002 lck_mtx_lock(&provider->ftp_cmtx);
2003
2004 #if 0
2005 /*
2006 * APPLE NOTE: This is hideously expensive. See note in
2007 * fasttrap_meta_provide() for why we can get away without
2008 * checking here.
2009 */
2010 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2011 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2012 lck_mtx_unlock(&provider->ftp_cmtx);
2013 return;
2014 }
2015 #endif
2016
2017 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2018 ASSERT(ntps > 0);
2019
2020 atomic_add_32(&fasttrap_total, ntps);
2021
2022 if (fasttrap_total > fasttrap_max) {
2023 atomic_add_32(&fasttrap_total, -ntps);
2024 lck_mtx_unlock(&provider->ftp_cmtx);
2025 return;
2026 }
2027
2028 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2029 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2030 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2031 } else {
2032 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2033 }
2034
2035 pp->ftp_prov = provider;
2036 pp->ftp_pid = provider->ftp_pid;
2037 pp->ftp_ntps = ntps;
2038 pp->ftp_nargs = dhpb->dthpb_xargc;
2039 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2040 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2041
2042 /*
2043 * First create a tracepoint for each actual point of interest.
2044 */
2045 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2046 tp = zalloc(fasttrap_tracepoint_t_zone);
2047 bzero(tp, sizeof (fasttrap_tracepoint_t));
2048
2049 tp->ftt_proc = provider->ftp_proc;
2050
2051 /*
2052 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2053 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2054 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2055 */
2056 #if defined(__x86_64__)
2057 /*
2058 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2059 */
2060 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2061 #else
2062 #error "Architecture not supported"
2063 #endif
2064
2065 tp->ftt_pid = provider->ftp_pid;
2066
2067 pp->ftp_tps[i].fit_tp = tp;
2068 pp->ftp_tps[i].fit_id.fti_probe = pp;
2069 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2070 }
2071
2072 /*
2073 * Then create a tracepoint for each is-enabled point.
2074 */
2075 for (j = 0; i < ntps; i++, j++) {
2076 tp = zalloc(fasttrap_tracepoint_t_zone);
2077 bzero(tp, sizeof (fasttrap_tracepoint_t));
2078
2079 tp->ftt_proc = provider->ftp_proc;
2080
2081 /*
2082 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2083 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2084 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2085 */
2086 #if defined(__x86_64__)
2087 /*
2088 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2089 */
2090 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2091 #else
2092 #error "Architecture not supported"
2093 #endif
2094
2095 tp->ftt_pid = provider->ftp_pid;
2096
2097 pp->ftp_tps[i].fit_tp = tp;
2098 pp->ftp_tps[i].fit_id.fti_probe = pp;
2099 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2100 }
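/*
 * Background note (added; hedged): on x86_64, a USDT is-enabled site is
 * conventionally emitted as a two-byte "xorl %eax, %eax" followed by
 * padding nops, i.e. the probe "reports" 0 when untraced. The +2 fixup
 * above plants the tracepoint on the nop just past the xor; when the
 * probe fires, fasttrap arranges for a non-zero value instead.
 */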
2101
2102 /*
2103 * If the arguments are shuffled around we set the argument remapping
2104 * table. Later, when the probe fires, we only remap the arguments
2105 * if the table is non-NULL.
2106 */
2107 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2108 if (dhpb->dthpb_args[i] != i) {
2109 pp->ftp_argmap = dhpb->dthpb_args;
2110 break;
2111 }
2112 }
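/*
 * Example (added for clarity): for a translated probe whose native
 * arguments map as args[] = { 1, 0, 2 }, the loop above detects
 * args[0] != 0 and publishes dthpb_args as ftp_argmap, so arg0 and arg1
 * are swapped at fire time; an identity map { 0, 1, 2 } leaves
 * ftp_argmap NULL and skips remapping entirely.
 */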
2113
2114 /*
2115 * The probe is fully constructed -- register it with DTrace.
2116 */
2117 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2118 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2119
2120 lck_mtx_unlock(&provider->ftp_cmtx);
2121 }
2122
2123 /*ARGSUSED*/
2124 static void
2125 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2126 {
2127 #pragma unused(arg)
2128 /*
2129 * Clean up the USDT provider. There may be active consumers of the
2130 * provider busy adding probes; no damage will actually befall the
2131 * provider until that count has dropped to zero. This just puts
2132 * the provider on death row.
2133 */
2134 fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
2135 }
2136
2137 static char*
2138 fasttrap_meta_provider_name(void *arg)
2139 {
2140 fasttrap_provider_t *fprovider = arg;
2141 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2142 return provider->dtpv_name;
2143 }
2144
2145 static dtrace_mops_t fasttrap_mops = {
2146 fasttrap_meta_create_probe,
2147 fasttrap_meta_provide,
2148 fasttrap_meta_remove,
2149 fasttrap_meta_provider_name
2150 };
2151
2152 /*
2153 * Validate a null-terminated string. If str is not null-terminated
2154 * or is not a valid UTF8 string, the function returns -1. Otherwise,
2155 * 0 is returned.
2156 *
2157 * str: string to validate.
2158 * maxlen: maximum length of the string, terminating null byte included.
2159 */
2160 static int
2161 fasttrap_validatestr(char const* str, size_t maxlen) {
2162 size_t len;
2163
2164 assert(str);
2165 assert(maxlen != 0);
2166
2167 /* Check if the string is null-terminated. */
2168 len = strnlen(str, maxlen);
2169 if (len >= maxlen)
2170 return -1;
2171
2172 /* Finally, check for UTF8 validity. */
2173 return utf8_validatestr((unsigned const char*) str, len);
2174 }
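/*
 * Worked example (added for clarity): with maxlen == 4, strnlen("abc", 4)
 * returns 3 and the string proceeds to the UTF8 check, while
 * strnlen("abcd", 4) returns 4 == maxlen, meaning no NUL was found within
 * the buffer, so the function rejects it with -1 before ever scanning for
 * UTF8 validity.
 */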
2175
2176 /*ARGSUSED*/
2177 static int
2178 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2179 {
2180 #pragma unused(dev, md, rv)
2181 if (!dtrace_attached())
2182 return (EAGAIN);
2183
2184 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2185 fasttrap_probe_spec_t *probe;
2186 uint64_t noffs;
2187 size_t size;
2188 int ret;
2189
2190 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2191 sizeof (probe->ftps_noffs)))
2192 return (EFAULT);
2193
2194 /*
2195 * Probes must have at least one tracepoint.
2196 */
2197 if (noffs == 0)
2198 return (EINVAL);
2199
2200 /*
2201 * Range-check noffs before doing the sizing math, to prevent
2202 * an integer overflow from undersizing the buffer.
2203 */
2204 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2205 return (ENOMEM);
2206
2207 size = sizeof (fasttrap_probe_spec_t) +
2208 sizeof (probe->ftps_offs[0]) * (noffs - 1);
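/*
 * Worked example (added for clarity): ftps_offs[0] is already counted
 * inside sizeof (fasttrap_probe_spec_t), hence the (noffs - 1). For
 * noffs == 3 that adds 16 bytes of uint64_t offsets, and the bound
 * checked above caps the total allocation at just under 1MB.
 */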
2209
2210 probe = kmem_alloc(size, KM_SLEEP);
2211
2212 if (copyin(arg, probe, size) != 0 ||
2213 probe->ftps_noffs != noffs) {
2214 kmem_free(probe, size);
2215 return (EFAULT);
2216 }
2217
2218 /*
2219 * Verify that the function and module strings contain no
2220 * funny characters.
2221 */
2222
2223 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2224 ret = EINVAL;
2225 goto err;
2226 }
2227
2228 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2229 ret = EINVAL;
2230 goto err;
2231 }
2232
2233 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2234 proc_t *p;
2235 pid_t pid = probe->ftps_pid;
2236
2237 /*
2238 * Report an error if the process doesn't exist
2239 * or is actively being birthed.
2240 */
2241 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2242 if (p != PROC_NULL)
2243 proc_rele(p);
2244 ret = ESRCH;
2245 goto err;
2246 }
2247 // proc_lock(p);
2248 // FIXME! How is this done on OS X?
2249 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2250 // VREAD | VWRITE)) != 0) {
2251 // mutex_exit(&p->p_lock);
2252 // return (ret);
2253 // }
2254 // proc_unlock(p);
2255 proc_rele(p);
2256 }
2257
2258 ret = fasttrap_add_probe(probe);
2259
2260 err:
2261 kmem_free(probe, size);
2262
2263 return (ret);
2264
2265 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2266 fasttrap_instr_query_t instr;
2267 fasttrap_tracepoint_t *tp;
2268 uint_t index;
2269 // int ret;
2270
2271 if (copyin(arg, &instr, sizeof (instr)) != 0)
2272 return (EFAULT);
2273
2274 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2275 proc_t *p;
2276 pid_t pid = instr.ftiq_pid;
2277
2278 /*
2279 * Report an error if the process doesn't exist
2280 * or is actively being birthed.
2281 */
2282 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2283 if (p != PROC_NULL)
2284 proc_rele(p);
2285 return (ESRCH);
2286 }
2287 //proc_lock(p);
2288 // FIXME! How is this done on OS X?
2289 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2290 // VREAD)) != 0) {
2291 // mutex_exit(&p->p_lock);
2292 // return (ret);
2293 // }
2294 // proc_unlock(p);
2295 proc_rele(p);
2296 }
2297
2298 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2299
2300 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2301 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2302 while (tp != NULL) {
2303 if (instr.ftiq_pid == tp->ftt_pid &&
2304 instr.ftiq_pc == tp->ftt_pc &&
2305 tp->ftt_proc->ftpc_acount != 0)
2306 break;
2307
2308 tp = tp->ftt_next;
2309 }
2310
2311 if (tp == NULL) {
2312 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2313 return (ENOENT);
2314 }
2315
2316 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2317 sizeof (instr.ftiq_instr));
2318 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2319
2320 if (copyout(&instr, arg, sizeof (instr)) != 0)
2321 return (EFAULT);
2322
2323 return (0);
2324 }
2325
2326 return (EINVAL);
2327 }
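/*
 * Hypothetical user-space sketch (added for illustration only; wrapped in
 * #if 0 so it stays out of the kernel build). It shows one plausible way a
 * consumer could drive the FASTTRAPIOC_MAKEPROBE path handled above. The
 * offsets and names are made up, the visibility of sys/fasttrap.h to user
 * code is assumed, and error handling is elided.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/fasttrap.h>

static int
make_probe_sketch(pid_t pid, uint64_t func_base)
{
	/* sizeof() already counts ftps_offs[0], so add room for one more. */
	size_t size = sizeof (fasttrap_probe_spec_t) + sizeof (uint64_t);
	fasttrap_probe_spec_t *probe = calloc(1, size);
	int fd, ret;

	probe->ftps_pid = pid;
	probe->ftps_probe_type = DTFTP_OFFSETS;
	probe->ftps_pc = func_base;	/* base address of the target function */
	probe->ftps_noffs = 2;		/* two tracepoints... */
	probe->ftps_offs[0] = 0x0;	/* ...one at function entry... */
	probe->ftps_offs[1] = 0x2a;	/* ...one at a made-up interior pc */
	strlcpy(probe->ftps_mod, "a.out", sizeof (probe->ftps_mod));
	strlcpy(probe->ftps_func, "main", sizeof (probe->ftps_func));

	/*
	 * Assumption: the Darwin shim (_fasttrap_ioctl, below) copies in
	 * the user pointer value itself, so we pass the address of our
	 * pointer rather than the buffer.
	 */
	fd = open("/dev/fasttrap", O_RDWR);
	ret = ioctl(fd, FASTTRAPIOC_MAKEPROBE, &probe);
	close(fd);
	free(probe);
	return (ret);
}
#endif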
2328
2329 static int
2330 fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
2331 {
2332 ulong_t nent;
2333
2334 switch (cmd) {
2335 case DDI_ATTACH:
2336 break;
2337 case DDI_RESUME:
2338 return (DDI_SUCCESS);
2339 default:
2340 return (DDI_FAILURE);
2341 }
2342
2343 ddi_report_dev(devi);
2344 fasttrap_devi = devi;
2345
2346 /*
2347 * Install our hooks into fork(2), exec(2), and exit(2).
2348 */
2349 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2350 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2351 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2352
2353 /*
2354 * APPLE NOTE: We size the maximum number of fasttrap probes
2355 * based on system memory: 100k probes per 256MB of system memory.
2356 * Yes, this is a WAG.
2357 */
2358 fasttrap_max = (sane_size >> 28) * 100000;
2359 if (fasttrap_max == 0)
2360 fasttrap_max = 50000;
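/*
 * Worked example (added for clarity): sane_size >> 28 is the number of
 * 256MB chunks of physical memory, so an 8GB machine gets
 * 32 * 100000 = 3.2 million probes, while anything under 256MB falls
 * back to the 50000-probe floor.
 */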
2361
2362 fasttrap_total = 0;
2363
2364 /*
2365 * Conjure up the tracepoints hashtable...
2366 */
2367 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2368 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2369
2370 if (nent <= 0 || nent > 0x1000000)
2371 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2372
2373 if ((nent & (nent - 1)) == 0)
2374 fasttrap_tpoints.fth_nent = nent;
2375 else
2376 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2377 ASSERT(fasttrap_tpoints.fth_nent > 0);
2378 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
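/*
 * Worked example (added for clarity): a tunable of nent == 1000 is not a
 * power of two, so fth_nent becomes 1 << fasttrap_highbit(1000) == 1024,
 * and fth_mask == 1023 lets bucket selection use a cheap bitwise AND
 * instead of a modulo.
 */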
2379 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2380 sizeof (fasttrap_bucket_t), KM_SLEEP);
2381 ASSERT(fasttrap_tpoints.fth_table != NULL);
2382
2383 /*
2384 * APPLE NOTE: explicitly initialize all locks...
2385 */
2386 unsigned int i;
2387 for (i=0; i<fasttrap_tpoints.fth_nent; i++) {
2388 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2389 }
2390
2391 /*
2392 * ... and the providers hash table...
2393 */
2394 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2395 if ((nent & (nent - 1)) == 0)
2396 fasttrap_provs.fth_nent = nent;
2397 else
2398 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2399 ASSERT(fasttrap_provs.fth_nent > 0);
2400 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2401 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2402 sizeof (fasttrap_bucket_t), KM_SLEEP);
2403 ASSERT(fasttrap_provs.fth_table != NULL);
2404
2405 /*
2406 * APPLE NOTE: explicitly initialize all locks...
2407 */
2408 for (i=0; i<fasttrap_provs.fth_nent; i++) {
2409 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2410 }
2411
2412 /*
2413 * ... and the procs hash table.
2414 */
2415 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2416 if ((nent & (nent - 1)) == 0)
2417 fasttrap_procs.fth_nent = nent;
2418 else
2419 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2420 ASSERT(fasttrap_procs.fth_nent > 0);
2421 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2422 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2423 sizeof (fasttrap_bucket_t), KM_SLEEP);
2424 ASSERT(fasttrap_procs.fth_table != NULL);
2425
2426 /*
2427 * APPLE NOTE: explicitly initialize all locks...
2428 */
2429 for (i=0; i<fasttrap_procs.fth_nent; i++) {
2430 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2431 }
2432
2433 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2434 &fasttrap_meta_id);
2435
2436 return (DDI_SUCCESS);
2437 }
2438
2439 static int
2440 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2441 {
2442 #pragma unused(dev, flags, devtype, p)
2443 return 0;
2444 }
2445
2446 static int
2447 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2448 {
2449 int err, rv = 0;
2450 user_addr_t uaddrp;
2451
2452 if (proc_is64bit(p))
2453 uaddrp = *(user_addr_t *)data;
2454 else
2455 uaddrp = (user_addr_t) *(uint32_t *)data;
2456
2457 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2458
2459 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2460 if (err != 0) {
2461 ASSERT( (err & 0xfffff000) == 0 );
2462 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2463 } else if (rv != 0) {
2464 ASSERT( (rv & 0xfff00000) == 0 );
2465 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2466 } else
2467 return 0;
2468 }
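/*
 * Decoding sketch (added for illustration; hedged): under the scheme above,
 * a caller that sees ioctl(2) fail can recover the Solaris-style result
 * from errno, e.g.:
 *
 *	if (ioctl(fd, cmd, &arg) == -1) {
 *		if (errno < 4096)
 *			err = errno;		// genuine error code
 *		else
 *			rv = errno >> 12;	// overloaded return value
 *	}
 */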
2469
2470 static int gFasttrapInited = 0;
2471
2472 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2473
2474 /*
2475 * A struct describing which functions will get invoked for certain
2476 * actions.
2477 */
2478
2479 static struct cdevsw fasttrap_cdevsw =
2480 {
2481 _fasttrap_open, /* open */
2482 eno_opcl, /* close */
2483 eno_rdwrt, /* read */
2484 eno_rdwrt, /* write */
2485 _fasttrap_ioctl, /* ioctl */
2486 (stop_fcn_t *)nulldev, /* stop */
2487 (reset_fcn_t *)nulldev, /* reset */
2488 NULL, /* tty's */
2489 eno_select, /* select */
2490 eno_mmap, /* mmap */
2491 eno_strat, /* strategy */
2492 eno_getc, /* getc */
2493 eno_putc, /* putc */
2494 0 /* type */
2495 };
2496
2497 void fasttrap_init(void);
2498
2499 void
2500 fasttrap_init( void )
2501 {
2502 /*
2503 * This method is now invoked from multiple places: any open of /dev/dtrace,
2504 * and also from dtrace_init if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2505 *
2506 * The reason is to delay allocating these (rather large) resources for as long as possible.
2507 */
2508 if (0 == gFasttrapInited) {
2509 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2510
2511 if (majdevno < 0) {
2512 // FIX ME! What kind of error reporting to do here?
2513 printf("fasttrap_init: failed to allocate a major number!\n");
2514 return;
2515 }
2516
2517 dev_t device = makedev( (uint32_t)majdevno, 0 );
2518 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2519 return;
2520 }
2521
2522 /*
2523 * Allocate the fasttrap_tracepoint_t zone
2524 */
2525 fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2526 1024 * sizeof(fasttrap_tracepoint_t),
2527 sizeof(fasttrap_tracepoint_t),
2528 "dtrace.fasttrap_tracepoint_t");
2529
2530 /*
2531 * fasttrap_probe_t's are variable in size. We use an array of zones to
2532 * cover the most common sizes.
2533 */
2534 int i;
2535 for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2536 size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2537 fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2538 1024 * zone_element_size,
2539 zone_element_size,
2540 fasttrap_probe_t_zone_names[i]);
2541 }
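/*
 * Sizing example (added for clarity): fasttrap_probe_t_zones[i] hands out
 * elements of exactly offsetof(fasttrap_probe_t, ftp_tps[i]) bytes, so a
 * probe with 4 tracepoints is served from zones[4]; zones[0] is
 * intentionally left uninitialized since every probe has at least one
 * tracepoint.
 */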
2542
2543
2544 /*
2545 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2546 */
2547 fasttrap_lck_attr = lck_attr_alloc_init();
2548 fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
2549 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2550
2551 /*
2552 * Initialize global locks
2553 */
2554 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2555 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2556
2557 if (DDI_FAILURE == fasttrap_attach((dev_info_t *)(uintptr_t)device, 0 )) {
2558 // FIX ME! Do we remove the devfs node here?
2559 // What kind of error reporting?
2560 printf("fasttrap_init: Call to fasttrap_attach failed.\n");
2561 return;
2562 }
2563
2564 gFasttrapInited = 1;
2565 }
2566 }
2567
2568 #undef FASTTRAP_MAJOR