1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * #pragma ident "@(#)fasttrap.c 1.26 08/04/21 SMI"
29 */
30
31 #include <sys/types.h>
32 #include <sys/time.h>
33
34 #include <sys/codesign.h>
35 #include <sys/errno.h>
36 #include <sys/stat.h>
37 #include <sys/conf.h>
38 #include <sys/systm.h>
39 #include <sys/kauth.h>
40 #include <sys/utfconv.h>
41
42 #include <sys/fasttrap.h>
43 #include <sys/fasttrap_impl.h>
44 #include <sys/fasttrap_isa.h>
45 #include <sys/dtrace.h>
46 #include <sys/dtrace_impl.h>
47 #include <sys/proc.h>
48
49 #include <security/mac_framework.h>
50
51 #include <miscfs/devfs/devfs.h>
52 #include <sys/proc_internal.h>
53 #include <sys/dtrace_glue.h>
54 #include <sys/dtrace_ptss.h>
55
56 #include <kern/cs_blobs.h>
57 #include <kern/thread.h>
58 #include <kern/zalloc.h>
59
60 #include <mach/thread_act.h>
61
62 extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);
63
64 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
65 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
66
67 __private_extern__
68 void
69 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
70
71 /*
72 * User-Land Trap-Based Tracing
73 * ----------------------------
74 *
75 * The fasttrap provider allows DTrace consumers to instrument any user-level
76 * instruction to gather data; this includes probes with semantic
77  * significance like entry and return as well as simple offsets into the
78 * function. While the specific techniques used are very ISA specific, the
79 * methodology is generalizable to any architecture.
80 *
81 *
82 * The General Methodology
83 * -----------------------
84 *
85 * With the primary goal of tracing every user-land instruction and the
86  * limitation that we can't trust user space and so don't want to rely on much
87 * information there, we begin by replacing the instructions we want to trace
88 * with trap instructions. Each instruction we overwrite is saved into a hash
89 * table keyed by process ID and pc address. When we enter the kernel due to
90 * this trap instruction, we need the effects of the replaced instruction to
91 * appear to have occurred before we proceed with the user thread's
92 * execution.
93 *
94 * Each user level thread is represented by a ulwp_t structure which is
95 * always easily accessible through a register. The most basic way to produce
96 * the effects of the instruction we replaced is to copy that instruction out
97 * to a bit of scratch space reserved in the user thread's ulwp_t structure
98 * (a sort of kernel-private thread local storage), set the PC to that
99 * scratch space and single step. When we reenter the kernel after single
100 * stepping the instruction we must then adjust the PC to point to what would
101 * normally be the next instruction. Of course, special care must be taken
102 * for branches and jumps, but these represent such a small fraction of any
103 * instruction set that writing the code to emulate these in the kernel is
104 * not too difficult.
105 *
106 * Return probes may require several tracepoints to trace every return site,
107 * and, conversely, each tracepoint may activate several probes (the entry
108  * and offset 0 probes, for example). To solve this multiplexing problem,
109 * tracepoints contain lists of probes to activate and probes contain lists
110 * of tracepoints to enable. If a probe is activated, it adds its ID to
111 * existing tracepoints or creates new ones as necessary.
112 *
113 * Most probes are activated _before_ the instruction is executed, but return
114 * probes are activated _after_ the effects of the last instruction of the
115 * function are visible. Return probes must be fired _after_ we have
116 * single-stepped the instruction whereas all other probes are fired
117 * beforehand.
118 *
119 *
120 * Lock Ordering
121 * -------------
122 *
123 * The lock ordering below -- both internally and with respect to the DTrace
124 * framework -- is a little tricky and bears some explanation. Each provider
125 * has a lock (ftp_mtx) that protects its members including reference counts
126 * for enabled probes (ftp_rcount), consumers actively creating probes
127 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
128 * from being freed. A provider is looked up by taking the bucket lock for the
129 * provider hash table, and is returned with its lock held. The provider lock
130 * may be taken in functions invoked by the DTrace framework, but may not be
131 * held while calling functions in the DTrace framework.
132 *
133 * To ensure consistency over multiple calls to the DTrace framework, the
134 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
135 * not be taken when holding the provider lock as that would create a cyclic
136 * lock ordering. In situations where one would naturally take the provider
137  * lock and then the creation lock, we instead bump a reference count to prevent
138 * the provider from disappearing, drop the provider lock, and acquire the
139 * creation lock.
140 *
141 * Briefly:
142 * bucket lock before provider lock
143 * DTrace before provider lock
144 * creation lock before DTrace
145 * never hold the provider lock and creation lock simultaneously
146 */
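/*
 * For example, a consumer-driven probe creation that already holds the
 * provider lock and needs the creation lock proceeds roughly as follows
 * (a sketch of the convention, not a verbatim excerpt -- compare
 * fasttrap_provider_lookup() and fasttrap_add_probe() below):
 *
 *	lck_mtx_lock(&bucket->ftb_mtx);		// bucket lock first ...
 *	lck_mtx_lock(&fp->ftp_mtx);		// ... then provider lock
 *	lck_mtx_unlock(&bucket->ftb_mtx);
 *	fp->ftp_ccount++;			// pin the provider,
 *	lck_mtx_unlock(&fp->ftp_mtx);		// drop its lock,
 *	lck_mtx_lock(&fp->ftp_cmtx);		// then take the creation lock
 */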
147
148 static dtrace_meta_provider_id_t fasttrap_meta_id;
149
150 static thread_t fasttrap_cleanup_thread;
151
152 static lck_mtx_t fasttrap_cleanup_mtx;
153
154
155 #define FASTTRAP_CLEANUP_PROVIDER 0x1
156 #define FASTTRAP_CLEANUP_TRACEPOINT 0x2
157
158 static uint32_t fasttrap_cleanup_work = 0;
159
160 /*
161 * Generation count on modifications to the global tracepoint lookup table.
162 */
163 static volatile uint64_t fasttrap_mod_gen;
164
165 /*
166 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
167  * based on system memory. Each time a probe is created, fasttrap_total is
168 * incremented by the number of tracepoints that may be associated with that
169 * probe; fasttrap_total is capped at fasttrap_max.
170 */
171
172 static uint32_t fasttrap_max;
173 static uint32_t fasttrap_retired;
174 static uint32_t fasttrap_total;
175
176
177 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
178 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
179 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
180
181 fasttrap_hash_t fasttrap_tpoints;
182 static fasttrap_hash_t fasttrap_provs;
183 static fasttrap_hash_t fasttrap_procs;
184
185 static uint64_t fasttrap_pid_count; /* pid ref count */
186 static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */
187
188 #define FASTTRAP_ENABLE_FAIL 1
189 #define FASTTRAP_ENABLE_PARTIAL 2
190
191 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
192 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
193
194 static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
195 const dtrace_pattr_t *);
196 static void fasttrap_provider_retire(proc_t*, const char *, int);
197 static void fasttrap_provider_free(fasttrap_provider_t *);
198
199 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
200 static void fasttrap_proc_release(fasttrap_proc_t *);
201
202 #define FASTTRAP_PROVS_INDEX(pid, name) \
203 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
204
205 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
206
207 /*
208 * APPLE NOTE: To save memory, some common memory allocations are given
209 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
210 * which means it would fall into the kalloc.128 bucket. With
211 * 20k elements allocated, the space saved is substantial.
212 */
213
214 struct zone *fasttrap_tracepoint_t_zone;
215
216 /*
217 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
218 * that the sweet spot for reducing memory footprint is covering the first
219 * three sizes. Everything larger goes into the common pool.
220 */
221 #define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4
222
223 struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];
224
225 static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
226 "",
227 "dtrace.fasttrap_probe_t[1]",
228 "dtrace.fasttrap_probe_t[2]",
229 "dtrace.fasttrap_probe_t[3]"
230 };
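/*
 * A probe with ftp_ntps tracepoints is therefore carved from
 * fasttrap_probe_t_zones[ftp_ntps] when ftp_ntps is 1, 2 or 3, and from
 * the general allocator otherwise; index 0 is a placeholder since every
 * probe has at least one tracepoint. A sketch of the pairing (mirrored
 * by fasttrap_pid_destroy()):
 *
 *	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS)
 *		pp = zalloc(fasttrap_probe_t_zones[ntps]);
 *	else
 *		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
 */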
231
232 /*
233 * APPLE NOTE: We have to manage locks explicitly
234 */
235 lck_grp_t* fasttrap_lck_grp;
236 lck_grp_attr_t* fasttrap_lck_grp_attr;
237 lck_attr_t* fasttrap_lck_attr;
238
239 static int
240 fasttrap_highbit(ulong_t i)
241 {
242 int h = 1;
243
244 if (i == 0)
245 return (0);
246 #ifdef _LP64
247 if (i & 0xffffffff00000000ul) {
248 h += 32; i >>= 32;
249 }
250 #endif
251 if (i & 0xffff0000) {
252 h += 16; i >>= 16;
253 }
254 if (i & 0xff00) {
255 h += 8; i >>= 8;
256 }
257 if (i & 0xf0) {
258 h += 4; i >>= 4;
259 }
260 if (i & 0xc) {
261 h += 2; i >>= 2;
262 }
263 if (i & 0x2) {
264 h += 1;
265 }
266 return (h);
267 }
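/*
 * fasttrap_highbit() returns the 1-based index of the highest set bit,
 * e.g. fasttrap_highbit(1) == 1, fasttrap_highbit(0x4000) == 15 and
 * fasttrap_highbit(0) == 0; it is used to round the hash-table sizes
 * below up to powers of two when the tables are created at attach time
 * (the attach path is not shown here).
 */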
268
269 static uint_t
270 fasttrap_hash_str(const char *p)
271 {
272 unsigned int g;
273 uint_t hval = 0;
274
275 while (*p) {
276 hval = (hval << 4) + *p++;
277 if ((g = (hval & 0xf0000000)) != 0)
278 hval ^= g >> 24;
279 hval &= ~g;
280 }
281 return (hval);
282 }
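/*
 * This is the classic ELF/PJW string hash. Combined with a pid and
 * masked with the table size, it selects a hash bucket, e.g.:
 *
 *	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
 */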
283
284 /*
285 * APPLE NOTE: fasttrap_sigtrap not implemented
286 */
287 void
288 fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
289 {
290 #pragma unused(p, t, pc)
291
292 #if !defined(__APPLE__)
293 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
294
295 sqp->sq_info.si_signo = SIGTRAP;
296 sqp->sq_info.si_code = TRAP_DTRACE;
297 sqp->sq_info.si_addr = (caddr_t)pc;
298
299 mutex_enter(&p->p_lock);
300 sigaddqa(p, t, sqp);
301 mutex_exit(&p->p_lock);
302
303 if (t != NULL)
304 aston(t);
305 #endif /* __APPLE__ */
306
307 printf("fasttrap_sigtrap called with no implementation.\n");
308 }
309
310 /*
311 * This function ensures that no threads are actively using the memory
312 * associated with probes that were formerly live.
313 */
314 static void
315 fasttrap_mod_barrier(uint64_t gen)
316 {
317 unsigned int i;
318
319 if (gen < fasttrap_mod_gen)
320 return;
321
322 fasttrap_mod_gen++;
323
324 for (i = 0; i < NCPU; i++) {
325 lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
326 lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
327 }
328 }
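/*
 * Taking and dropping every CPU's cpuc_pid_lock acts as a barrier
 * because the probe-firing path holds that lock while it walks the
 * tracepoint chains: once we have cycled through all of the locks, no
 * thread can still be using memory from the prior generation. A sketch
 * of the consumer side, assuming the ISA-specific fasttrap_pid_probe()
 * follows the Solaris pattern:
 *
 *	lck_mtx_t *pid_mtx = &cpu_core[cpu_number()].cpuc_pid_lock;
 *	lck_mtx_lock(pid_mtx);
 *	// ... look up the tracepoint and fire its probes ...
 *	lck_mtx_unlock(pid_mtx);
 */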
329
330 static void fasttrap_pid_cleanup(uint32_t);
331
332 static unsigned int
333 fasttrap_pid_cleanup_providers(void)
334 {
335 fasttrap_provider_t **fpp, *fp;
336 fasttrap_bucket_t *bucket;
337 dtrace_provider_id_t provid;
338 unsigned int later = 0, i;
339
340 /*
341 * Iterate over all the providers trying to remove the marked
342 * ones. If a provider is marked but not retired, we just
343 * have to take a crack at removing it -- it's no big deal if
344 * we can't.
345 */
346 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
347 bucket = &fasttrap_provs.fth_table[i];
348 lck_mtx_lock(&bucket->ftb_mtx);
349 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
350
351 while ((fp = *fpp) != NULL) {
352 if (!fp->ftp_marked) {
353 fpp = &fp->ftp_next;
354 continue;
355 }
356
357 lck_mtx_lock(&fp->ftp_mtx);
358
359 /*
360 * If this provider has consumers actively
361 * creating probes (ftp_ccount) or is a USDT
362 * provider (ftp_mcount), we can't unregister
363 * or even condense.
364 */
365 if (fp->ftp_ccount != 0 ||
366 fp->ftp_mcount != 0) {
367 fp->ftp_marked = 0;
368 lck_mtx_unlock(&fp->ftp_mtx);
369 continue;
370 }
371
372 if (!fp->ftp_retired || fp->ftp_rcount != 0)
373 fp->ftp_marked = 0;
374
375 lck_mtx_unlock(&fp->ftp_mtx);
376
377 /*
378 * If we successfully unregister this
379 * provider we can remove it from the hash
380 * chain and free the memory. If our attempt
381 * to unregister fails and this is a retired
382 * provider, increment our flag to try again
383 * pretty soon. If we've consumed more than
384 * half of our total permitted number of
385 * probes call dtrace_condense() to try to
386 * clean out the unenabled probes.
387 */
388 provid = fp->ftp_provid;
389 if (dtrace_unregister(provid) != 0) {
390 if (fasttrap_total > fasttrap_max / 2)
391 (void) dtrace_condense(provid);
392 later += fp->ftp_marked;
393 fpp = &fp->ftp_next;
394 } else {
395 *fpp = fp->ftp_next;
396 fasttrap_provider_free(fp);
397 }
398 }
399 lck_mtx_unlock(&bucket->ftb_mtx);
400 }
401
402 return later;
403 }
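/*
 * The walk above uses a pointer-to-pointer cursor so that a node can be
 * unlinked without tracking a separate predecessor. Schematically:
 *
 *	while ((fp = *fpp) != NULL) {
 *		if (keep)
 *			fpp = &fp->ftp_next;	// advance past the node
 *		else
 *			*fpp = fp->ftp_next;	// unlink it in place
 *	}
 */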
404
405 typedef struct fasttrap_tracepoint_spec {
406 pid_t fttps_pid;
407 user_addr_t fttps_pc;
408 } fasttrap_tracepoint_spec_t;
409
410 static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
411 static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
412 static lck_mtx_t fasttrap_retired_mtx;
413
414 #define DEFAULT_RETIRED_SIZE 256
415
416 static void
417 fasttrap_tracepoint_cleanup(void)
418 {
419 size_t i;
420 pid_t pid = 0;
421 user_addr_t pc;
422 proc_t *p = PROC_NULL;
423 fasttrap_tracepoint_t *tp = NULL;
424 lck_mtx_lock(&fasttrap_retired_mtx);
425 fasttrap_bucket_t *bucket;
426 for (i = 0; i < fasttrap_cur_retired; i++) {
427 pc = fasttrap_retired_spec[i].fttps_pc;
428 if (fasttrap_retired_spec[i].fttps_pid != pid) {
429 pid = fasttrap_retired_spec[i].fttps_pid;
430 if (p != PROC_NULL) {
431 sprunlock(p);
432 }
433 if ((p = sprlock(pid)) == PROC_NULL) {
434 pid = 0;
435 continue;
436 }
437 }
438 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
439 lck_mtx_lock(&bucket->ftb_mtx);
440 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
441 if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
442 tp->ftt_proc->ftpc_acount != 0)
443 break;
444 }
445 /*
446		 * Skip if the tracepoint is gone or has been re-activated
447		 * for another probe
448 */
449 if (tp == NULL || tp->ftt_retired == 0) {
450 lck_mtx_unlock(&bucket->ftb_mtx);
451 continue;
452 }
453 fasttrap_tracepoint_remove(p, tp);
454 lck_mtx_unlock(&bucket->ftb_mtx);
455 }
456 if (p != PROC_NULL) {
457 sprunlock(p);
458 }
459
460 fasttrap_cur_retired = 0;
461
462 lck_mtx_unlock(&fasttrap_retired_mtx);
463 }
464
465 void
466 fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
467 {
468 if (tp->ftt_retired)
469 return;
470 lck_mtx_lock(&fasttrap_retired_mtx);
471 fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
472 s->fttps_pid = p->p_pid;
473 s->fttps_pc = tp->ftt_pc;
474
475 if (fasttrap_cur_retired == fasttrap_retired_size) {
476 fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
477 fasttrap_retired_size * 2 *
478 sizeof(*fasttrap_retired_spec),
479 KM_SLEEP);
480 memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
481 kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
482 fasttrap_retired_size *= 2;
483 fasttrap_retired_spec = new_retired;
484 }
485
486 lck_mtx_unlock(&fasttrap_retired_mtx);
487
488 tp->ftt_retired = 1;
489
490 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
491 }
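/*
 * The retired-spec array grows by doubling under fasttrap_retired_mtx,
 * so appending stays amortized O(1); fasttrap_tracepoint_cleanup()
 * later drains the array, batching sprlock() calls for runs of entries
 * that share a pid, and resets fasttrap_cur_retired to zero.
 */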
492
493 static void
494 fasttrap_pid_cleanup_compute_priority(void)
495 {
496 if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
497 thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
498 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
499 }
500 else {
501 thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
502 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
503
504 }
505 }
506
507 /*
508  * This is the body of the cleanup thread, which cleans up the providers and
509  * their probes.
510 */
511 /*ARGSUSED*/
512 __attribute__((noreturn))
513 static void
514 fasttrap_pid_cleanup_cb(void)
515 {
516 uint32_t work = 0;
517 lck_mtx_lock(&fasttrap_cleanup_mtx);
518 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
519 while (1) {
520 unsigned int later = 0;
521
522 work = atomic_and_32(&fasttrap_cleanup_work, 0);
523 lck_mtx_unlock(&fasttrap_cleanup_mtx);
524 if (work & FASTTRAP_CLEANUP_PROVIDER) {
525 later = fasttrap_pid_cleanup_providers();
526 }
527 if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
528 fasttrap_tracepoint_cleanup();
529 }
530 lck_mtx_lock(&fasttrap_cleanup_mtx);
531
532 fasttrap_pid_cleanup_compute_priority();
533 if (!fasttrap_cleanup_work) {
534 /*
535 * If we were unable to remove a retired provider, try again after
536 * a second. This situation can occur in certain circumstances where
537 * providers cannot be unregistered even though they have no probes
538 * enabled because of an execution of dtrace -l or something similar.
539 * If the timeout has been disabled (set to 1 because we're trying
540 * to detach), we set fasttrap_cleanup_work to ensure that we'll
541 * get a chance to do that work if and when the timeout is reenabled
542 * (if detach fails).
543 */
544 if (later > 0) {
545 struct timespec t = {1, 0};
546 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
547 }
548 else
549 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
550 }
551 }
552
553 }
554
555 /*
556 * Activates the asynchronous cleanup mechanism.
557 */
558 static void
559 fasttrap_pid_cleanup(uint32_t work)
560 {
561 lck_mtx_lock(&fasttrap_cleanup_mtx);
562 atomic_or_32(&fasttrap_cleanup_work, work);
563 fasttrap_pid_cleanup_compute_priority();
564 wakeup(&fasttrap_pid_cleanup_cb);
565 lck_mtx_unlock(&fasttrap_cleanup_mtx);
566 }
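/*
 * The handshake with the cleanup thread is a simple work-bits pattern:
 * producers OR their request into fasttrap_cleanup_work and wake the
 * thread, which claims all pending bits with atomic_and_32(..., 0) and
 * re-checks the word under the mutex before sleeping again, so a
 * request posted between wakeup and msleep() cannot be lost. For
 * example, retiring a tracepoint ends with:
 *
 *	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
 */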
567
568
569 /*
570 * This is called from cfork() via dtrace_fasttrap_fork(). The child
571  * process's address space is (roughly) a copy of the parent process's so
572 * we have to remove all the instrumentation we had previously enabled in the
573 * parent.
574 */
575 static void
576 fasttrap_fork(proc_t *p, proc_t *cp)
577 {
578 pid_t ppid = p->p_pid;
579 unsigned int i;
580
581 ASSERT(current_proc() == p);
582 LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
583 ASSERT(p->p_dtrace_count > 0);
584 ASSERT(cp->p_dtrace_count == 0);
585
586 /*
587 * This would be simpler and faster if we maintained per-process
588 * hash tables of enabled tracepoints. It could, however, potentially
589 * slow down execution of a tracepoint since we'd need to go
590 * through two levels of indirection. In the future, we should
591 * consider either maintaining per-process ancillary lists of
592 * enabled tracepoints or hanging a pointer to a per-process hash
593 * table of enabled tracepoints off the proc structure.
594 */
595
596 /*
597 * We don't have to worry about the child process disappearing
598 * because we're in fork().
599 */
600 if (cp != sprlock(cp->p_pid)) {
601 printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
602 return;
603 }
604 proc_unlock(cp);
605
606 /*
607 * Iterate over every tracepoint looking for ones that belong to the
608 * parent process, and remove each from the child process.
609 */
610 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
611 fasttrap_tracepoint_t *tp;
612 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
613
614 lck_mtx_lock(&bucket->ftb_mtx);
615 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
616 if (tp->ftt_pid == ppid &&
617 tp->ftt_proc->ftpc_acount != 0) {
618 fasttrap_tracepoint_remove(cp, tp);
619
620 /*
621 * The count of active providers can only be
622 * decremented (i.e. to zero) during exec,
623 * exit, and removal of a meta provider so it
624 * should be impossible to drop the count
625 * mid-fork.
626 */
627 ASSERT(tp->ftt_proc->ftpc_acount != 0);
628 }
629 }
630 lck_mtx_unlock(&bucket->ftb_mtx);
631 }
632
633 /*
634 * Free any ptss pages/entries in the child.
635 */
636 dtrace_ptss_fork(p, cp);
637
638 proc_lock(cp);
639 sprunlock(cp);
640 }
641
642 /*
643 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
644 * is set on the proc structure to indicate that there is a pid provider
645 * associated with this process.
646 */
647 static void
648 fasttrap_exec_exit(proc_t *p)
649 {
650 ASSERT(p == current_proc());
651 LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
652 LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
653
654
655 /* APPLE NOTE: Okay, the locking here is really odd and needs some
656 * explaining. This method is always called with the proc_lock held.
657 * We must drop the proc_lock before calling fasttrap_provider_retire
658 * to avoid a deadlock when it takes the bucket lock.
659 *
660 * Next, the dtrace_ptss_exec_exit function requires the sprlock
661 * be held, but not the proc_lock.
662 *
663 * Finally, we must re-acquire the proc_lock
664 */
665 proc_unlock(p);
666
667 /*
668 * We clean up the pid provider for this process here; user-land
669 * static probes are handled by the meta-provider remove entry point.
670 */
671 fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);
672
673 /*
674 * APPLE NOTE: We also need to remove any aliased providers.
675 * XXX optimization: track which provider types are instantiated
676 * and only retire as needed.
677 */
678 fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
679 fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);
680
681 /*
682 * This should be called after it is no longer possible for a user
683 * thread to execute (potentially dtrace instrumented) instructions.
684 */
685 lck_mtx_lock(&p->p_dtrace_sprlock);
686 dtrace_ptss_exec_exit(p);
687 lck_mtx_unlock(&p->p_dtrace_sprlock);
688
689 proc_lock(p);
690 }
691
692
693 /*ARGSUSED*/
694 static void
695 fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
696 {
697 #pragma unused(arg, desc)
698 /*
699 * There are no "default" pid probes.
700 */
701 }
702
703 static int
704 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
705 {
706 fasttrap_tracepoint_t *tp, *new_tp = NULL;
707 fasttrap_bucket_t *bucket;
708 fasttrap_id_t *id;
709 pid_t pid;
710 user_addr_t pc;
711
712 ASSERT(index < probe->ftp_ntps);
713
714 pid = probe->ftp_pid;
715 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
716 id = &probe->ftp_tps[index].fit_id;
717
718 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
719
720 //ASSERT(!(p->p_flag & SVFORK));
721
722 /*
723 * Before we make any modifications, make sure we've imposed a barrier
724 * on the generation in which this probe was last modified.
725 */
726 fasttrap_mod_barrier(probe->ftp_gen);
727
728 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
729
730 /*
731 * If the tracepoint has already been enabled, just add our id to the
732 * list of interested probes. This may be our second time through
733 * this path in which case we'll have constructed the tracepoint we'd
734 * like to install. If we can't find a match, and have an allocated
735 * tracepoint ready to go, enable that one now.
736 *
737 * A tracepoint whose process is defunct is also considered defunct.
738 */
739 again:
740 lck_mtx_lock(&bucket->ftb_mtx);
741 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
742 int rc = 0;
743 /*
744 * Note that it's safe to access the active count on the
745 * associated proc structure because we know that at least one
746 * provider (this one) will still be around throughout this
747 * operation.
748 */
749 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
750 tp->ftt_proc->ftpc_acount == 0)
751 continue;
752
753 /*
754 * Now that we've found a matching tracepoint, it would be
755 * a decent idea to confirm that the tracepoint is still
756 * enabled and the trap instruction hasn't been overwritten.
757 * Since this is a little hairy, we'll punt for now.
758 */
759 if (!tp->ftt_installed) {
760 if (fasttrap_tracepoint_install(p, tp) != 0)
761 rc = FASTTRAP_ENABLE_PARTIAL;
762 }
763 /*
764 * This can't be the first interested probe. We don't have
765 * to worry about another thread being in the midst of
766 * deleting this tracepoint (which would be the only valid
767 * reason for a tracepoint to have no interested probes)
768 * since we're holding P_PR_LOCK for this process.
769 */
770 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
771
772 switch (id->fti_ptype) {
773 case DTFTP_ENTRY:
774 case DTFTP_OFFSETS:
775 case DTFTP_IS_ENABLED:
776 id->fti_next = tp->ftt_ids;
777 dtrace_membar_producer();
778 tp->ftt_ids = id;
779 dtrace_membar_producer();
780 break;
781
782 case DTFTP_RETURN:
783 case DTFTP_POST_OFFSETS:
784 id->fti_next = tp->ftt_retids;
785 dtrace_membar_producer();
786 tp->ftt_retids = id;
787 dtrace_membar_producer();
788 break;
789
790 default:
791 ASSERT(0);
792 }
793
794 tp->ftt_retired = 0;
795
796 lck_mtx_unlock(&bucket->ftb_mtx);
797
798 if (new_tp != NULL) {
799 new_tp->ftt_ids = NULL;
800 new_tp->ftt_retids = NULL;
801 }
802
803 return rc;
804 }
805
806 /*
807 * If we have a good tracepoint ready to go, install it now while
808 * we have the lock held and no one can screw with us.
809 */
810 if (new_tp != NULL) {
811 int rc = 0;
812
813 new_tp->ftt_next = bucket->ftb_data;
814 dtrace_membar_producer();
815 bucket->ftb_data = new_tp;
816 dtrace_membar_producer();
817 lck_mtx_unlock(&bucket->ftb_mtx);
818
819 /*
820 * Activate the tracepoint in the ISA-specific manner.
821 * If this fails, we need to report the failure, but
822 * indicate that this tracepoint must still be disabled
823 * by calling fasttrap_tracepoint_disable().
824 */
825 if (fasttrap_tracepoint_install(p, new_tp) != 0)
826 rc = FASTTRAP_ENABLE_PARTIAL;
827 /*
828 * Increment the count of the number of tracepoints active in
829 * the victim process.
830 */
831 //ASSERT(p->p_proc_flag & P_PR_LOCK);
832 p->p_dtrace_count++;
833
834
835 return (rc);
836 }
837
838 lck_mtx_unlock(&bucket->ftb_mtx);
839
840 /*
841 * Initialize the tracepoint that's been preallocated with the probe.
842 */
843 new_tp = probe->ftp_tps[index].fit_tp;
844 new_tp->ftt_retired = 0;
845
846 ASSERT(new_tp->ftt_pid == pid);
847 ASSERT(new_tp->ftt_pc == pc);
848 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
849 ASSERT(new_tp->ftt_ids == NULL);
850 ASSERT(new_tp->ftt_retids == NULL);
851
852 switch (id->fti_ptype) {
853 case DTFTP_ENTRY:
854 case DTFTP_OFFSETS:
855 case DTFTP_IS_ENABLED:
856 id->fti_next = NULL;
857 new_tp->ftt_ids = id;
858 break;
859
860 case DTFTP_RETURN:
861 case DTFTP_POST_OFFSETS:
862 id->fti_next = NULL;
863 new_tp->ftt_retids = id;
864 break;
865
866 default:
867 ASSERT(0);
868 }
869
870 /*
871 * If the ISA-dependent initialization goes to plan, go back to the
872 * beginning and try to install this freshly made tracepoint.
873 */
874 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
875 goto again;
876
877 new_tp->ftt_ids = NULL;
878 new_tp->ftt_retids = NULL;
879
880 return (FASTTRAP_ENABLE_FAIL);
881 }
882
883 static void
884 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
885 {
886 fasttrap_bucket_t *bucket;
887 fasttrap_provider_t *provider = probe->ftp_prov;
888 fasttrap_tracepoint_t **pp, *tp;
889 fasttrap_id_t *id, **idp;
890 pid_t pid;
891 user_addr_t pc;
892
893 ASSERT(index < probe->ftp_ntps);
894
895 pid = probe->ftp_pid;
896 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
897 id = &probe->ftp_tps[index].fit_id;
898
899 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
900
901 /*
902 * Find the tracepoint and make sure that our id is one of the
903 * ones registered with it.
904 */
905 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
906 lck_mtx_lock(&bucket->ftb_mtx);
907 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
908 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
909 tp->ftt_proc == provider->ftp_proc)
910 break;
911 }
912
913 /*
914 * If we somehow lost this tracepoint, we're in a world of hurt.
915 */
916 ASSERT(tp != NULL);
917
918 switch (id->fti_ptype) {
919 case DTFTP_ENTRY:
920 case DTFTP_OFFSETS:
921 case DTFTP_IS_ENABLED:
922 ASSERT(tp->ftt_ids != NULL);
923 idp = &tp->ftt_ids;
924 break;
925
926 case DTFTP_RETURN:
927 case DTFTP_POST_OFFSETS:
928 ASSERT(tp->ftt_retids != NULL);
929 idp = &tp->ftt_retids;
930 break;
931
932 default:
933 /* Fix compiler warning... */
934 idp = NULL;
935 ASSERT(0);
936 }
937
938 while ((*idp)->fti_probe != probe) {
939 idp = &(*idp)->fti_next;
940 ASSERT(*idp != NULL);
941 }
942
943 id = *idp;
944 *idp = id->fti_next;
945 dtrace_membar_producer();
946
947 ASSERT(id->fti_probe == probe);
948
949 /*
950 * If there are other registered enablings of this tracepoint, we're
951	 * all done, but if this was the last probe associated with this
952	 * tracepoint, we need to remove and free it.
953 */
954 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
955
956 /*
957 * If the current probe's tracepoint is in use, swap it
958 * for an unused tracepoint.
959 */
960 if (tp == probe->ftp_tps[index].fit_tp) {
961 fasttrap_probe_t *tmp_probe;
962 fasttrap_tracepoint_t **tmp_tp;
963 uint_t tmp_index;
964
965 if (tp->ftt_ids != NULL) {
966 tmp_probe = tp->ftt_ids->fti_probe;
967 /* LINTED - alignment */
968 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
969 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
970 } else {
971 tmp_probe = tp->ftt_retids->fti_probe;
972 /* LINTED - alignment */
973 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
974 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
975 }
976
977 ASSERT(*tmp_tp != NULL);
978 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
979 ASSERT((*tmp_tp)->ftt_ids == NULL);
980 ASSERT((*tmp_tp)->ftt_retids == NULL);
981
982 probe->ftp_tps[index].fit_tp = *tmp_tp;
983 *tmp_tp = tp;
984
985 }
986
987 lck_mtx_unlock(&bucket->ftb_mtx);
988
989 /*
990 * Tag the modified probe with the generation in which it was
991 * changed.
992 */
993 probe->ftp_gen = fasttrap_mod_gen;
994 return;
995 }
996
997 lck_mtx_unlock(&bucket->ftb_mtx);
998
999 /*
1000 * We can't safely remove the tracepoint from the set of active
1001 * tracepoints until we've actually removed the fasttrap instruction
1002 * from the process's text. We can, however, operate on this
1003 * tracepoint secure in the knowledge that no other thread is going to
1004 * be looking at it since we hold P_PR_LOCK on the process if it's
1005 * live or we hold the provider lock on the process if it's dead and
1006 * gone.
1007 */
1008
1009 /*
1010 * We only need to remove the actual instruction if we're looking
1011 * at an existing process
1012 */
1013 if (p != NULL) {
1014 /*
1015 * If we fail to restore the instruction we need to kill
1016 * this process since it's in a completely unrecoverable
1017 * state.
1018 */
1019 if (fasttrap_tracepoint_remove(p, tp) != 0)
1020 fasttrap_sigtrap(p, NULL, pc);
1021
1022 /*
1023 * Decrement the count of the number of tracepoints active
1024 * in the victim process.
1025 */
1026 //ASSERT(p->p_proc_flag & P_PR_LOCK);
1027 p->p_dtrace_count--;
1028 }
1029
1030 /*
1031 * Remove the probe from the hash table of active tracepoints.
1032 */
1033 lck_mtx_lock(&bucket->ftb_mtx);
1034 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1035 ASSERT(*pp != NULL);
1036 while (*pp != tp) {
1037 pp = &(*pp)->ftt_next;
1038 ASSERT(*pp != NULL);
1039 }
1040
1041 *pp = tp->ftt_next;
1042 dtrace_membar_producer();
1043
1044 lck_mtx_unlock(&bucket->ftb_mtx);
1045
1046 /*
1047 * Tag the modified probe with the generation in which it was changed.
1048 */
1049 probe->ftp_gen = fasttrap_mod_gen;
1050 }
1051
1052 static void
1053 fasttrap_enable_callbacks(void)
1054 {
1055 /*
1056 * We don't have to play the rw lock game here because we're
1057 * providing something rather than taking something away --
1058 * we can be sure that no threads have tried to follow this
1059 * function pointer yet.
1060 */
1061 lck_mtx_lock(&fasttrap_count_mtx);
1062 if (fasttrap_pid_count == 0) {
1063 ASSERT(dtrace_pid_probe_ptr == NULL);
1064 ASSERT(dtrace_return_probe_ptr == NULL);
1065 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
1066 dtrace_return_probe_ptr = &fasttrap_return_probe;
1067 }
1068 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
1069 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
1070 fasttrap_pid_count++;
1071 lck_mtx_unlock(&fasttrap_count_mtx);
1072 }
1073
1074 static void
1075 fasttrap_disable_callbacks(void)
1076 {
1077 //ASSERT(MUTEX_HELD(&cpu_lock));
1078
1079 lck_mtx_lock(&fasttrap_count_mtx);
1080 ASSERT(fasttrap_pid_count > 0);
1081 fasttrap_pid_count--;
1082 if (fasttrap_pid_count == 0) {
1083 dtrace_cpu_t *cur, *cpu = CPU;
1084
1085 /*
1086		 * APPLE NOTE: This loop seems broken: it touches every CPU
1087 * but the one we're actually running on. Need to ask Sun folks
1088 * if that is safe. Scenario is this: We're running on CPU A,
1089 * and lock all but A. Then we get preempted, and start running
1090 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
1091 */
1092 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
1093 lck_rw_lock_exclusive(&cur->cpu_ft_lock);
1094 // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
1095 }
1096
1097 dtrace_pid_probe_ptr = NULL;
1098 dtrace_return_probe_ptr = NULL;
1099
1100 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
1101 lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
1102 // rw_exit(&cur->cpu_ft_lock);
1103 }
1104 }
1105 lck_mtx_unlock(&fasttrap_count_mtx);
1106 }
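/*
 * The per-CPU cpu_ft_lock is a read/write gate protecting the two
 * function pointers: the trap handler takes it shared before chasing
 * dtrace_pid_probe_ptr, so taking every CPU's lock exclusive here
 * (modulo the APPLE NOTE above) drains in-flight probes before the
 * pointers are cleared. A sketch of the reader side, assuming the
 * handler follows the Solaris pattern:
 *
 *	lck_rw_lock_shared(&CPU->cpu_ft_lock);
 *	if (dtrace_pid_probe_ptr != NULL)
 *		(*dtrace_pid_probe_ptr)(regs);
 *	lck_rw_unlock_shared(&CPU->cpu_ft_lock);
 */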
1107
1108 /*ARGSUSED*/
1109 static int
1110 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
1111 {
1112 #pragma unused(arg, id)
1113 fasttrap_probe_t *probe = parg;
1114 proc_t *p;
1115 int i, rc;
1116
1117 ASSERT(probe != NULL);
1118 ASSERT(!probe->ftp_enabled);
1119 ASSERT(id == probe->ftp_id);
1120 // ASSERT(MUTEX_HELD(&cpu_lock));
1121
1122 /*
1123 * Increment the count of enabled probes on this probe's provider;
1124 * the provider can't go away while the probe still exists. We
1125 * must increment this even if we aren't able to properly enable
1126 * this probe.
1127 */
1128 lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
1129 probe->ftp_prov->ftp_rcount++;
1130 lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);
1131
1132 /*
1133 * If this probe's provider is retired (meaning it was valid in a
1134 * previously exec'ed incarnation of this address space), bail out. The
1135 * provider can't go away while we're in this code path.
1136 */
1137 if (probe->ftp_prov->ftp_retired)
1138 return(0);
1139
1140 /*
1141 * If we can't find the process, it may be that we're in the context of
1142 * a fork in which the traced process is being born and we're copying
1143 * USDT probes. Otherwise, the process is gone so bail.
1144 */
1145 if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
1146 /*
1147 * APPLE NOTE: We should never end up here. The Solaris sprlock()
1148		 * does not return processes with SIDL set, but we always return
1149 * the child process.
1150 */
1151 return(0);
1152 }
1153
1154 if ((p->p_csflags & (CS_KILL|CS_HARD))) {
1155 proc_unlock(p);
1156 for (i = 0; i < DTRACE_NCLIENTS; i++) {
1157 dtrace_state_t *state = dtrace_state_get(i);
1158 if (state == NULL)
1159 continue;
1160 if (state->dts_cred.dcr_cred == NULL)
1161 continue;
1162 mac_proc_check_get_task(state->dts_cred.dcr_cred, p);
1163 }
1164 rc = cs_allow_invalid(p);
1165 proc_lock(p);
1166 if (rc == 0) {
1167 sprunlock(p);
1168 cmn_err(CE_WARN, "process doesn't allow invalid code pages, failing to install fasttrap probe\n");
1169 return (0);
1170 }
1171 }
1172
1173 /*
1174 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
1175 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
1176	 * To mimic this, we allocate scratch space on demand. If this is the first
1177 * time a probe has been enabled in this process, we need to allocate scratch
1178 * space for each already existing thread. Now is a good time to do this, as
1179 * the target process is suspended and the proc_lock is held.
1180 */
1181 if (p->p_dtrace_ptss_pages == NULL) {
1182 dtrace_ptss_enable(p);
1183 }
1184
1185 // ASSERT(!(p->p_flag & SVFORK));
1186 proc_unlock(p);
1187
1188 /*
1189 * We have to enable the trap entry point before any user threads have
1190 * the chance to execute the trap instruction we're about to place
1191 * in their process's text.
1192 */
1193 fasttrap_enable_callbacks();
1194
1195 /*
1196 * Enable all the tracepoints and add this probe's id to each
1197 * tracepoint's list of active probes.
1198 */
1199 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1200 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1201 /*
1202 * If enabling the tracepoint failed completely,
1203 * we don't have to disable it; if the failure
1204 * was only partial we must disable it.
1205 */
1206 if (rc == FASTTRAP_ENABLE_FAIL)
1207 i--;
1208 else
1209 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1210
1211 /*
1212 * Back up and pull out all the tracepoints we've
1213 * created so far for this probe.
1214 */
1215 while (i >= 0) {
1216 fasttrap_tracepoint_disable(p, probe, i);
1217 i--;
1218 }
1219
1220 proc_lock(p);
1221 sprunlock(p);
1222
1223 /*
1224 * Since we're not actually enabling this probe,
1225 * drop our reference on the trap table entry.
1226 */
1227 fasttrap_disable_callbacks();
1228 return(0);
1229 }
1230 }
1231
1232 proc_lock(p);
1233 sprunlock(p);
1234
1235 probe->ftp_enabled = 1;
1236 return (0);
1237 }
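/*
 * Note the rollback convention in the loop above: on
 * FASTTRAP_ENABLE_FAIL the failing index is skipped (i--), on
 * FASTTRAP_ENABLE_PARTIAL it is included, and the inner while loop then
 * disables tracepoints i..0 in reverse order before the callback
 * reference is dropped.
 */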
1238
1239 /*ARGSUSED*/
1240 static void
1241 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1242 {
1243 #pragma unused(arg, id)
1244 fasttrap_probe_t *probe = parg;
1245 fasttrap_provider_t *provider = probe->ftp_prov;
1246 proc_t *p;
1247 int i, whack = 0;
1248
1249 ASSERT(id == probe->ftp_id);
1250
1251 /*
1252 * We won't be able to acquire a /proc-esque lock on the process
1253 * iff the process is dead and gone. In this case, we rely on the
1254 * provider lock as a point of mutual exclusion to prevent other
1255 * DTrace consumers from disabling this probe.
1256 */
1257 if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
1258 // ASSERT(!(p->p_flag & SVFORK));
1259 proc_unlock(p);
1260 }
1261
1262 lck_mtx_lock(&provider->ftp_mtx);
1263
1264 /*
1265 * Disable all the associated tracepoints (for fully enabled probes).
1266 */
1267 if (probe->ftp_enabled) {
1268 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1269 fasttrap_tracepoint_disable(p, probe, i);
1270 }
1271 }
1272
1273 ASSERT(provider->ftp_rcount > 0);
1274 provider->ftp_rcount--;
1275
1276 if (p != NULL) {
1277 /*
1278 * Even though we may not be able to remove it entirely, we
1279 * mark this retired provider to get a chance to remove some
1280 * of the associated probes.
1281 */
1282 if (provider->ftp_retired && !provider->ftp_marked)
1283 whack = provider->ftp_marked = 1;
1284 lck_mtx_unlock(&provider->ftp_mtx);
1285
1286 proc_lock(p);
1287 sprunlock(p);
1288 } else {
1289 /*
1290 * If the process is dead, we're just waiting for the
1291 * last probe to be disabled to be able to free it.
1292 */
1293 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1294 whack = provider->ftp_marked = 1;
1295 lck_mtx_unlock(&provider->ftp_mtx);
1296 }
1297
1298 if (whack) {
1299 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
1300 }
1301
1302 if (!probe->ftp_enabled)
1303 return;
1304
1305 probe->ftp_enabled = 0;
1306
1307 // ASSERT(MUTEX_HELD(&cpu_lock));
1308 fasttrap_disable_callbacks();
1309 }
1310
1311 /*ARGSUSED*/
1312 static void
1313 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1314 dtrace_argdesc_t *desc)
1315 {
1316 #pragma unused(arg, id)
1317 fasttrap_probe_t *probe = parg;
1318 char *str;
1319 int i, ndx;
1320
1321 desc->dtargd_native[0] = '\0';
1322 desc->dtargd_xlate[0] = '\0';
1323
1324 if (probe->ftp_prov->ftp_retired != 0 ||
1325 desc->dtargd_ndx >= probe->ftp_nargs) {
1326 desc->dtargd_ndx = DTRACE_ARGNONE;
1327 return;
1328 }
1329
1330 ndx = (probe->ftp_argmap != NULL) ?
1331 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1332
1333 str = probe->ftp_ntypes;
1334 for (i = 0; i < ndx; i++) {
1335 str += strlen(str) + 1;
1336 }
1337
1338 (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));
1339
1340 if (probe->ftp_xtypes == NULL)
1341 return;
1342
1343 str = probe->ftp_xtypes;
1344 for (i = 0; i < desc->dtargd_ndx; i++) {
1345 str += strlen(str) + 1;
1346 }
1347
1348 (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
1349 }
1350
1351 /*ARGSUSED*/
1352 static void
1353 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1354 {
1355 #pragma unused(arg, id)
1356 fasttrap_probe_t *probe = parg;
1357 unsigned int i;
1358
1359 ASSERT(probe != NULL);
1360 ASSERT(!probe->ftp_enabled);
1361 ASSERT(fasttrap_total >= probe->ftp_ntps);
1362
1363 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1364 atomic_add_32(&fasttrap_retired, -probe->ftp_ntps);
1365
1366 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1367 fasttrap_mod_barrier(probe->ftp_gen);
1368
1369 for (i = 0; i < probe->ftp_ntps; i++) {
1370 zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
1371 }
1372
1373 if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1374 zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
1375 } else {
1376 size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1377 kmem_free(probe, size);
1378 }
1379 }
1380
1381
1382 static const dtrace_pattr_t pid_attr = {
1383 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1384 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1385 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1386 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1387 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1388 };
1389
1390 static dtrace_pops_t pid_pops = {
1391 .dtps_provide = fasttrap_pid_provide,
1392 .dtps_provide_module = NULL,
1393 .dtps_enable = fasttrap_pid_enable,
1394 .dtps_disable = fasttrap_pid_disable,
1395 .dtps_suspend = NULL,
1396 .dtps_resume = NULL,
1397 .dtps_getargdesc = fasttrap_pid_getargdesc,
1398 .dtps_getargval = fasttrap_pid_getarg,
1399 .dtps_usermode = NULL,
1400 .dtps_destroy = fasttrap_pid_destroy
1401 };
1402
1403 static dtrace_pops_t usdt_pops = {
1404 .dtps_provide = fasttrap_pid_provide,
1405 .dtps_provide_module = NULL,
1406 .dtps_enable = fasttrap_pid_enable,
1407 .dtps_disable = fasttrap_pid_disable,
1408 .dtps_suspend = NULL,
1409 .dtps_resume = NULL,
1410 .dtps_getargdesc = fasttrap_pid_getargdesc,
1411 .dtps_getargval = fasttrap_usdt_getarg,
1412 .dtps_usermode = NULL,
1413 .dtps_destroy = fasttrap_pid_destroy
1414 };
1415
1416 static fasttrap_proc_t *
1417 fasttrap_proc_lookup(pid_t pid)
1418 {
1419 fasttrap_bucket_t *bucket;
1420 fasttrap_proc_t *fprc, *new_fprc;
1421
1422 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1423 lck_mtx_lock(&bucket->ftb_mtx);
1424
1425 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1426 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1427 lck_mtx_lock(&fprc->ftpc_mtx);
1428 lck_mtx_unlock(&bucket->ftb_mtx);
1429 fprc->ftpc_rcount++;
1430 atomic_add_64(&fprc->ftpc_acount, 1);
1431 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1432 lck_mtx_unlock(&fprc->ftpc_mtx);
1433
1434 return (fprc);
1435 }
1436 }
1437
1438 /*
1439 * Drop the bucket lock so we don't try to perform a sleeping
1440 * allocation under it.
1441 */
1442 lck_mtx_unlock(&bucket->ftb_mtx);
1443
1444 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1445 ASSERT(new_fprc != NULL);
1446 new_fprc->ftpc_pid = pid;
1447 new_fprc->ftpc_rcount = 1;
1448 new_fprc->ftpc_acount = 1;
1449
1450 lck_mtx_lock(&bucket->ftb_mtx);
1451
1452 /*
1453 * Take another lap through the list to make sure a proc hasn't
1454 * been created for this pid while we weren't under the bucket lock.
1455 */
1456 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1457 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1458 lck_mtx_lock(&fprc->ftpc_mtx);
1459 lck_mtx_unlock(&bucket->ftb_mtx);
1460 fprc->ftpc_rcount++;
1461 atomic_add_64(&fprc->ftpc_acount, 1);
1462 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1463 lck_mtx_unlock(&fprc->ftpc_mtx);
1464
1465 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1466
1467 return (fprc);
1468 }
1469 }
1470
1471 /*
1472 * APPLE NOTE: We have to initialize all locks explicitly
1473 */
1474 lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1475
1476 new_fprc->ftpc_next = bucket->ftb_data;
1477 bucket->ftb_data = new_fprc;
1478
1479 lck_mtx_unlock(&bucket->ftb_mtx);
1480
1481 return (new_fprc);
1482 }
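/*
 * This is the usual optimistic-allocation dance for a hash table with
 * per-bucket sleep locks: search under the bucket lock, drop the lock
 * around the KM_SLEEP allocation, then search again before publishing
 * in case we raced. Schematically:
 *
 *	lck_mtx_lock(&bucket->ftb_mtx);
 *	// search; if found, take a reference and return it
 *	lck_mtx_unlock(&bucket->ftb_mtx);
 *	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
 *	lck_mtx_lock(&bucket->ftb_mtx);
 *	// search again; if we lost the race, free new_fprc and
 *	// return the winner, otherwise link new_fprc in
 *	lck_mtx_unlock(&bucket->ftb_mtx);
 *
 * fasttrap_provider_lookup() below follows the same shape.
 */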
1483
1484 static void
1485 fasttrap_proc_release(fasttrap_proc_t *proc)
1486 {
1487 fasttrap_bucket_t *bucket;
1488 fasttrap_proc_t *fprc, **fprcp;
1489 pid_t pid = proc->ftpc_pid;
1490
1491 lck_mtx_lock(&proc->ftpc_mtx);
1492
1493 ASSERT(proc->ftpc_rcount != 0);
1494 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1495
1496 if (--proc->ftpc_rcount != 0) {
1497 lck_mtx_unlock(&proc->ftpc_mtx);
1498 return;
1499 }
1500
1501 lck_mtx_unlock(&proc->ftpc_mtx);
1502
1503 /*
1504 * There should definitely be no live providers associated with this
1505 * process at this point.
1506 */
1507 ASSERT(proc->ftpc_acount == 0);
1508
1509 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1510 lck_mtx_lock(&bucket->ftb_mtx);
1511
1512 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1513 while ((fprc = *fprcp) != NULL) {
1514 if (fprc == proc)
1515 break;
1516
1517 fprcp = &fprc->ftpc_next;
1518 }
1519
1520 /*
1521 * Something strange has happened if we can't find the proc.
1522 */
1523 ASSERT(fprc != NULL);
1524
1525 *fprcp = fprc->ftpc_next;
1526
1527 lck_mtx_unlock(&bucket->ftb_mtx);
1528
1529 /*
1530 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1531 * memory is freed even without the destroy. Maybe accounting cleanup?
1532 */
1533 lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
1534
1535 kmem_free(fprc, sizeof (fasttrap_proc_t));
1536 }
1537
1538 /*
1539 * Lookup a fasttrap-managed provider based on its name and associated proc.
1540 * A reference to the proc must be held for the duration of the call.
1541 * If the pattr argument is non-NULL, this function instantiates the provider
1542  * if it doesn't exist; otherwise it returns NULL. The provider is returned
1543 * with its lock held.
1544 */
1545 static fasttrap_provider_t *
1546 fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
1547 const dtrace_pattr_t *pattr)
1548 {
1549 pid_t pid = p->p_pid;
1550 fasttrap_provider_t *fp, *new_fp = NULL;
1551 fasttrap_bucket_t *bucket;
1552 char provname[DTRACE_PROVNAMELEN];
1553 cred_t *cred;
1554
1555 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1556 ASSERT(pattr != NULL);
1557
1558 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1559 lck_mtx_lock(&bucket->ftb_mtx);
1560
1561 /*
1562 * Take a lap through the list and return the match if we find it.
1563 */
1564 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1565 if (fp->ftp_pid == pid &&
1566 fp->ftp_provider_type == provider_type &&
1567 strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1568 !fp->ftp_retired) {
1569 lck_mtx_lock(&fp->ftp_mtx);
1570 lck_mtx_unlock(&bucket->ftb_mtx);
1571 return (fp);
1572 }
1573 }
1574
1575 /*
1576 * Drop the bucket lock so we don't try to perform a sleeping
1577 * allocation under it.
1578 */
1579 lck_mtx_unlock(&bucket->ftb_mtx);
1580
1581 /*
1582 * Make sure the process isn't a child created as the result
1583 * of a vfork(2), and isn't a zombie (but may be in fork).
1584 */
1585 proc_lock(p);
1586 if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
1587 proc_unlock(p);
1588 return (NULL);
1589 }
1590
1591 /*
1592 * Increment p_dtrace_probes so that the process knows to inform us
1593 * when it exits or execs. fasttrap_provider_free() decrements this
1594 * when we're done with this provider.
1595 */
1596 p->p_dtrace_probes++;
1597
1598 /*
1599 * Grab the credentials for this process so we have
1600 * something to pass to dtrace_register().
1601 * APPLE NOTE: We have no equivalent to crhold,
1602	 * even though there is a cr_ref field in ucred.
1603 */
1604 cred = kauth_cred_proc_ref(p);
1605 proc_unlock(p);
1606
1607 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1608 ASSERT(new_fp != NULL);
1609 new_fp->ftp_pid = p->p_pid;
1610 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1611 new_fp->ftp_provider_type = provider_type;
1612
1613 /*
1614 * APPLE NOTE: locks require explicit init
1615 */
1616 lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1617 lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
1618
1619 ASSERT(new_fp->ftp_proc != NULL);
1620
1621 lck_mtx_lock(&bucket->ftb_mtx);
1622
1623 /*
1624 * Take another lap through the list to make sure a provider hasn't
1625 * been created for this pid while we weren't under the bucket lock.
1626 */
1627 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1628 if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1629 !fp->ftp_retired) {
1630 lck_mtx_lock(&fp->ftp_mtx);
1631 lck_mtx_unlock(&bucket->ftb_mtx);
1632 fasttrap_provider_free(new_fp);
1633 kauth_cred_unref(&cred);
1634 return (fp);
1635 }
1636 }
1637
1638 (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));
1639
1640 /*
1641 * Fail and return NULL if either the provider name is too long
1642 * or we fail to register this new provider with the DTrace
1643 * framework. Note that this is the only place we ever construct
1644 * the full provider name -- we keep it in pieces in the provider
1645 * structure.
1646 */
1647 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1648 (int)sizeof (provname) ||
1649 dtrace_register(provname, pattr,
1650 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1651 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1652 &new_fp->ftp_provid) != 0) {
1653 lck_mtx_unlock(&bucket->ftb_mtx);
1654 fasttrap_provider_free(new_fp);
1655 kauth_cred_unref(&cred);
1656 return (NULL);
1657 }
1658
1659 new_fp->ftp_next = bucket->ftb_data;
1660 bucket->ftb_data = new_fp;
1661
1662 lck_mtx_lock(&new_fp->ftp_mtx);
1663 lck_mtx_unlock(&bucket->ftb_mtx);
1664
1665 kauth_cred_unref(&cred);
1666
1667 return (new_fp);
1668 }
1669
1670 static void
1671 fasttrap_provider_free(fasttrap_provider_t *provider)
1672 {
1673 pid_t pid = provider->ftp_pid;
1674 proc_t *p;
1675
1676 /*
1677 * There need to be no associated enabled probes, no consumers
1678 * creating probes, and no meta providers referencing this provider.
1679 */
1680 ASSERT(provider->ftp_rcount == 0);
1681 ASSERT(provider->ftp_ccount == 0);
1682 ASSERT(provider->ftp_mcount == 0);
1683
1684 /*
1685 * If this provider hasn't been retired, we need to explicitly drop the
1686 * count of active providers on the associated process structure.
1687 */
1688 if (!provider->ftp_retired) {
1689 atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
1690 ASSERT(provider->ftp_proc->ftpc_acount <
1691 provider->ftp_proc->ftpc_rcount);
1692 }
1693
1694 fasttrap_proc_release(provider->ftp_proc);
1695
1696 /*
1697 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1698 * memory is freed even without the destroy. Maybe accounting cleanup?
1699 */
1700 lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
1701 lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
1702
1703 kmem_free(provider, sizeof (fasttrap_provider_t));
1704
1705 /*
1706 * Decrement p_dtrace_probes on the process whose provider we're
1707	 * freeing. We don't have to worry about clobbering someone else's
1708 * modifications to it because we have locked the bucket that
1709 * corresponds to this process's hash chain in the provider hash
1710 * table. Don't sweat it if we can't find the process.
1711 */
1712 if ((p = proc_find(pid)) == NULL) {
1713 return;
1714 }
1715
1716 proc_lock(p);
1717 p->p_dtrace_probes--;
1718 proc_unlock(p);
1719
1720 proc_rele(p);
1721 }
1722
1723 static void
1724 fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
1725 {
1726 fasttrap_provider_t *fp;
1727 fasttrap_bucket_t *bucket;
1728 dtrace_provider_id_t provid;
1729 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1730
1731 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
1732 lck_mtx_lock(&bucket->ftb_mtx);
1733
1734 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1735 if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1736 !fp->ftp_retired)
1737 break;
1738 }
1739
1740 if (fp == NULL) {
1741 lck_mtx_unlock(&bucket->ftb_mtx);
1742 return;
1743 }
1744
1745 lck_mtx_lock(&fp->ftp_mtx);
1746 ASSERT(!mprov || fp->ftp_mcount > 0);
1747 if (mprov && --fp->ftp_mcount != 0) {
1748 lck_mtx_unlock(&fp->ftp_mtx);
1749 lck_mtx_unlock(&bucket->ftb_mtx);
1750 return;
1751 }
1752
1753 /*
1754 * Mark the provider to be removed in our post-processing step, mark it
1755 * retired, and drop the active count on its proc. Marking it indicates
1756 * that we should try to remove it; setting the retired flag indicates
1757	 * that we're done with this provider; dropping the active count on the
1758	 * proc releases our hold, and when this reaches zero (as it will during
1759 * exit or exec) the proc and associated providers become defunct.
1760 *
1761 * We obviously need to take the bucket lock before the provider lock
1762 * to perform the lookup, but we need to drop the provider lock
1763 * before calling into the DTrace framework since we acquire the
1764 * provider lock in callbacks invoked from the DTrace framework. The
1765 * bucket lock therefore protects the integrity of the provider hash
1766 * table.
1767 */
1768 atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
1769 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1770
1771 /*
1772	 * Add this provider's probes to the retired count and
1773 * make sure we don't add them twice
1774 */
1775 atomic_add_32(&fasttrap_retired, fp->ftp_pcount);
1776 fp->ftp_pcount = 0;
1777
1778 fp->ftp_retired = 1;
1779 fp->ftp_marked = 1;
1780 provid = fp->ftp_provid;
1781 lck_mtx_unlock(&fp->ftp_mtx);
1782
1783 /*
1784 * We don't have to worry about invalidating the same provider twice
1785 * since fasttrap_provider_lookup() will ignore providers that have
1786 * been marked as retired.
1787 */
1788 dtrace_invalidate(provid);
1789
1790 lck_mtx_unlock(&bucket->ftb_mtx);
1791
1792 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
1793 }
1794
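/*
 * These comparators compare explicitly rather than returning the raw
 * difference: subtracting unsigned values and truncating the result to
 * int can report the wrong sign once the operands differ by 2^31 or
 * more.
 */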
1795 static int
1796 fasttrap_uint32_cmp(const void *ap, const void *bp)
1797 {
1798 	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1799 }
1800
1801 static int
1802 fasttrap_uint64_cmp(const void *ap, const void *bp)
1803 {
1804 	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1805 }
1806
1807 static int
1808 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1809 {
1810 proc_t *p;
1811 fasttrap_provider_t *provider;
1812 fasttrap_probe_t *pp;
1813 fasttrap_tracepoint_t *tp;
1814 const char *name;
1815 unsigned int i, aframes, whack;
1816
1817 /*
1818 * There needs to be at least one desired trace point.
1819 */
1820 if (pdata->ftps_noffs == 0)
1821 return (EINVAL);
1822
1823 switch (pdata->ftps_probe_type) {
1824 case DTFTP_ENTRY:
1825 name = "entry";
1826 aframes = FASTTRAP_ENTRY_AFRAMES;
1827 break;
1828 case DTFTP_RETURN:
1829 name = "return";
1830 aframes = FASTTRAP_RETURN_AFRAMES;
1831 break;
1832 case DTFTP_OFFSETS:
1833 aframes = 0;
1834 name = NULL;
1835 break;
1836 default:
1837 return (EINVAL);
1838 }
1839
1840 const char* provider_name;
1841 switch (pdata->ftps_provider_type) {
1842 case DTFTP_PROVIDER_PID:
1843 provider_name = FASTTRAP_PID_NAME;
1844 break;
1845 case DTFTP_PROVIDER_OBJC:
1846 provider_name = FASTTRAP_OBJC_NAME;
1847 break;
1848 case DTFTP_PROVIDER_ONESHOT:
1849 provider_name = FASTTRAP_ONESHOT_NAME;
1850 break;
1851 default:
1852 return (EINVAL);
1853 }
1854
1855 p = proc_find(pdata->ftps_pid);
1856 if (p == PROC_NULL)
1857 return (ESRCH);
1858
1859 if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
1860 provider_name, &pid_attr)) == NULL) {
1861 proc_rele(p);
1862 return (ESRCH);
1863 }
1864
1865 proc_rele(p);

1866 /*
1867 * Increment this reference count to indicate that a consumer is
1868 * actively adding a new probe associated with this provider. This
1869 * prevents the provider from being deleted -- we'll need to check
1870 * for pending deletions when we drop this reference count.
1871 */
1872 provider->ftp_ccount++;
1873 lck_mtx_unlock(&provider->ftp_mtx);
1874
1875 /*
1876 * Grab the creation lock to ensure consistency between calls to
1877 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1878 * other threads creating probes. We must drop the provider lock
1879 * before taking this lock to avoid a three-way deadlock with the
1880 * DTrace framework.
1881 */
1882 lck_mtx_lock(&provider->ftp_cmtx);
1883
1884 if (name == NULL) {
1885 for (i = 0; i < pdata->ftps_noffs; i++) {
1886 char name_str[17];
1887
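/*
 * Offset probes are named by their hex offset within the function:
 * 16 hex digits for a 64-bit offset plus the terminating NUL fit in
 * name_str[17].
 */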
1888 (void) snprintf(name_str, sizeof(name_str), "%llx",
1889 (uint64_t)pdata->ftps_offs[i]);
1890
1891 if (dtrace_probe_lookup(provider->ftp_provid,
1892 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1893 continue;
1894
1895 atomic_add_32(&fasttrap_total, 1);
1896 if (fasttrap_total > fasttrap_max) {
1897 atomic_add_32(&fasttrap_total, -1);
1898 goto no_mem;
1899 }
1900 provider->ftp_pcount++;
1901
1902 pp = zalloc(fasttrap_probe_t_zones[1]);
1903 bzero(pp, sizeof (fasttrap_probe_t));
1904
1905 pp->ftp_prov = provider;
1906 pp->ftp_faddr = pdata->ftps_pc;
1907 pp->ftp_fsize = pdata->ftps_size;
1908 pp->ftp_pid = pdata->ftps_pid;
1909 pp->ftp_ntps = 1;
1910
1911 tp = zalloc(fasttrap_tracepoint_t_zone);
1912 bzero(tp, sizeof (fasttrap_tracepoint_t));
1913
1914 tp->ftt_proc = provider->ftp_proc;
1915 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1916 tp->ftt_pid = pdata->ftps_pid;
1917
1918 #if defined(__arm__) || defined(__arm64__)
1919 /*
1920 * On arm the subinfo is used to distinguish between arm
1921 * and thumb modes. On arm64 there is no thumb mode, so
1922 * this field is simply initialized to 0 on its way
1923 * into the kernel.
1924 */
1925 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1926 #endif
1927
1928 pp->ftp_tps[0].fit_tp = tp;
1929 pp->ftp_tps[0].fit_id.fti_probe = pp;
1930 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1931 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1932 pdata->ftps_mod, pdata->ftps_func, name_str,
1933 FASTTRAP_OFFSET_AFRAMES, pp);
1934 }
1935
1936 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1937 pdata->ftps_func, name) == 0) {
1938 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1939
1940 if (fasttrap_total > fasttrap_max) {
1941 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1942 goto no_mem;
1943 }
1944
1945 /*
1946 * Make sure all tracepoint program counter values are unique.
1947 * We later assume that each probe has exactly one tracepoint
1948 * for a given pc.
1949 */
1950 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1951 sizeof (uint64_t), fasttrap_uint64_cmp);
1952 for (i = 1; i < pdata->ftps_noffs; i++) {
1953 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1954 continue;
1955
1956 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1957 goto no_mem;
1958 }
1959 provider->ftp_pcount += pdata->ftps_noffs;
1960 ASSERT(pdata->ftps_noffs > 0);
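/*
 * Probes with only a few tracepoints come from the fixed-size zones;
 * larger ones fall back to kmem_zalloc().
 */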
1961 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1962 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
1963 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
1964 } else {
1965 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1966 }
1967
1968 pp->ftp_prov = provider;
1969 pp->ftp_faddr = pdata->ftps_pc;
1970 pp->ftp_fsize = pdata->ftps_size;
1971 pp->ftp_pid = pdata->ftps_pid;
1972 pp->ftp_ntps = pdata->ftps_noffs;
1973
1974 for (i = 0; i < pdata->ftps_noffs; i++) {
1975 tp = zalloc(fasttrap_tracepoint_t_zone);
1976 bzero(tp, sizeof (fasttrap_tracepoint_t));
1977 tp->ftt_proc = provider->ftp_proc;
1978 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1979 tp->ftt_pid = pdata->ftps_pid;
1980
1981 #if defined(__arm__) || defined(__arm64__)
1982 /*
1983 * On arm the subinfo is used to distinguish between arm
1984 * and thumb modes. On arm64 there is no thumb mode, so
1985 * this field is simply initialized to 0 on its way
1986 * into the kernel.
1987 */
1989 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1990 #endif
1991 pp->ftp_tps[i].fit_tp = tp;
1992 pp->ftp_tps[i].fit_id.fti_probe = pp;
1993 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
1994 }
1995
1996 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1997 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1998 }
1999
2000 lck_mtx_unlock(&provider->ftp_cmtx);
2001
2002 /*
2003 * We know that the provider is still valid since we incremented the
2004 * creation reference count. If someone tried to clean up this provider
2005 * while we were using it (e.g. because the process called exec(2) or
2006 * exit(2)), take note of that and try to clean it up now.
2007 */
2008 lck_mtx_lock(&provider->ftp_mtx);
2009 provider->ftp_ccount--;
2010 whack = provider->ftp_retired;
2011 lck_mtx_unlock(&provider->ftp_mtx);
2012
2013 if (whack)
2014 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2015
2016 return (0);
2017
2018 no_mem:
2019 /*
2020 * If we've exhausted the allowable resources, we'll try to remove
2021 * this provider to free some up. This is to cover the case where
2022 * the user has accidentally created many more probes than intended
2023 * (e.g. pid123:::).
2024 */
2025 lck_mtx_unlock(&provider->ftp_cmtx);
2026 lck_mtx_lock(&provider->ftp_mtx);
2027 provider->ftp_ccount--;
2028 provider->ftp_marked = 1;
2029 lck_mtx_unlock(&provider->ftp_mtx);
2030
2031 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2032
2033 return (ENOMEM);
2034 }
2035
2036 /*ARGSUSED*/
2037 static void *
2038 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2039 {
2040 #pragma unused(arg)
2041 fasttrap_provider_t *provider;
2042
2043 /*
2044 * A 32-bit unsigned integer (like a pid for example) can be
2045 * expressed in 10 or fewer decimal digits. Make sure that we'll
2046 * have enough space for the provider name.
2047 */
2048 if (strlen(dhpv->dthpv_provname) + 10 >=
2049 sizeof (provider->ftp_name)) {
2050 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2051 "name too long to accomodate pid", dhpv->dthpv_provname);
2052 return (NULL);
2053 }
2054
2055 /*
2056 * Don't let folks spoof the true pid provider.
2057 */
2058 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
2059 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2060 "%s is an invalid name", dhpv->dthpv_provname,
2061 FASTTRAP_PID_NAME);
2062 return (NULL);
2063 }
2064
2065 /*
2066 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2067 */
2068 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
2069 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2070 "%s is an invalid name", dhpv->dthpv_provname,
2071 FASTTRAP_OBJC_NAME);
2072 return (NULL);
2073 }
2074 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
2075 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2076 "%s is an invalid name", dhpv->dthpv_provname,
2077 FASTTRAP_ONESHOT_NAME);
2078 return (NULL);
2079 }
2080
2081 /*
2082 * The highest stability class that fasttrap supports is ISA; cap
2083 * the stability of the new provider accordingly.
2084 */
2085 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2086 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2087 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2088 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2089 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2090 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2091 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2092 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2093 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2094 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2095
2096 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2097 &dhpv->dthpv_pattr)) == NULL) {
2098 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2099 "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
2100 return (NULL);
2101 }
2102
2103 /*
2104 * APPLE NOTE!
2105 *
2106 * USDT probes (fasttrap meta probes) are very expensive to create.
2107 * Profiling has shown that the largest single cost is verifying that
2108 * dtrace hasn't already created a given meta_probe. The reason for
2109 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2110 * each static probe being created. We want to get rid of that check.
2111 * The simplest way of eliminating it is to deny the ability to add
2112 * probes to an existing provider. If the provider already exists, BZZT!
2113 * This still leaves the possibility of intentionally malformed DOF
2114 * having duplicate probes. However, duplicate probes are not fatal,
2115 * and there is no way to create them by accident, so we will not check
2116 * for that case.
2117 *
2118 * UPDATE: It turns out there are several use cases that require adding
2119 * probes to existing providers. Disabling the dtrace_probe_lookup()
2120 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2121 */
2122
2123 /*
2124 * Up the meta provider count so this provider isn't removed until
2125 * the meta provider has been told to remove it.
2126 */
2127 provider->ftp_mcount++;
2128
2129 lck_mtx_unlock(&provider->ftp_mtx);
2130
2131 return (provider);
2132 }
2133
2134 /*ARGSUSED*/
2135 static void
2136 fasttrap_meta_create_probe(void *arg, void *parg,
2137 dtrace_helper_probedesc_t *dhpb)
2138 {
2139 #pragma unused(arg)
2140 fasttrap_provider_t *provider = parg;
2141 fasttrap_probe_t *pp;
2142 fasttrap_tracepoint_t *tp;
2143 unsigned int i, j;
2144 uint32_t ntps;
2145
2146 /*
2147 * Since the meta provider count is non-zero we don't have to worry
2148 * about this provider disappearing.
2149 */
2150 ASSERT(provider->ftp_mcount > 0);
2151
2152 /*
2153 * The offsets must be unique; on a duplicate we silently drop the
* probe, since we later assume exactly one tracepoint per pc.
2154 */
2155 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2156 fasttrap_uint32_cmp);
2157 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2158 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2159 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2160 return;
2161 }
2162
2163 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2164 fasttrap_uint32_cmp);
2165 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2166 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2167 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2168 return;
2169 }
2170
2171 /*
2172 * Grab the creation lock to ensure consistency between calls to
2173 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2174 * other threads creating probes.
2175 */
2176 lck_mtx_lock(&provider->ftp_cmtx);
2177
2178 #if 0
2179 /*
2180 * APPLE NOTE: This is hideously expensive. See note in
2181 * fasttrap_meta_provide() for why we can get away without
2182 * checking here.
2183 */
2184 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2185 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2186 lck_mtx_unlock(&provider->ftp_cmtx);
2187 return;
2188 }
2189 #endif
2190
2191 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2192 ASSERT(ntps > 0);
2193
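/*
 * Reserve these tracepoints against the global limit up front, and
 * back the reservation out if it would push us past fasttrap_max.
 */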
2194 atomic_add_32(&fasttrap_total, ntps);
2195
2196 if (fasttrap_total > fasttrap_max) {
2197 atomic_add_32(&fasttrap_total, -ntps);
2198 lck_mtx_unlock(&provider->ftp_cmtx);
2199 return;
2200 }
2201
2202 provider->ftp_pcount += ntps;
2203
2204 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2205 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2206 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2207 } else {
2208 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2209 }
2210
2211 pp->ftp_prov = provider;
2212 pp->ftp_pid = provider->ftp_pid;
2213 pp->ftp_ntps = ntps;
2214 pp->ftp_nargs = dhpb->dthpb_xargc;
2215 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2216 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2217
2218 /*
2219 * First create a tracepoint for each actual point of interest.
2220 */
2221 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2222 tp = zalloc(fasttrap_tracepoint_t_zone);
2223 bzero(tp, sizeof (fasttrap_tracepoint_t));
2224
2225 tp->ftt_proc = provider->ftp_proc;
2226
2227 /*
2228 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2229 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2230 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2231 */
2232 #if defined(__x86_64__)
2233 /*
2234 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2235 */
2236 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2237 #elif defined(__arm__) || defined(__arm64__)
2238 /*
2239 * All ARM and ARM64 probes are zero offset. We need to zero out the
2240 * thumb bit because we still support 32bit user processes.
2241 * On 64bit user processes, bit zero won't be set anyway.
2242 */
2243 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
2244 tp->ftt_fntype = FASTTRAP_FN_USDT;
2245 #else
2246 #error "Architecture not supported"
2247 #endif
2248
2249 tp->ftt_pid = provider->ftp_pid;
2250
2251 pp->ftp_tps[i].fit_tp = tp;
2252 pp->ftp_tps[i].fit_id.fti_probe = pp;
2253 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2254 }
2255
2256 /*
2257 * Then create a tracepoint for each is-enabled point. Note that i
* carries over from the offsets loop above, filling the remaining
* ftp_tps slots, while j indexes dthpb_enoffs.
2258 */
2259 for (j = 0; i < ntps; i++, j++) {
2260 tp = zalloc(fasttrap_tracepoint_t_zone);
2261 bzero(tp, sizeof (fasttrap_tracepoint_t));
2262
2263 tp->ftt_proc = provider->ftp_proc;
2264
2265 /*
2266 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2267 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2268 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2269 */
2270 #if defined(__x86_64__)
2271 /*
2272 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2273 */
2274 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2275 #elif defined(__arm__) || defined(__arm64__)
2276 /*
2277 * All ARM and ARM64 probes are zero offset. We need to zero out the
2278 * thumb bit because we still support 32bit user processes.
2279 * On 64bit user processes, bit zero won't be set anyway.
2280 */
2281 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
2282 tp->ftt_fntype = FASTTRAP_FN_USDT;
2283 #else
2284 #error "Architecture not supported"
2285 #endif
2286
2287 tp->ftt_pid = provider->ftp_pid;
2288
2289 pp->ftp_tps[i].fit_tp = tp;
2290 pp->ftp_tps[i].fit_id.fti_probe = pp;
2291 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2292 }
2293
2294 /*
2295 * If the arguments are shuffled around we set the argument remapping
2296 * table. Later, when the probe fires, we only remap the arguments
2297 * if the table is non-NULL.
2298 */
2299 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2300 if (dhpb->dthpb_args[i] != i) {
2301 pp->ftp_argmap = dhpb->dthpb_args;
2302 break;
2303 }
2304 }
2305
2306 /*
2307 * The probe is fully constructed -- register it with DTrace.
2308 */
2309 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2310 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2311
2312 lck_mtx_unlock(&provider->ftp_cmtx);
2313 }
2314
2315 /*ARGSUSED*/
2316 static void
2317 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2318 {
2319 #pragma unused(arg)
2320 /*
2321 * Clean up the USDT provider. There may be active consumers of the
2322 * provider busy adding probes, but no damage will actually befall the
2323 * provider until that count has dropped to zero. This just puts
2324 * the provider on death row.
2325 */
2326 fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
2327 }
2328
2329 static char*
2330 fasttrap_meta_provider_name(void *arg)
2331 {
2332 fasttrap_provider_t *fprovider = arg;
2333 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2334 return provider->dtpv_name;
2335 }
2336
2337 static dtrace_mops_t fasttrap_mops = {
2338 .dtms_create_probe = fasttrap_meta_create_probe,
2339 .dtms_provide_proc = fasttrap_meta_provide,
2340 .dtms_remove_proc = fasttrap_meta_remove,
2341 .dtms_provider_name = fasttrap_meta_provider_name
2342 };
2343
2344 /*
2345 * Validate a null-terminated string. If str is not null-terminated,
2346 * or not a valid UTF8 string, the function returns -1. Otherwise, 0 is
2347 * returned.
2348 *
2349 * str: string to validate.
2350 * maxlen: maximum length of the string, terminating NUL byte included.
2351 */
2352 static int
2353 fasttrap_validatestr(char const* str, size_t maxlen) {
2354 size_t len;
2355
2356 assert(str);
2357 assert(maxlen != 0);
2358
2359 /* Check if the string is null-terminated. */
2360 len = strnlen(str, maxlen);
2361 if (len >= maxlen)
2362 return -1;
2363
2364 /* Finally, check for UTF8 validity. */
2365 return utf8_validatestr((unsigned const char*) str, len);
2366 }
2367
2368 /*ARGSUSED*/
2369 static int
2370 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2371 {
2372 #pragma unused(dev, md, rv)
2373 if (!dtrace_attached())
2374 return (EAGAIN);
2375
2376 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2377 fasttrap_probe_spec_t *probe;
2378 uint64_t noffs;
2379 size_t size;
2380 int ret;
2381
2382 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2383 sizeof (probe->ftps_noffs)))
2384 return (EFAULT);
2385
2386 /*
2387 * Probes must have at least one tracepoint.
2388 */
2389 if (noffs == 0)
2390 return (EINVAL);
2391
2392 /*
2393 * We want to check the number of noffs before doing
2394 * sizing math, to prevent potential buffer overflows.
2395 */
2396 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2397 return (ENOMEM);
2398
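/*
 * ftps_offs is declared with a single element, so a spec carrying
 * noffs offsets needs (noffs - 1) additional slots beyond the base
 * struct.
 */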
2399 size = sizeof (fasttrap_probe_spec_t) +
2400 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2401
2402 probe = kmem_alloc(size, KM_SLEEP);
2403
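/*
 * Copy in the full spec and re-verify ftps_noffs: the user could have
 * raced and changed it after the first copyin that sized this
 * allocation.
 */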
2404 if (copyin(arg, probe, size) != 0 ||
2405 probe->ftps_noffs != noffs) {
2406 kmem_free(probe, size);
2407 return (EFAULT);
2408 }
2409
2410 /*
2411 * Verify that the function and module strings contain no
2412 * funny characters.
2413 */
2414
2415 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2416 ret = EINVAL;
2417 goto err;
2418 }
2419
2420 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2421 ret = EINVAL;
2422 goto err;
2423 }
2424
2425 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2426 proc_t *p;
2427 pid_t pid = probe->ftps_pid;
2428
2429 /*
2430 * Report an error if the process doesn't exist
2431 * or is actively being birthed.
2432 */
2433 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2434 if (p != PROC_NULL)
2435 proc_rele(p);
2436 ret = ESRCH;
2437 goto err;
2438 }
2439 // proc_lock(p);
2440 // FIXME! How is this done on OS X?
2441 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2442 // VREAD | VWRITE)) != 0) {
2443 // mutex_exit(&p->p_lock);
2444 // return (ret);
2445 // }
2446 // proc_unlock(p);
2447 proc_rele(p);
2448 }
2449
2450 ret = fasttrap_add_probe(probe);
2451
2452 err:
2453 kmem_free(probe, size);
2454
2455 return (ret);
2456
2457 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2458 fasttrap_instr_query_t instr;
2459 fasttrap_tracepoint_t *tp;
2460 uint_t index;
2461 // int ret;
2462
2463 if (copyin(arg, &instr, sizeof (instr)) != 0)
2464 return (EFAULT);
2465
2466 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2467 proc_t *p;
2468 pid_t pid = instr.ftiq_pid;
2469
2470 /*
2471 * Report an error if the process doesn't exist
2472 * or is actively being birthed.
2473 */
2474 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2475 if (p != PROC_NULL)
2476 proc_rele(p);
2477 return (ESRCH);
2478 }
2479 //proc_lock(p);
2480 // FIXME! How is this done on OS X?
2481 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2482 // VREAD)) != 0) {
2483 // mutex_exit(&p->p_lock);
2484 // return (ret);
2485 // }
2486 // proc_unlock(p);
2487 proc_rele(p);
2488 }
2489
2490 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2491
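/*
 * Walk the bucket for a live tracepoint matching this pid and pc; an
 * ftpc_acount of zero means the owning proc is already defunct.
 */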
2492 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2493 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2494 while (tp != NULL) {
2495 if (instr.ftiq_pid == tp->ftt_pid &&
2496 instr.ftiq_pc == tp->ftt_pc &&
2497 tp->ftt_proc->ftpc_acount != 0)
2498 break;
2499
2500 tp = tp->ftt_next;
2501 }
2502
2503 if (tp == NULL) {
2504 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2505 return (ENOENT);
2506 }
2507
2508 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2509 sizeof (instr.ftiq_instr));
2510 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2511
2512 if (copyout(&instr, arg, sizeof (instr)) != 0)
2513 return (EFAULT);
2514
2515 return (0);
2516 }
2517
2518 return (EINVAL);
2519 }
2520
2521 static void
2522 fasttrap_attach(void)
2523 {
2524 ulong_t nent;
2525 unsigned int i;
2526
2527 /*
2528 * Install our hooks into fork(2), exec(2), and exit(2).
2529 */
2530 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2531 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2532 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2533
2534 /*
2535 * APPLE NOTE: We size the maximum number of fasttrap probes
2536 * based on system memory. 100k probes per 256M of system memory.
2537 * Yes, this is a WAG.
2538 */
2539 fasttrap_max = (sane_size >> 28) * 100000;
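/* e.g. with 8GB of memory: (2^33 >> 28) * 100000 = 3,200,000 probes. */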
2540
2541 if (fasttrap_max == 0)
2542 fasttrap_max = 50000;
2543
2544 fasttrap_total = 0;
2545 fasttrap_retired = 0;
2546
2547 /*
2548 * Conjure up the tracepoints hashtable...
2549 */
2550 #ifdef illumos
2551 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2552 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2553 #else
2554 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2555 #endif
2556
2557 if (nent <= 0 || nent > 0x1000000)
2558 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2559
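/*
 * Force the table size to a power of two so that fth_mask can be used
 * as a cheap index mask; the providers and procs tables below are
 * sized the same way.
 */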
2560 if ((nent & (nent - 1)) == 0)
2561 fasttrap_tpoints.fth_nent = nent;
2562 else
2563 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2564 ASSERT(fasttrap_tpoints.fth_nent > 0);
2565 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2566 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2567 sizeof (fasttrap_bucket_t), KM_SLEEP);
2568 ASSERT(fasttrap_tpoints.fth_table != NULL);
2569
2570 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
2571 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2572 }
2573
2574 /*
2575 * ... and the providers hash table...
2576 */
2577 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2578 if ((nent & (nent - 1)) == 0)
2579 fasttrap_provs.fth_nent = nent;
2580 else
2581 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2582 ASSERT(fasttrap_provs.fth_nent > 0);
2583 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2584 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2585 sizeof (fasttrap_bucket_t), KM_SLEEP);
2586 ASSERT(fasttrap_provs.fth_table != NULL);
2587
2588 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2589 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2590 }
2591
2592 /*
2593 * ... and the procs hash table.
2594 */
2595 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2596 if ((nent & (nent - 1)) == 0)
2597 fasttrap_procs.fth_nent = nent;
2598 else
2599 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2600 ASSERT(fasttrap_procs.fth_nent > 0);
2601 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2602 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2603 sizeof (fasttrap_bucket_t), KM_SLEEP);
2604 ASSERT(fasttrap_procs.fth_table != NULL);
2605
2606 #ifndef illumos
2607 for (i = 0; i < fasttrap_procs.fth_nent; i++) {
2608 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2609 }
2610 #endif
2611
2612 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2613 &fasttrap_meta_id);
2614 }
2615
2616 static int
2617 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2618 {
2619 #pragma unused(dev, flags, devtype, p)
2620 return 0;
2621 }
2622
2623 static int
2624 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2625 {
2626 int err, rv = 0;
2627 user_addr_t uaddrp;
2628
2629 if (proc_is64bit(p))
2630 uaddrp = *(user_addr_t *)data;
2631 else
2632 uaddrp = (user_addr_t) *(uint32_t *)data;
2633
2634 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2635
2636 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2637 if (err != 0) {
2638 ASSERT( (err & 0xfffff000) == 0 );
2639 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2640 } else if (rv != 0) {
2641 ASSERT( (rv & 0xfff00000) == 0 );
2642 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2643 } else
2644 return 0;
2645 }
2646
2647 static int fasttrap_inited = 0;
2648
2649 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2650
2651 /*
2652 * A struct describing which functions will get invoked for certain
2653 * actions.
2654 */
2655
2656 static struct cdevsw fasttrap_cdevsw =
2657 {
2658 _fasttrap_open, /* open */
2659 eno_opcl, /* close */
2660 eno_rdwrt, /* read */
2661 eno_rdwrt, /* write */
2662 _fasttrap_ioctl, /* ioctl */
2663 (stop_fcn_t *)nulldev, /* stop */
2664 (reset_fcn_t *)nulldev, /* reset */
2665 NULL, /* tty's */
2666 eno_select, /* select */
2667 eno_mmap, /* mmap */
2668 eno_strat, /* strategy */
2669 eno_getc, /* getc */
2670 eno_putc, /* putc */
2671 0 /* type */
2672 };
2673
2674 void fasttrap_init(void);
2675
2676 void
2677 fasttrap_init( void )
2678 {
2679 /*
2680 * This method is now invoked from multiple places: any open of /dev/dtrace,
2681 * and also from dtrace_init when dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2682 *
2683 * The reason is to delay allocating the (rather large) resources as late as possible.
2684 */
2685 if (!fasttrap_inited) {
2686 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2687
2688 if (majdevno < 0) {
2689 // FIX ME! What kind of error reporting to do here?
2690 printf("fasttrap_init: failed to allocate a major number!\n");
2691 return;
2692 }
2693
2694 dev_t device = makedev( (uint32_t)majdevno, 0 );
2695 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2696 return;
2697 }
2698
2699 /*
2700 * Allocate the fasttrap_tracepoint_t zone
2701 */
2702 fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2703 1024 * sizeof(fasttrap_tracepoint_t),
2704 sizeof(fasttrap_tracepoint_t),
2705 "dtrace.fasttrap_tracepoint_t");
2706
2707 /*
2708 * fasttrap_probe_t's are variable in size. We use an array of zones to
2709 * cover the most common sizes.
2710 */
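/* Index 0 is never used: every probe has at least one tracepoint. */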
2711 int i;
2712 for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2713 size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2714 fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2715 1024 * zone_element_size,
2716 zone_element_size,
2717 fasttrap_probe_t_zone_names[i]);
2718 }
2719
2721 /*
2722 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2723 */
2724 fasttrap_lck_attr = lck_attr_alloc_init();
2725 fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
2726 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2727
2728 /*
2729 * Initialize global locks
2730 */
2731 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2732 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2733
2734 fasttrap_attach();
2735
2736 /*
2737 * Start the fasttrap cleanup thread
2738 */
2739 kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
2740 if (res != KERN_SUCCESS) {
2741 panic("Could not create fasttrap_cleanup_thread");
2742 }
2743 thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");
2744
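/*
 * Preallocate the list used to track retired probe specs; it is
 * guarded by fasttrap_retired_mtx and consumed by the cleanup path.
 */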
2745 fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
2746 fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
2747 KM_SLEEP);
2748 lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2749
2750 fasttrap_inited = 1;
2751 }
2752 }
2753
2754 #undef FASTTRAP_MAJOR