/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <security/mac_framework.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>

extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

__private_extern__
void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
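
/*
 * Illustrative sketch (not compiled in): the reference-count dance the
 * ordering rules above require when code holding the provider lock needs
 * the creation lock. The helper name is hypothetical; the fields and lock
 * calls match the ones used throughout this file.
 */
#if 0
static void
fasttrap_lock_order_sketch(fasttrap_provider_t *fp)
{
	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount++;		/* pin the provider so it can't be freed */
	lck_mtx_unlock(&fp->ftp_mtx);	/* never hold ftp_mtx and ftp_cmtx together */

	lck_mtx_lock(&fp->ftp_cmtx);	/* safe to call into DTrace while held */
	/* ... dtrace_probe_lookup() / dtrace_probe_create() ... */
	lck_mtx_unlock(&fp->ftp_cmtx);

	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount--;		/* drop the pin */
	lck_mtx_unlock(&fp->ftp_mtx);
}
#endif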

static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static lck_mtx_t fasttrap_cleanup_mtx;


#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */

static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;


#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t			fasttrap_tpoints;
static fasttrap_hash_t		fasttrap_provs;
static fasttrap_hash_t		fasttrap_procs;

static uint64_t			fasttrap_pid_count;	/* pid ref count */
static lck_mtx_t		fasttrap_count_mtx;	/* lock on ref count */

#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t*, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

ZONE_DECLARE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t",
    sizeof(fasttrap_tracepoint_t), ZC_NONE);

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};
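
/*
 * Illustrative sketch (not compiled in): how a variable-length probe is
 * carved from the per-size zones described above, with anything of four or
 * more tracepoints falling back to the common kmem pool. This mirrors the
 * alloc/free pairs in fasttrap_add_probe() and fasttrap_pid_destroy(); the
 * helper name is hypothetical.
 */
#if 0
static fasttrap_probe_t *
fasttrap_probe_alloc_sketch(unsigned int ntps)
{
	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS)
		return zalloc(fasttrap_probe_t_zones[ntps]);
	return kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
}
#endif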

/*
 * APPLE NOTE: We have to manage locks explicitly
 */
lck_grp_t* fasttrap_lck_grp;
lck_grp_attr_t* fasttrap_lck_grp_attr;
lck_attr_t* fasttrap_lck_attr;

static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

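/*
 * PJW/ELF-style string hash: shift in each byte and fold the top nibble
 * back into the low bits.
 */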
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

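	/*
	 * Taking and then dropping each CPU's cpuc_pid_lock in turn waits
	 * out any probe processing still in flight under the old generation
	 * on that CPU.
	 */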
	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}

static void fasttrap_pid_cleanup(uint32_t);

static unsigned int
fasttrap_pid_cleanup_providers(void)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {
				fpp = &fp->ftp_next;
				continue;
			}

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				fp->ftp_marked = 0;
				lck_mtx_unlock(&fp->ftp_mtx);
				continue;
			}

			if (!fp->ftp_retired || fp->ftp_rcount != 0)
				fp->ftp_marked = 0;

			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	return later;
}

typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static lck_mtx_t fasttrap_retired_mtx;

#define DEFAULT_RETIRED_SIZE 256

static void
fasttrap_tracepoint_cleanup(void)
{
	size_t i;
	pid_t pid = 0;
	user_addr_t pc;
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
				sprunlock(p);
			}
			if ((p = sprlock(pid)) == PROC_NULL) {
				pid = 0;
				continue;
			}
		}
		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;
		}
		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);
			continue;
		}
		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);
	}
	if (p != PROC_NULL) {
		sprunlock(p);
	}

	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
}

void
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
{
	if (tp->ftt_retired)
		return;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = p->p_pid;
	s->fttps_pc = tp->ftt_pc;

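	/*
	 * Grow the retired-spec array by doubling once it fills.
	 */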
	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
		    fasttrap_retired_size * 2 *
		    sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		fasttrap_retired_size *= 2;
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	tp->ftt_retired = 1;

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}

static void
fasttrap_pid_cleanup_compute_priority(void)
{
	if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
		thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
	else {
		thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);

	}
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
__attribute__((noreturn))
static void
fasttrap_pid_cleanup_cb(void)
{
	uint32_t work = 0;
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
	while (1) {
		unsigned int later = 0;

		work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
		}
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();
		}
		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled
			 * (if detach fails).
			 */
			if (later > 0) {
				struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
			}
			else
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		}
	}

}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

static int
fasttrap_setdebug(proc_t *p)
{
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * CS_KILL and CS_HARD will cause code-signing to kill the process
	 * when the process text is modified, so register the intent
	 * to allow invalid access beforehand.
	 */
	if ((p->p_csflags & (CS_KILL|CS_HARD))) {
		proc_unlock(p);
		for (int i = 0; i < DTRACE_NCLIENTS; i++) {
			dtrace_state_t *state = dtrace_state_get(i);
			if (state == NULL)
				continue;
			if (state->dts_cred.dcr_cred == NULL)
				continue;
			/*
			 * The get_task call flags whether the process should
			 * be flagged to have the cs_allow_invalid call
			 * succeed. We want the best credential that any dtrace
			 * client has, so try all of them.
			 */

			/*
			 * mac_proc_check_get_task() can trigger upcalls. It's
			 * not safe to hold proc references across upcalls, so
			 * just drop the reference. Given the context, it
			 * should not be possible for the process to actually
			 * disappear.
			 */
			struct proc_ident pident = proc_ident(p);
			sprunlock(p);
			p = PROC_NULL;

			mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident);

			p = sprlock(pident.p_pid);
			if (p == PROC_NULL) {
				return (ESRCH);
			}
		}
		int rc = cs_allow_invalid(p);
		proc_lock(p);
		if (rc == 0) {
			return (EACCES);
		}
	}
	return (0);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	unsigned int i;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(cp->p_pid)) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
		return;
	}

	proc_lock(cp);
	if (fasttrap_setdebug(cp) == ESRCH) {
		printf("fasttrap_fork: failed to re-acquire proc\n");
		return;
	}
	proc_unlock(cp);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);


	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock.
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	proc_lock(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	//ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		int rc = 0;
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;
		}
		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		tp->ftt_retired = 0;

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return rc;
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;
		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;


		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		/* Fix compiler warning... */
		idp = NULL;
		ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;

		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}

static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken: it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return(0);

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */
		return(0);
	}

	proc_lock(p);
	int p_pid = proc_pid(p);

	rc = fasttrap_setdebug(p);
	switch (rc) {
	case EACCES:
		proc_unlock(p);
		sprunlock(p);
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Process does not allow invalid code pages\n", p_pid);
		return (0);
	case ESRCH:
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Failed to re-acquire process\n", p_pid);
		return (0);
	default:
		assert(rc == 0);
		break;
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	// ASSERT(!(p->p_flag & SVFORK));
	proc_unlock(p);

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return(0);
		}
	}

	sprunlock(p);

	probe->ftp_enabled = 1;
	return (0);
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
		// ASSERT(!(p->p_flag & SVFORK));
	}

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack) {
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
	}

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
	os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
	}

	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_pid_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_usdt_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	ASSERT(new_fprc != NULL);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	/*
	 * APPLE NOTE: We have to initialize all locks explicitly
	 */
	lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	lck_mtx_unlock(&bucket->ftb_mtx);

	return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);
		return;
	}

	lck_mtx_unlock(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}

/*
 * Lookup a fasttrap-managed provider based on its name and associated proc.
 * A reference to the proc must be held for the duration of the call.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
{
	pid_t pid = p->p_pid;
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    fp->ftp_provider_type == provider_type &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	proc_lock(p);
	if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
		proc_unlock(p);
		return (NULL);
	}

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 * APPLE NOTE: We have no equivalent to crhold,
	 * even though there is a cr_ref field in ucred.
	 */
	cred = kauth_cred_proc_ref(p);
	proc_unlock(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = p->p_pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
	new_fp->ftp_provider_type = provider_type;

	/*
	 * APPLE NOTE: locks require explicit init
	 */
	lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);
			kauth_cred_unref(&cred);
			return (fp);
		}
	}

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		kauth_cred_unref(&cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);

	kauth_cred_unref(&cred);

	return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {
		return;
	}

	proc_lock(p);
	p->p_dtrace_probes--;
	proc_unlock(p);

	proc_rele(p);
}

static void
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on
	 * the proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice.
	 */
1825 os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
1826 fp->ftp_pcount = 0;
1827
1828 fp->ftp_retired = 1;
1829 fp->ftp_marked = 1;
1830 provid = fp->ftp_provid;
1831 lck_mtx_unlock(&fp->ftp_mtx);
1832
1833 /*
1834 * We don't have to worry about invalidating the same provider twice
1835 * since fasttrap_provider_lookup() will ignore providers that have
1836 * been marked as retired.
1837 */
1838 dtrace_invalidate(provid);
1839
1840 lck_mtx_unlock(&bucket->ftb_mtx);
1841
1842 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
1843 }
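/*
 * A minimal sketch of the locking discipline described above (illustrative
 * only, not compiled): the bucket lock brackets the whole operation so the
 * hash chain stays intact, while the provider lock is dropped before
 * re-entering the DTrace framework, which may take it again in callbacks.
 */
#if 0
	lck_mtx_lock(&bucket->ftb_mtx);		/* 1: protect the hash chain */
	lck_mtx_lock(&fp->ftp_mtx);		/* 2: protect provider state */
	/* ... mark retired, stash fp->ftp_provid ... */
	lck_mtx_unlock(&fp->ftp_mtx);		/* 3: drop before framework calls */
	dtrace_invalidate(provid);		/* may acquire ftp_mtx in callbacks */
	lck_mtx_unlock(&bucket->ftb_mtx);	/* 4: chain is consistent again */
#endif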
1844
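/*
 * qsort(3) comparators for tracepoint offsets. These compare explicitly
 * rather than return the difference of the operands: a uint64_t difference
 * truncated to int can lose its high bits (two distinct offsets could
 * compare equal), and a large uint32_t difference can overflow the int
 * return value and flip its sign.
 */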
1845 static int
1846 fasttrap_uint32_cmp(const void *ap, const void *bp)
1847 {
1848 return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1849 }
1850
1851 static int
1852 fasttrap_uint64_cmp(const void *ap, const void *bp)
1853 {
1854 return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1855 }
1856
1857 static int
1858 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1859 {
1860 proc_t *p;
1861 fasttrap_provider_t *provider;
1862 fasttrap_probe_t *pp;
1863 fasttrap_tracepoint_t *tp;
1864 const char *name;
1865 unsigned int i, aframes, whack;
1866
1867 /*
1868 * There needs to be at least one desired trace point.
1869 */
1870 if (pdata->ftps_noffs == 0)
1871 return (EINVAL);
1872
1873 switch (pdata->ftps_probe_type) {
1874 case DTFTP_ENTRY:
1875 name = "entry";
1876 aframes = FASTTRAP_ENTRY_AFRAMES;
1877 break;
1878 case DTFTP_RETURN:
1879 name = "return";
1880 aframes = FASTTRAP_RETURN_AFRAMES;
1881 break;
1882 case DTFTP_OFFSETS:
1883 aframes = 0;
1884 name = NULL;
1885 break;
1886 default:
1887 return (EINVAL);
1888 }
1889
1890 const char* provider_name;
1891 switch (pdata->ftps_provider_type) {
1892 case DTFTP_PROVIDER_PID:
1893 provider_name = FASTTRAP_PID_NAME;
1894 break;
1895 case DTFTP_PROVIDER_OBJC:
1896 provider_name = FASTTRAP_OBJC_NAME;
1897 break;
1898 case DTFTP_PROVIDER_ONESHOT:
1899 provider_name = FASTTRAP_ONESHOT_NAME;
1900 break;
1901 default:
1902 return (EINVAL);
1903 }
1904
1905 p = proc_find(pdata->ftps_pid);
1906 if (p == PROC_NULL)
1907 return (ESRCH);
1908
1909 if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
1910 provider_name, &pid_attr)) == NULL) {
1911 proc_rele(p);
1912 return (ESRCH);
1913 }
1914
1915 proc_rele(p);
1916 /*
1917 * Increment this reference count to indicate that a consumer is
1918 * actively adding a new probe associated with this provider. This
1919 * prevents the provider from being deleted -- we'll need to check
1920 * for pending deletions when we drop this reference count.
1921 */
1922 provider->ftp_ccount++;
1923 lck_mtx_unlock(&provider->ftp_mtx);
1924
1925 /*
1926 * Grab the creation lock to ensure consistency between calls to
1927 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1928 * other threads creating probes. We must drop the provider lock
1929 * before taking this lock to avoid a three-way deadlock with the
1930 * DTrace framework.
1931 */
1932 lck_mtx_lock(&provider->ftp_cmtx);
1933
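/*
 * Illustrative note (module/function names hypothetical): with no semantic
 * name (DTFTP_OFFSETS), each offset becomes its own probe named by its hex
 * offset, so a consumer sees e.g. pid123:a.out:main:1c rather than
 * pid123:a.out:main:entry.
 */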
1934 if (name == NULL) {
1935 for (i = 0; i < pdata->ftps_noffs; i++) {
1936 char name_str[17];
1937
1938 (void) snprintf(name_str, sizeof(name_str), "%llx",
1939 (uint64_t)pdata->ftps_offs[i]);
1940
1941 if (dtrace_probe_lookup(provider->ftp_provid,
1942 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1943 continue;
1944
1945 os_atomic_inc(&fasttrap_total, relaxed);
1946 if (fasttrap_total > fasttrap_max) {
1947 os_atomic_dec(&fasttrap_total, relaxed);
1948 goto no_mem;
1949 }
1950 provider->ftp_pcount++;
1951
1952 pp = zalloc(fasttrap_probe_t_zones[1]);
1953 bzero(pp, sizeof (fasttrap_probe_t));
1954
1955 pp->ftp_prov = provider;
1956 pp->ftp_faddr = pdata->ftps_pc;
1957 pp->ftp_fsize = pdata->ftps_size;
1958 pp->ftp_pid = pdata->ftps_pid;
1959 pp->ftp_ntps = 1;
1960
1961 tp = zalloc(fasttrap_tracepoint_t_zone);
1962 bzero(tp, sizeof (fasttrap_tracepoint_t));
1963
1964 tp->ftt_proc = provider->ftp_proc;
1965 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1966 tp->ftt_pid = pdata->ftps_pid;
1967
1968 #if defined(__arm__) || defined(__arm64__)
1969 /*
1970 * On arm the subinfo is used to distinguish between arm
1971 * and thumb modes. On arm64 there is no thumb mode, so
1972 * this field is simply initialized to 0 on its way
1973 * into the kernel.
1974 */
1975 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1976 #endif
1977
1978 pp->ftp_tps[0].fit_tp = tp;
1979 pp->ftp_tps[0].fit_id.fti_probe = pp;
1980 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1981 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1982 pdata->ftps_mod, pdata->ftps_func, name_str,
1983 FASTTRAP_OFFSET_AFRAMES, pp);
1984 }
1985
1986 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1987 pdata->ftps_func, name) == 0) {
1988 os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);
1989
1990 if (fasttrap_total > fasttrap_max) {
1991 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
1992 goto no_mem;
1993 }
1994
1995 /*
1996 * Make sure all tracepoint program counter values are unique.
1997 * We later assume that each probe has exactly one tracepoint
1998 * for a given pc.
1999 */
2000 qsort(pdata->ftps_offs, pdata->ftps_noffs,
2001 sizeof (uint64_t), fasttrap_uint64_cmp);
2002 for (i = 1; i < pdata->ftps_noffs; i++) {
2003 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
2004 continue;
2005
2006 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
2007 goto no_mem;
2008 }
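/*
 * Worked example (offsets hypothetical): ftps_offs = { 0x20, 0x10, 0x20 }
 * sorts to { 0x10, 0x20, 0x20 }; the adjacent comparison then catches the
 * duplicated 0x20 and the request is rejected.
 */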
2009 provider->ftp_pcount += pdata->ftps_noffs;
2010 ASSERT(pdata->ftps_noffs > 0);
2011 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2012 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
2013 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
2014 } else {
2015 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
2016 }
2017
2018 pp->ftp_prov = provider;
2019 pp->ftp_faddr = pdata->ftps_pc;
2020 pp->ftp_fsize = pdata->ftps_size;
2021 pp->ftp_pid = pdata->ftps_pid;
2022 pp->ftp_ntps = pdata->ftps_noffs;
2023
2024 for (i = 0; i < pdata->ftps_noffs; i++) {
2025 tp = zalloc(fasttrap_tracepoint_t_zone);
2026 bzero(tp, sizeof (fasttrap_tracepoint_t));
2027 tp->ftt_proc = provider->ftp_proc;
2028 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
2029 tp->ftt_pid = pdata->ftps_pid;
2030
2031 #if defined(__arm__) || defined(__arm64__)
2032 /*
2033 * On arm the subinfo is used to distinguish between arm
2034 * and thumb modes. On arm64 there is no thumb mode, so
2035 * this field is simply initialized to 0 on its way
2036 * into the kernel.
2037 */
2038 tp->ftt_fntype = pdata->ftps_arch_subinfo;
2040 #endif
2041 pp->ftp_tps[i].fit_tp = tp;
2042 pp->ftp_tps[i].fit_id.fti_probe = pp;
2043 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
2044 }
2045
2046 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
2047 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
2048 }
2049
2050 lck_mtx_unlock(&provider->ftp_cmtx);
2051
2052 /*
2053 * We know that the provider is still valid since we incremented the
2054 * creation reference count. If someone tried to clean up this provider
2055 * while we were using it (e.g. because the process called exec(2) or
2056 * exit(2)), take note of that and try to clean it up now.
2057 */
2058 lck_mtx_lock(&provider->ftp_mtx);
2059 provider->ftp_ccount--;
2060 whack = provider->ftp_retired;
2061 lck_mtx_unlock(&provider->ftp_mtx);
2062
2063 if (whack)
2064 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2065
2066 return (0);
2067
2068 no_mem:
2069 /*
2070 * If we've exhausted the allowable resources, we'll try to remove
2071 * this provider to free some up. This is to cover the case where
2072 * the user has accidentally created many more probes than intended
2073 * (e.g. pid123:::).
2074 */
2075 lck_mtx_unlock(&provider->ftp_cmtx);
2076 lck_mtx_lock(&provider->ftp_mtx);
2077 provider->ftp_ccount--;
2078 provider->ftp_marked = 1;
2079 lck_mtx_unlock(&provider->ftp_mtx);
2080
2081 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2082
2083 return (ENOMEM);
2084 }
2085
2086 /*ARGSUSED*/
2087 static void *
2088 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2089 {
2090 #pragma unused(arg)
2091 fasttrap_provider_t *provider;
2092
2093 /*
2094 * A 32-bit unsigned integer (like a pid for example) can be
2095 * expressed in 10 or fewer decimal digits. Make sure that we'll
2096 * have enough space for the provider name.
2097 */
2098 if (strlen(dhpv->dthpv_provname) + 10 >=
2099 sizeof (provider->ftp_name)) {
2100 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2101 "name too long to accomodate pid", dhpv->dthpv_provname);
2102 return (NULL);
2103 }
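/*
 * Sketch of the arithmetic above (provider name and pid hypothetical): a
 * helper provider "mystuff" instantiated for pid 1234 is ultimately
 * registered with DTrace as "mystuff1234", so strlen("mystuff") plus up to
 * 10 pid digits plus the terminating NUL must fit in ftp_name.
 */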
2104
2105 /*
2106 * Don't let folks spoof the true pid provider.
2107 */
2108 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
2109 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2110 "%s is an invalid name", dhpv->dthpv_provname,
2111 FASTTRAP_PID_NAME);
2112 return (NULL);
2113 }
2114
2115 /*
2116 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2117 */
2118 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
2119 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2120 "%s is an invalid name", dhpv->dthpv_provname,
2121 FASTTRAP_OBJC_NAME);
2122 return (NULL);
2123 }
2124 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
2125 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2126 "%s is an invalid name", dhpv->dthpv_provname,
2127 FASTTRAP_ONESHOT_NAME);
2128 return (NULL);
2129 }
2130
2131 /*
2132 * The highest stability class that fasttrap supports is ISA; cap
2133 * the stability of the new provider accordingly.
2134 */
2135 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2136 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2137 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2138 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2139 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2140 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2141 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2142 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2143 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2144 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2145
2146 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2147 &dhpv->dthpv_pattr)) == NULL) {
2148 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2149 "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
2150 return (NULL);
2151 }
2152
2153 /*
2154 * APPLE NOTE!
2155 *
2156 * USDT probes (fasttrap meta probes) are very expensive to create.
2157 * Profiling has shown that the largest single cost is verifying that
2158 * dtrace hasn't already created a given meta_probe. The reason for
2159 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2160 * each static probe being created. We want to get rid of that check.
2161 * The simplest way of eliminating it is to deny the ability to add
2162 * probes to an existing provider. If the provider already exists, BZZT!
2163 * This still leaves the possibility of intentionally malformed DOF
2164 * having duplicate probes. However, duplicate probes are not fatal,
2165 * and there is no way to get that by accident, so we will not check
2166 * for that case.
2167 *
2168 * UPDATE: It turns out there are several use cases that require adding
2169 * probes to existing providers. Disabling the dtrace_probe_lookup()
2170 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2171 */
2172
2173 /*
2174 * Up the meta provider count so this provider isn't removed until
2175 * the meta provider has been told to remove it.
2176 */
2177 provider->ftp_mcount++;
2178
2179 lck_mtx_unlock(&provider->ftp_mtx);
2180
2181 return (provider);
2182 }
2183
2184 /*ARGSUSED*/
2185 static void
2186 fasttrap_meta_create_probe(void *arg, void *parg,
2187 dtrace_helper_probedesc_t *dhpb)
2188 {
2189 #pragma unused(arg)
2190 fasttrap_provider_t *provider = parg;
2191 fasttrap_probe_t *pp;
2192 fasttrap_tracepoint_t *tp;
2193 unsigned int i, j;
2194 uint32_t ntps;
2195
2196 /*
2197 * Since the meta provider count is non-zero we don't have to worry
2198 * about this provider disappearing.
2199 */
2200 ASSERT(provider->ftp_mcount > 0);
2201
2202 /*
2203 * The offsets must be unique.
2204 */
2205 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2206 fasttrap_uint32_cmp);
2207 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2208 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2209 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2210 return;
2211 }
2212
2213 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2214 fasttrap_uint32_cmp);
2215 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2216 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2217 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2218 return;
2219 }
2220
2221 /*
2222 * Grab the creation lock to ensure consistency between calls to
2223 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2224 * other threads creating probes.
2225 */
2226 lck_mtx_lock(&provider->ftp_cmtx);
2227
2228 #if 0
2229 /*
2230 * APPLE NOTE: This is hideously expensive. See note in
2231 * fasttrap_meta_provide() for why we can get away without
2232 * checking here.
2233 */
2234 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2235 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2236 lck_mtx_unlock(&provider->ftp_cmtx);
2237 return;
2238 }
2239 #endif
2240
2241 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2242 ASSERT(ntps > 0);
2243
2244 os_atomic_add(&fasttrap_total, ntps, relaxed);
2245
2246 if (fasttrap_total > fasttrap_max) {
2247 os_atomic_sub(&fasttrap_total, ntps, relaxed);
2248 lck_mtx_unlock(&provider->ftp_cmtx);
2249 return;
2250 }
2251
2252 provider->ftp_pcount += ntps;
2253
2254 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2255 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2256 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2257 } else {
2258 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2259 }
2260
2261 pp->ftp_prov = provider;
2262 pp->ftp_pid = provider->ftp_pid;
2263 pp->ftp_ntps = ntps;
2264 pp->ftp_nargs = dhpb->dthpb_xargc;
2265 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2266 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2267
2268 /*
2269 * First create a tracepoint for each actual point of interest.
2270 */
2271 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2272 tp = zalloc(fasttrap_tracepoint_t_zone);
2273 bzero(tp, sizeof (fasttrap_tracepoint_t));
2274
2275 tp->ftt_proc = provider->ftp_proc;
2276
2277 /*
2278 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2279 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2280 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2281 */
2282 #if defined(__x86_64__)
2283 /*
2284 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2285 */
2286 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2287 #elif defined(__arm__) || defined(__arm64__)
2288 /*
2289 * All ARM and ARM64 probes are zero offset. We need to zero out the
2290 * thumb bit because we still support 32bit user processes.
2291 * On 64bit user processes, bit zero won't be set anyway.
2292 */
2293 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
2294 tp->ftt_fntype = FASTTRAP_FN_USDT;
2295 #else
2296 #error "Architecture not supported"
2297 #endif
2298
2299 tp->ftt_pid = provider->ftp_pid;
2300
2301 pp->ftp_tps[i].fit_tp = tp;
2302 pp->ftp_tps[i].fit_id.fti_probe = pp;
2303 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2304 }
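/*
 * Worked example of the x86_64 fixup above (addresses hypothetical): with
 * dthpb_base == 0x100000000 and dthpb_offs[i] == 0x25, the relocation
 * points one byte past the probe site, so ftt_pc becomes 0x100000024, the
 * first byte of the NOP region.
 */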
2305
2306 /*
2307 * Then create a tracepoint for each is-enabled point.
2308 */
2309 for (j = 0; i < ntps; i++, j++) {
2310 tp = zalloc(fasttrap_tracepoint_t_zone);
2311 bzero(tp, sizeof (fasttrap_tracepoint_t));
2312
2313 tp->ftt_proc = provider->ftp_proc;
2314
2315 /*
2316 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2317 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2318 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2319 */
2320 #if defined(__x86_64__)
2321 /*
2322 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2323 */
2324 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2325 #elif defined(__arm__) || defined(__arm64__)
2326 /*
2327 * All ARM and ARM64 probes are zero offset. We need to zero out the
2328 * thumb bit because we still support 32bit user processes.
2329 * On 64bit user processes, bit zero won't be set anyway.
2330 */
2331 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
2332 tp->ftt_fntype = FASTTRAP_FN_USDT;
2333 #else
2334 #error "Architecture not supported"
2335 #endif
2336
2337 tp->ftt_pid = provider->ftp_pid;
2338
2339 pp->ftp_tps[i].fit_tp = tp;
2340 pp->ftp_tps[i].fit_id.fti_probe = pp;
2341 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2342 }
2343
2344 /*
2345 * If the arguments are shuffled around we set the argument remapping
2346 * table. Later, when the probe fires, we only remap the arguments
2347 * if the table is non-NULL.
2348 */
2349 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2350 if (dhpb->dthpb_args[i] != i) {
2351 pp->ftp_argmap = dhpb->dthpb_args;
2352 break;
2353 }
2354 }
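/*
 * Illustrative remap (argument layout hypothetical): if the provider
 * declares args (a, b, c) but the probe site supplies them as (b, a, c),
 * dthpb_args is { 1, 0, 2 }; since dthpb_args[0] != 0 the table is
 * retained and consulted when the probe fires. An identity mapping
 * { 0, 1, 2 } leaves ftp_argmap NULL and the remapping cost is never paid.
 */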
2355
2356 /*
2357 * The probe is fully constructed -- register it with DTrace.
2358 */
2359 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2360 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2361
2362 lck_mtx_unlock(&provider->ftp_cmtx);
2363 }
2364
2365 /*ARGSUSED*/
2366 static void
2367 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2368 {
2369 #pragma unused(arg)
2370 /*
2371 * Clean up the USDT provider. There may be active consumers of the
2372 * provider busy adding probes; no damage will actually befall the
2373 * provider until that count has dropped to zero. This just puts
2374 * the provider on death row.
2375 */
2376 fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
2377 }
2378
2379 static char*
2380 fasttrap_meta_provider_name(void *arg)
2381 {
2382 fasttrap_provider_t *fprovider = arg;
2383 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2384 return provider->dtpv_name;
2385 }
2386
2387 static dtrace_mops_t fasttrap_mops = {
2388 .dtms_create_probe = fasttrap_meta_create_probe,
2389 .dtms_provide_proc = fasttrap_meta_provide,
2390 .dtms_remove_proc = fasttrap_meta_remove,
2391 .dtms_provider_name = fasttrap_meta_provider_name
2392 };
2393
2394 /*
2395 * Validate a null-terminated string. If str is not null-terminated
2396 * or is not a valid UTF8 string, the function returns -1. Otherwise,
2397 * 0 is returned.
2398 *
2399 * str: string to validate.
2400 * maxlen: maximum length of the string, including the terminating NUL byte.
2401 */
2402 static int
2403 fasttrap_validatestr(char const* str, size_t maxlen) {
2404 size_t len;
2405
2406 assert(str);
2407 assert(maxlen != 0);
2408
2409 /* Check if the string is null-terminated. */
2410 len = strnlen(str, maxlen);
2411 if (len >= maxlen)
2412 return -1;
2413
2414 /* Finally, check for UTF8 validity. */
2415 return utf8_validatestr((unsigned const char*) str, len);
2416 }
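/*
 * Usage sketch (illustrative, not compiled): this is how the ioctl path
 * below applies the validator to the fixed-size name buffers of a probe
 * specification copied in from user space.
 */
#if 0
	fasttrap_probe_spec_t spec;	/* assume already copied in */

	if (fasttrap_validatestr(spec.ftps_func, sizeof(spec.ftps_func)) != 0 ||
	    fasttrap_validatestr(spec.ftps_mod, sizeof(spec.ftps_mod)) != 0)
		return (EINVAL);
#endif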
2417
2418 /*ARGSUSED*/
2419 static int
2420 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2421 {
2422 #pragma unused(dev, md, rv)
2423 if (!dtrace_attached())
2424 return (EAGAIN);
2425
2426 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2427 fasttrap_probe_spec_t *probe;
2428 uint64_t noffs;
2429 size_t size;
2430 int ret;
2431
2432 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2433 sizeof (probe->ftps_noffs)))
2434 return (EFAULT);
2435
2436 /*
2437 * Probes must have at least one tracepoint.
2438 */
2439 if (noffs == 0)
2440 return (EINVAL);
2441
2442 /*
2443 * We want to check the number of noffs before doing
2444 * sizing math, to prevent potential buffer overflows.
2445 */
2446 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2447 return (ENOMEM);
2448
2449 size = sizeof (fasttrap_probe_spec_t) +
2450 sizeof (probe->ftps_offs[0]) * (noffs - 1);
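/*
 * Worked sizing example (noffs hypothetical): the spec struct already
 * embeds one ftps_offs element, hence the (noffs - 1). For noffs == 3 the
 * allocation is sizeof (fasttrap_probe_spec_t) + 2 * sizeof (uint64_t);
 * the bound checked above caps the whole allocation at 1MB.
 */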
2451
2452 probe = kmem_alloc(size, KM_SLEEP);
2453
2454 if (copyin(arg, probe, size) != 0 ||
2455 probe->ftps_noffs != noffs) {
2456 kmem_free(probe, size);
2457 return (EFAULT);
2458 }
2459
2460 /*
2461 * Verify that the function and module strings contain no
2462 * funny characters.
2463 */
2464
2465 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2466 ret = EINVAL;
2467 goto err;
2468 }
2469
2470 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2471 ret = EINVAL;
2472 goto err;
2473 }
2474
2475 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2476 proc_t *p;
2477 pid_t pid = probe->ftps_pid;
2478
2479 /*
2480 * Report an error if the process doesn't exist
2481 * or is actively being birthed.
2482 */
2483 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2484 if (p != PROC_NULL)
2485 proc_rele(p);
2486 ret = ESRCH;
2487 goto err;
2488 }
2489 // proc_lock(p);
2490 // FIXME! How is this done on OS X?
2491 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2492 // VREAD | VWRITE)) != 0) {
2493 // mutex_exit(&p->p_lock);
2494 // return (ret);
2495 // }
2496 // proc_unlock(p);
2497 proc_rele(p);
2498 }
2499
2500 ret = fasttrap_add_probe(probe);
2501
2502 err:
2503 kmem_free(probe, size);
2504
2505 return (ret);
2506
2507 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2508 fasttrap_instr_query_t instr;
2509 fasttrap_tracepoint_t *tp;
2510 uint_t index;
2511 // int ret;
2512
2513 if (copyin(arg, &instr, sizeof (instr)) != 0)
2514 return (EFAULT);
2515
2516 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2517 proc_t *p;
2518 pid_t pid = instr.ftiq_pid;
2519
2520 /*
2521 * Report an error if the process doesn't exist
2522 * or is actively being birthed.
2523 */
2524 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2525 if (p != PROC_NULL)
2526 proc_rele(p);
2527 return (ESRCH);
2528 }
2529 //proc_lock(p);
2530 // FIXME! How is this done on OS X?
2531 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2532 // VREAD)) != 0) {
2533 // mutex_exit(&p->p_lock);
2534 // return (ret);
2535 // }
2536 // proc_unlock(p);
2537 proc_rele(p);
2538 }
2539
2540 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2541
2542 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2543 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2544 while (tp != NULL) {
2545 if (instr.ftiq_pid == tp->ftt_pid &&
2546 instr.ftiq_pc == tp->ftt_pc &&
2547 tp->ftt_proc->ftpc_acount != 0)
2548 break;
2549
2550 tp = tp->ftt_next;
2551 }
2552
2553 if (tp == NULL) {
2554 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2555 return (ENOENT);
2556 }
2557
2558 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2559 sizeof (instr.ftiq_instr));
2560 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2561
2562 if (copyout(&instr, arg, sizeof (instr)) != 0)
2563 return (EFAULT);
2564
2565 return (0);
2566 }
2567
2568 return (EINVAL);
2569 }
2570
2571 static void
2572 fasttrap_attach(void)
2573 {
2574 ulong_t nent;
2575 unsigned int i;
2576
2577 /*
2578 * Install our hooks into fork(2), exec(2), and exit(2).
2579 */
2580 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2581 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2582 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
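/*
 * Note that exec(2) and exit(2) share a single handler: in both cases
 * fasttrap_exec_exit() retires the process's providers, since tracepoints
 * must not survive into a freshly exec'd image any more than past exit.
 */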
2583
2584 /*
2585 * APPLE NOTE: We size the maximum number of fasttrap probes
2586 * based on system memory. 100k probes per 256M of system memory.
2587 * Yes, this is a WAG.
2588 */
2589 fasttrap_max = (sane_size >> 28) * 100000;
2590
2591 if (fasttrap_max == 0)
2592 fasttrap_max = 50000;
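/*
 * Worked example of the WAG above (memory size hypothetical): with 8GB of
 * RAM, sane_size >> 28 == 32, giving fasttrap_max == 3,200,000; below
 * 256MB the shift yields 0 and the 50,000 floor applies instead.
 */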
2593
2594 fasttrap_total = 0;
2595 fasttrap_retired = 0;
2596
2597 /*
2598 * Conjure up the tracepoints hashtable...
2599 */
2600 #ifdef illumos
2601 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2602 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2603 #else
2604 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2605 #endif
2606
2607 if (nent <= 0 || nent > 0x1000000)
2608 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2609
2610 if ((nent & (nent - 1)) == 0)
2611 fasttrap_tpoints.fth_nent = nent;
2612 else
2613 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
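/*
 * Illustrative rounding (nent hypothetical, and assuming fasttrap_highbit()
 * returns the 1-based index of the highest set bit, as in the illumos
 * original): a non-power-of-two size such as 1000 becomes 1 << 10 == 1024,
 * so fth_mask = fth_nent - 1 below works as a cheap modulus when hashing
 * pid/pc pairs into buckets.
 */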
2614 ASSERT(fasttrap_tpoints.fth_nent > 0);
2615 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2616 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2617 sizeof (fasttrap_bucket_t), KM_SLEEP);
2618 ASSERT(fasttrap_tpoints.fth_table != NULL);
2619
2620 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
2621 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2622 }
2623
2624 /*
2625 * ... and the providers hash table...
2626 */
2627 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2628 if ((nent & (nent - 1)) == 0)
2629 fasttrap_provs.fth_nent = nent;
2630 else
2631 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2632 ASSERT(fasttrap_provs.fth_nent > 0);
2633 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2634 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2635 sizeof (fasttrap_bucket_t), KM_SLEEP);
2636 ASSERT(fasttrap_provs.fth_table != NULL);
2637
2638 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2639 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2640 }
2641
2642 /*
2643 * ... and the procs hash table.
2644 */
2645 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2646 if ((nent & (nent - 1)) == 0)
2647 fasttrap_procs.fth_nent = nent;
2648 else
2649 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2650 ASSERT(fasttrap_procs.fth_nent > 0);
2651 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2652 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2653 sizeof (fasttrap_bucket_t), KM_SLEEP);
2654 ASSERT(fasttrap_procs.fth_table != NULL);
2655
2656 #ifndef illumos
2657 for (i = 0; i < fasttrap_procs.fth_nent; i++) {
2658 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2659 }
2660 #endif
2661
2662 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2663 &fasttrap_meta_id);
2664 }
2665
2666 static int
2667 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2668 {
2669 #pragma unused(dev, flags, devtype, p)
2670 return 0;
2671 }
2672
2673 static int
2674 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2675 {
2676 int err, rv = 0;
2677 user_addr_t uaddrp;
2678
2679 if (proc_is64bit(p)) {
2680 uaddrp = *(user_addr_t *)data;
2681 } else {
2682 uaddrp = (user_addr_t) *(uint32_t *)data;
2683 }
2684
2685 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2686
2687 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2688 if (err != 0) {
2689 ASSERT( (err & 0xfffff000) == 0 );
2690 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2691 } else if (rv != 0) {
2692 ASSERT( (rv & 0xfff00000) == 0 );
2693 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2694 } else
2695 return 0;
2696 }
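/*
 * Worked encoding example (values hypothetical): err == 22 (EINVAL) is
 * returned as-is, so user space sees errno 22; a Solaris-style return
 * value rv == 5 comes back as errno (5 << 12) == 20480, from which the
 * consumer recovers rv as (errno >> 12).
 */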
2697
2698 static int fasttrap_inited = 0;
2699
2700 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2701
2702 static const struct cdevsw fasttrap_cdevsw =
2703 {
2704 .d_open = _fasttrap_open,
2705 .d_close = eno_opcl,
2706 .d_read = eno_rdwrt,
2707 .d_write = eno_rdwrt,
2708 .d_ioctl = _fasttrap_ioctl,
2709 .d_stop = (stop_fcn_t *)nulldev,
2710 .d_reset = (reset_fcn_t *)nulldev,
2711 .d_select = eno_select,
2712 .d_mmap = eno_mmap,
2713 .d_strategy = eno_strat,
2714 .d_reserved_1 = eno_getc,
2715 .d_reserved_2 = eno_putc,
2716 };
2717
2718 void fasttrap_init(void);
2719
2720 void
2721 fasttrap_init( void )
2722 {
2723 /*
2724 * This method is now invoked from multiple places: any open of /dev/dtrace,
2725 * and from dtrace_init() if dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2726 *
2727 * The reason is to delay allocating the (rather large) resources as late as possible.
2728 */
2729 if (!fasttrap_inited) {
2730 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2731
2732 if (majdevno < 0) {
2733 // FIX ME! What kind of error reporting to do here?
2734 printf("fasttrap_init: failed to allocate a major number!\n");
2735 return;
2736 }
2737
2738 dev_t device = makedev( (uint32_t)majdevno, 0 );
2739 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2740 return;
2741 }
2742
2743 /*
2744 * fasttrap_probe_t's are variable in size. We use an array of zones to
2745 * cover the most common sizes.
2746 */
2747 int i;
2748 for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2749 fasttrap_probe_t_zones[i] =
2750 zone_create(fasttrap_probe_t_zone_names[i],
2751 offsetof(fasttrap_probe_t, ftp_tps[i]), ZC_NONE);
2752 }
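/*
 * Illustrative sizing (ntps hypothetical): a probe with ftp_ntps == 3 is
 * carved from fasttrap_probe_t_zones[3], whose element size is
 * offsetof(fasttrap_probe_t, ftp_tps[3]). Larger tracepoint counts fall
 * back to kmem_zalloc(), as in fasttrap_add_probe() and
 * fasttrap_meta_create_probe() above.
 */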
2753
2754
2755 /*
2756 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2757 */
2758 fasttrap_lck_attr = lck_attr_alloc_init();
2759 fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
2760 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2761
2762 /*
2763 * Initialize global locks
2764 */
2765 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2766 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2767
2768 fasttrap_attach();
2769
2770 /*
2771 * Start the fasttrap cleanup thread
2772 */
2773 kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
2774 if (res != KERN_SUCCESS) {
2775 panic("Could not create fasttrap_cleanup_thread");
2776 }
2777 thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");
2778
2779 fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
2780 fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
2781 KM_SLEEP);
2782 lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2783
2784 fasttrap_inited = 1;
2785 }
2786 }
2787
2788 #undef FASTTRAP_MAJOR