/* apple/xnu (xnu-7195.101.1): bsd/dev/dtrace/fasttrap.c */

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <security/mac_framework.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>

extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

__private_extern__
void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
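
/*
 * An example of the reference-count dance described above (a sketch;
 * fasttrap_add_probe() below does exactly this): to move from the
 * provider lock to the creation lock without ever holding both, pin
 * the provider first.
 *
 *	lck_mtx_lock(&provider->ftp_mtx);
 *	provider->ftp_ccount++;			(pin the provider)
 *	lck_mtx_unlock(&provider->ftp_mtx);
 *	lck_mtx_lock(&provider->ftp_cmtx);	(now safe to take)
 */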

static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static LCK_GRP_DECLARE(fasttrap_lck_grp, "fasttrap");
static LCK_ATTR_DECLARE(fasttrap_lck_attr, 0, 0);
static LCK_MTX_DECLARE_ATTR(fasttrap_cleanup_mtx,
    &fasttrap_lck_grp, &fasttrap_lck_attr);


#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */

static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;


#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;
static fasttrap_hash_t fasttrap_procs;

static uint64_t fasttrap_pid_count;	/* pid ref count */
static LCK_MTX_DECLARE_ATTR(fasttrap_count_mtx,	/* lock on ref count */
    &fasttrap_lck_grp, &fasttrap_lck_attr);

#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t *, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t *, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
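
/*
 * The hash tables are sized to powers of two (see the default sizes
 * above), so fth_mask is the table size minus one and the AND above
 * selects a bucket directly.
 */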

/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial: roughly
 * 56 bytes per element, or over 1MB in all.
 */

ZONE_DECLARE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t",
    sizeof(fasttrap_tracepoint_t), ZC_NONE);

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};

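/*
 * Return the 1-indexed position of the highest bit set in i, or 0 if
 * i is zero; e.g. fasttrap_highbit(0x10) returns 5.
 */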
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

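/*
 * Classic PJW-style string hash: shift in four bits per character and
 * fold the high nibble back into the low-order bits.
 */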
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}

static void fasttrap_pid_cleanup(uint32_t);

static unsigned int
fasttrap_pid_cleanup_providers(void)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {
				fpp = &fp->ftp_next;
				continue;
			}

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				fp->ftp_marked = 0;
				lck_mtx_unlock(&fp->ftp_mtx);
				continue;
			}

			if (!fp->ftp_retired || fp->ftp_rcount != 0)
				fp->ftp_marked = 0;

			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	return later;
}

typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static LCK_MTX_DECLARE_ATTR(fasttrap_retired_mtx,
    &fasttrap_lck_grp, &fasttrap_lck_attr);

#define DEFAULT_RETIRED_SIZE 256

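/*
 * Drain the queue of retired tracepoint specs: look each one up again
 * in the tracepoint hash table and remove it if it is still retired.
 * Consecutive specs for the same pid share a single sprlock().
 */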
static void
fasttrap_tracepoint_cleanup(void)
{
	size_t i;
	pid_t pid = 0;
	user_addr_t pc;
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
				sprunlock(p);
			}
			if ((p = sprlock(pid)) == PROC_NULL) {
				pid = 0;
				continue;
			}
		}
		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;
		}
		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe.
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);
			continue;
		}
		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);
	}
	if (p != PROC_NULL) {
		sprunlock(p);
	}

	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
}

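/*
 * Queue a tracepoint for removal by the cleanup thread. The backing
 * spec array doubles when full, so appends are amortized O(1).
 */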
void
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
{
	if (tp->ftt_retired)
		return;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = p->p_pid;
	s->fttps_pc = tp->ftt_pc;

	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
		    fasttrap_retired_size * 2 *
		    sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		fasttrap_retired_size *= 2;
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	tp->ftt_retired = 1;

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}

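/*
 * Boost the cleanup thread's priority when tracepoints are scarce:
 * if more than 90% of fasttrap_max is in use, or more than half of it
 * is sitting in retired probes, run the cleanup at a higher precedence.
 */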
static void
fasttrap_pid_cleanup_compute_priority(void)
{
	if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
		thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	} else {
		thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
__attribute__((noreturn))
static void
fasttrap_pid_cleanup_cb(void)
{
	uint32_t work = 0;
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
	while (1) {
		unsigned int later = 0;

		work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
		}
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();
		}
		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled
			 * (if detach fails).
			 */
			if (later > 0) {
				struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
			} else
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		}
	}
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

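/*
 * Register our intent to modify the process's text with code signing.
 * Returns 0 on success, EACCES if the process disallows invalid code
 * pages, or ESRCH if the process could not be re-acquired after a MAC
 * policy upcall.
 */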
static int
fasttrap_setdebug(proc_t *p)
{
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * CS_KILL and CS_HARD will cause code-signing to kill the process
	 * when the process text is modified, so register the intent
	 * to allow invalid access beforehand.
	 */
	if ((p->p_csflags & (CS_KILL|CS_HARD))) {
		proc_unlock(p);
		for (int i = 0; i < DTRACE_NCLIENTS; i++) {
			dtrace_state_t *state = dtrace_state_get(i);
			if (state == NULL)
				continue;
			if (state->dts_cred.dcr_cred == NULL)
				continue;
			/*
			 * The get_task call flags whether the process should
			 * be flagged to have the cs_allow_invalid call
			 * succeed. We want the best credential that any dtrace
			 * client has, so try all of them.
			 */

			/*
			 * mac_proc_check_get_task() can trigger upcalls. It's
			 * not safe to hold proc references across upcalls, so
			 * just drop the reference. Given the context, it
			 * should not be possible for the process to actually
			 * disappear.
			 */
			struct proc_ident pident = proc_ident(p);
			sprunlock(p);
			p = PROC_NULL;

			(void) mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident, TASK_FLAVOR_CONTROL);

			p = sprlock(pident.p_pid);
			if (p == PROC_NULL) {
				return (ESRCH);
			}
		}
		int rc = cs_allow_invalid(p);
		proc_lock(p);
		if (rc == 0) {
			return (EACCES);
		}
	}
	return (0);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	unsigned int i;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(cp->p_pid)) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
		return;
	}

	proc_lock(cp);
	if (fasttrap_setdebug(cp) == ESRCH) {
		printf("fasttrap_fork: failed to re-acquire proc\n");
		return;
	}
	proc_unlock(cp);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);

	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock.
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	proc_lock(p);
}

/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	//ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		int rc = 0;
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;
		}
		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		tp->ftt_retired = 0;

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return rc;
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;
		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;

		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		/* Fix compiler warning... */
		idp = NULL;
		ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process.
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}

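/*
 * Install the global pid-probe entry points the first time any probe
 * is enabled; fasttrap_pid_count counts the enabled probes that rely
 * on them.
 */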
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken, it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return (0);

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */
		return (0);
	}

	proc_lock(p);
	int p_pid = proc_pid(p);

	rc = fasttrap_setdebug(p);
	switch (rc) {
	case EACCES:
		proc_unlock(p);
		sprunlock(p);
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Process does not allow invalid code pages\n", p_pid);
		return (0);
	case ESRCH:
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Failed to re-acquire process\n", p_pid);
		return (0);
	default:
		assert(rc == 0);
		break;
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on-demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	// ASSERT(!(p->p_flag & SVFORK));
	proc_unlock(p);

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return (0);
		}
	}

	sprunlock(p);

	probe->ftp_enabled = 1;
	return (0);
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
		// ASSERT(!(p->p_flag & SVFORK));
	}

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack) {
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
	}

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
	os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
	}

	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
}

static const dtrace_pattr_t pid_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_pid_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_usdt_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};

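/*
 * Find or create the fasttrap_proc_t for the given pid and return it
 * with an additional reference. Note the allocate-then-recheck pattern:
 * the bucket lock is dropped around the sleeping allocation, so a second
 * lap through the chain is required before installing the new entry.
 */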
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	ASSERT(new_fprc != NULL);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	/*
	 * APPLE NOTE: We have to initialize all locks explicitly.
	 */
	lck_mtx_init(&new_fprc->ftpc_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	lck_mtx_unlock(&bucket->ftb_mtx);

	return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);
		return;
	}

	lck_mtx_unlock(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, &fasttrap_lck_grp);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}

/*
 * Lookup a fasttrap-managed provider based on its name and associated proc.
 * A reference to the proc must be held for the duration of the call.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
{
	pid_t pid = p->p_pid;
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    fp->ftp_provider_type == provider_type &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	proc_lock(p);
	if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
		proc_unlock(p);
		return (NULL);
	}

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 * APPLE NOTE: We have no equivalent to crhold,
	 * even though there is a cr_ref field in ucred.
	 */
	cred = kauth_cred_proc_ref(p);
	proc_unlock(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = p->p_pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
	new_fp->ftp_provider_type = provider_type;

	/*
	 * APPLE NOTE: locks require explicit init.
	 */
	lck_mtx_init(&new_fp->ftp_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, &fasttrap_lck_grp, &fasttrap_lck_attr);

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);
			kauth_cred_unref(&cred);
			return (fp);
		}
	}

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		kauth_cred_unref(&cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);

	kauth_cred_unref(&cred);

	return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, &fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, &fasttrap_lck_grp);

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {
		return;
	}

	proc_lock(p);
	p->p_dtrace_probes--;
	proc_unlock(p);

	proc_rele(p);
}

static void
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice.
	 */
	os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
	fp->ftp_pcount = 0;

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
}

1843 static int
1844 fasttrap_uint32_cmp(const void *ap, const void *bp)
1845 {
1846 	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1847 }
1848
1849 static int
1850 fasttrap_uint64_cmp(const void *ap, const void *bp)
1851 {
1852 	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1853 }
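/*
 * A note on the comparators above: the classic "return (a - b)" idiom is
 * unsafe here. For unsigned operands the difference wraps instead of going
 * negative, and for 64-bit operands it is truncated when narrowed to int
 * (e.g. a difference of 0x100000000 would compare as "equal"). The
 * (a > b) - (a < b) form always yields exactly -1, 0, or 1.
 */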
1854
1855 static int
1856 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1857 {
1858 proc_t *p;
1859 fasttrap_provider_t *provider;
1860 fasttrap_probe_t *pp;
1861 fasttrap_tracepoint_t *tp;
1862 const char *name;
1863 unsigned int i, aframes, whack;
1864
1865 /*
1866 * There needs to be at least one desired trace point.
1867 */
1868 if (pdata->ftps_noffs == 0)
1869 return (EINVAL);
1870
1871 switch (pdata->ftps_probe_type) {
1872 case DTFTP_ENTRY:
1873 name = "entry";
1874 aframes = FASTTRAP_ENTRY_AFRAMES;
1875 break;
1876 case DTFTP_RETURN:
1877 name = "return";
1878 aframes = FASTTRAP_RETURN_AFRAMES;
1879 break;
1880 case DTFTP_OFFSETS:
1881 aframes = 0;
1882 name = NULL;
1883 break;
1884 default:
1885 return (EINVAL);
1886 }
1887
1888 const char* provider_name;
1889 switch (pdata->ftps_provider_type) {
1890 case DTFTP_PROVIDER_PID:
1891 provider_name = FASTTRAP_PID_NAME;
1892 break;
1893 case DTFTP_PROVIDER_OBJC:
1894 provider_name = FASTTRAP_OBJC_NAME;
1895 break;
1896 case DTFTP_PROVIDER_ONESHOT:
1897 provider_name = FASTTRAP_ONESHOT_NAME;
1898 break;
1899 default:
1900 return (EINVAL);
1901 }
1902
1903 p = proc_find(pdata->ftps_pid);
1904 if (p == PROC_NULL)
1905 return (ESRCH);
1906
1907 if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
1908 provider_name, &pid_attr)) == NULL) {
1909 proc_rele(p);
1910 return (ESRCH);
1911 }
1912
1913 proc_rele(p);
1914 /*
1915 * Increment this reference count to indicate that a consumer is
1916 * actively adding a new probe associated with this provider. This
1917 * prevents the provider from being deleted -- we'll need to check
1918 * for pending deletions when we drop this reference count.
1919 */
1920 provider->ftp_ccount++;
1921 lck_mtx_unlock(&provider->ftp_mtx);
1922
1923 /*
1924 * Grab the creation lock to ensure consistency between calls to
1925 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1926 * other threads creating probes. We must drop the provider lock
1927 * before taking this lock to avoid a three-way deadlock with the
1928 * DTrace framework.
1929 */
1930 lck_mtx_lock(&provider->ftp_cmtx);
1931
1932 if (name == NULL) {
1933 for (i = 0; i < pdata->ftps_noffs; i++) {
1934 char name_str[17];
1935
1936 (void) snprintf(name_str, sizeof(name_str), "%llx",
1937 (uint64_t)pdata->ftps_offs[i]);
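/*
 * The probe name is simply the hex offset, so (to use illustrative
 * values) offset 0x1a4 in function f of module m becomes a probe
 * like pid123:m:f:1a4.
 */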
1938
1939 if (dtrace_probe_lookup(provider->ftp_provid,
1940 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1941 continue;
1942
1943 os_atomic_inc(&fasttrap_total, relaxed);
1944 if (fasttrap_total > fasttrap_max) {
1945 os_atomic_dec(&fasttrap_total, relaxed);
1946 goto no_mem;
1947 }
1948 provider->ftp_pcount++;
1949
1950 pp = zalloc(fasttrap_probe_t_zones[1]);
1951 bzero(pp, sizeof (fasttrap_probe_t));
1952
1953 pp->ftp_prov = provider;
1954 pp->ftp_faddr = pdata->ftps_pc;
1955 pp->ftp_fsize = pdata->ftps_size;
1956 pp->ftp_pid = pdata->ftps_pid;
1957 pp->ftp_ntps = 1;
1958
1959 tp = zalloc(fasttrap_tracepoint_t_zone);
1960 bzero(tp, sizeof (fasttrap_tracepoint_t));
1961
1962 tp->ftt_proc = provider->ftp_proc;
1963 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1964 tp->ftt_pid = pdata->ftps_pid;
1965
1966 #if defined(__arm__) || defined(__arm64__)
1967 /*
1968 * On arm the subinfo is used to distinguish between arm
1969 * and thumb modes. On arm64 there is no thumb mode, so
1970 * this field is simply initialized to 0 on its way
1971 * into the kernel.
1972 */
1973 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1974 #endif
1975
1976 pp->ftp_tps[0].fit_tp = tp;
1977 pp->ftp_tps[0].fit_id.fti_probe = pp;
1978 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1979 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1980 pdata->ftps_mod, pdata->ftps_func, name_str,
1981 FASTTRAP_OFFSET_AFRAMES, pp);
1982 }
1983
1984 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1985 pdata->ftps_func, name) == 0) {
1986 os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);
1987
1988 if (fasttrap_total > fasttrap_max) {
1989 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
1990 goto no_mem;
1991 }
1992
1993 /*
1994 * Make sure all tracepoint program counter values are unique.
1995 * We later assume that each probe has exactly one tracepoint
1996 * for a given pc.
1997 */
1998 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1999 sizeof (uint64_t), fasttrap_uint64_cmp);
2000 for (i = 1; i < pdata->ftps_noffs; i++) {
2001 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
2002 continue;
2003
2004 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
2005 goto no_mem;
2006 }
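/*
 * For example, offsets {0x18, 0x08, 0x18} sort to {0x08, 0x18, 0x18};
 * the repeated 0x18 fails the strictly-increasing check above and the
 * whole request is rejected.
 */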
2007 provider->ftp_pcount += pdata->ftps_noffs;
2008 ASSERT(pdata->ftps_noffs > 0);
2009 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2010 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
2011 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
2012 } else {
2013 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
2014 }
2015
2016 pp->ftp_prov = provider;
2017 pp->ftp_faddr = pdata->ftps_pc;
2018 pp->ftp_fsize = pdata->ftps_size;
2019 pp->ftp_pid = pdata->ftps_pid;
2020 pp->ftp_ntps = pdata->ftps_noffs;
2021
2022 for (i = 0; i < pdata->ftps_noffs; i++) {
2023 tp = zalloc(fasttrap_tracepoint_t_zone);
2024 bzero(tp, sizeof (fasttrap_tracepoint_t));
2025 tp->ftt_proc = provider->ftp_proc;
2026 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
2027 tp->ftt_pid = pdata->ftps_pid;
2028
2029 #if defined(__arm__) || defined(__arm64__)
2030 /*
2031 * On arm the subinfo is used to distinguish between arm
2032 * and thumb modes. On arm64 there is no thumb mode, so
2033 * this field is simply initialized to 0 on its way
2034 * into the kernel.
2035 */
2036
2037 tp->ftt_fntype = pdata->ftps_arch_subinfo;
2038 #endif
2039 pp->ftp_tps[i].fit_tp = tp;
2040 pp->ftp_tps[i].fit_id.fti_probe = pp;
2041 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
2042 }
2043
2044 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
2045 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
2046 }
2047
2048 lck_mtx_unlock(&provider->ftp_cmtx);
2049
2050 /*
2051 * We know that the provider is still valid since we incremented the
2052 * creation reference count. If someone tried to clean up this provider
2053 * while we were using it (e.g. because the process called exec(2) or
2054 * exit(2)), take note of that and try to clean it up now.
2055 */
2056 lck_mtx_lock(&provider->ftp_mtx);
2057 provider->ftp_ccount--;
2058 whack = provider->ftp_retired;
2059 lck_mtx_unlock(&provider->ftp_mtx);
2060
2061 if (whack)
2062 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2063
2064 return (0);
2065
2066 no_mem:
2067 /*
2068 * If we've exhausted the allowable resources, we'll try to remove
2069 * this provider to free some up. This is to cover the case where
2070 * the user has accidentally created many more probes than was
2071 * intended (e.g. pid123:::).
2072 */
2073 lck_mtx_unlock(&provider->ftp_cmtx);
2074 lck_mtx_lock(&provider->ftp_mtx);
2075 provider->ftp_ccount--;
2076 provider->ftp_marked = 1;
2077 lck_mtx_unlock(&provider->ftp_mtx);
2078
2079 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2080
2081 return (ENOMEM);
2082 }
2083
2084 /*ARGSUSED*/
2085 static void *
2086 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2087 {
2088 #pragma unused(arg)
2089 fasttrap_provider_t *provider;
2090
2091 /*
2092 * A 32-bit unsigned integer (like a pid for example) can be
2093 * expressed in 10 or fewer decimal digits. Make sure that we'll
2094 * have enough space for the provider name.
2095 */
2096 if (strlen(dhpv->dthpv_provname) + 10 >=
2097 sizeof (provider->ftp_name)) {
2098 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2099 "name too long to accommodate pid", dhpv->dthpv_provname);
2100 return (NULL);
2101 }
2102
2103 /*
2104 * Don't let folks spoof the true pid provider.
2105 */
2106 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
2107 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2108 "%s is an invalid name", dhpv->dthpv_provname,
2109 FASTTRAP_PID_NAME);
2110 return (NULL);
2111 }
2112
2113 /*
2114 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2115 */
2116 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
2117 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2118 "%s is an invalid name", dhpv->dthpv_provname,
2119 FASTTRAP_OBJC_NAME);
2120 return (NULL);
2121 }
2122 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
2123 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2124 "%s is an invalid name", dhpv->dthpv_provname,
2125 FASTTRAP_ONESHOT_NAME);
2126 return (NULL);
2127 }
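/*
 * Since sizeof() of a string-literal macro includes the terminating NUL,
 * each strncmp above demands an exact match of the reserved name rather
 * than a mere prefix: a provider name that only begins with one of these
 * strings is still allowed.
 */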
2128
2129 /*
2130 * The highest stability class that fasttrap supports is ISA; cap
2131 * the stability of the new provider accordingly.
2132 */
2133 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2134 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2135 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2136 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2137 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2138 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2139 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2140 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2141 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2142 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2143
2144 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2145 &dhpv->dthpv_pattr)) == NULL) {
2146 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2147 "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
2148 return (NULL);
2149 }
2150
2151 /*
2152 * APPLE NOTE!
2153 *
2154 * USDT probes (fasttrap meta probes) are very expensive to create.
2155 * Profiling has shown that the largest single cost is verifying that
2156 * dtrace hasn't already created a given meta_probe. The reason for
2157 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2158 * each static probe being created. We want to get rid of that check.
2159 * The simplest way of eliminating it is to deny the ability to add
2160 * probes to an existing provider. If the provider already exists, BZZT!
2161 * This still leaves the possibility of intentionally malformed DOF
2162 * having duplicate probes. However, duplicate probes are not fatal,
2163 * and there is no way to get that by accident, so we will not check
2164 * for that case.
2165 *
2166 * UPDATE: It turns out there are several use cases that require adding
2167 * probes to existing providers. Disabling the dtrace_probe_lookup()
2168 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2169 */
2170
2171 /*
2172 * Up the meta provider count so this provider isn't removed until
2173 * the meta provider has been told to remove it.
2174 */
2175 provider->ftp_mcount++;
2176
2177 lck_mtx_unlock(&provider->ftp_mtx);
2178
2179 return (provider);
2180 }
2181
2182 /*ARGSUSED*/
2183 static void
2184 fasttrap_meta_create_probe(void *arg, void *parg,
2185 dtrace_helper_probedesc_t *dhpb)
2186 {
2187 #pragma unused(arg)
2188 fasttrap_provider_t *provider = parg;
2189 fasttrap_probe_t *pp;
2190 fasttrap_tracepoint_t *tp;
2191 unsigned int i, j;
2192 uint32_t ntps;
2193
2194 /*
2195 * Since the meta provider count is non-zero we don't have to worry
2196 * about this provider disappearing.
2197 */
2198 ASSERT(provider->ftp_mcount > 0);
2199
2200 /*
2201 * The offsets must be unique.
2202 */
2203 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2204 fasttrap_uint32_cmp);
2205 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2206 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2207 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2208 return;
2209 }
2210
2211 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2212 fasttrap_uint32_cmp);
2213 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2214 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2215 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2216 return;
2217 }
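/*
 * Note that a duplicate offset causes a bare return above:
 * fasttrap_meta_create_probe() has no way to report an error, so a
 * malformed probe description is silently dropped.
 */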
2218
2219 /*
2220 * Grab the creation lock to ensure consistency between calls to
2221 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2222 * other threads creating probes.
2223 */
2224 lck_mtx_lock(&provider->ftp_cmtx);
2225
2226 #if 0
2227 /*
2228 * APPLE NOTE: This is hideously expensive. See note in
2229 * fasttrap_meta_provide() for why we can get away without
2230 * checking here.
2231 */
2232 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2233 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2234 lck_mtx_unlock(&provider->ftp_cmtx);
2235 return;
2236 }
2237 #endif
2238
2239 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2240 ASSERT(ntps > 0);
2241
2242 os_atomic_add(&fasttrap_total, ntps, relaxed);
2243
2244 if (fasttrap_total > fasttrap_max) {
2245 os_atomic_sub(&fasttrap_total, ntps, relaxed);
2246 lck_mtx_unlock(&provider->ftp_cmtx);
2247 return;
2248 }
2249
2250 provider->ftp_pcount += ntps;
2251
2252 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2253 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2254 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2255 } else {
2256 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2257 }
2258
2259 pp->ftp_prov = provider;
2260 pp->ftp_pid = provider->ftp_pid;
2261 pp->ftp_ntps = ntps;
2262 pp->ftp_nargs = dhpb->dthpb_xargc;
2263 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2264 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2265
2266 /*
2267 * First create a tracepoint for each actual point of interest.
2268 */
2269 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2270 tp = zalloc(fasttrap_tracepoint_t_zone);
2271 bzero(tp, sizeof (fasttrap_tracepoint_t));
2272
2273 tp->ftt_proc = provider->ftp_proc;
2274
2275 /*
2276 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2277 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2278 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2279 */
2280 #if defined(__x86_64__)
2281 /*
2282 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2283 */
2284 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2285 #elif defined(__arm__) || defined(__arm64__)
2286 /*
2287 * All ARM and ARM64 probes are zero offset. We need to zero out the
2288 * thumb bit because we still support 32-bit user processes.
2289 * On 64-bit user processes, bit zero won't be set anyway.
2290 */
2291 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
2292 tp->ftt_fntype = FASTTRAP_FN_USDT;
2293 #else
2294 #error "Architecture not supported"
2295 #endif
2296
2297 tp->ftt_pid = provider->ftp_pid;
2298
2299 pp->ftp_tps[i].fit_tp = tp;
2300 pp->ftp_tps[i].fit_id.fti_probe = pp;
2301 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2302 }
2303
2304 /*
2305 * Then create a tracepoint for each is-enabled point.
2306 */
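/*
 * Note that i carries over from the offsets loop above, so the
 * is-enabled tracepoints occupy ftp_tps[dthpb_noffs .. ntps - 1] while
 * j indexes dthpb_enoffs from zero.
 */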
2307 for (j = 0; i < ntps; i++, j++) {
2308 tp = zalloc(fasttrap_tracepoint_t_zone);
2309 bzero(tp, sizeof (fasttrap_tracepoint_t));
2310
2311 tp->ftt_proc = provider->ftp_proc;
2312
2313 /*
2314 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2315 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2316 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2317 */
2318 #if defined(__x86_64__)
2319 /*
2320 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2321 */
2322 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2323 #elif defined(__arm__) || defined(__arm64__)
2324 /*
2325 * All ARM and ARM64 probes are zero offset. We need to zero out the
2326 * thumb bit because we still support 32-bit user processes.
2327 * On 64-bit user processes, bit zero won't be set anyway.
2328 */
2329 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
2330 tp->ftt_fntype = FASTTRAP_FN_USDT;
2331 #else
2332 #error "Architecture not supported"
2333 #endif
2334
2335 tp->ftt_pid = provider->ftp_pid;
2336
2337 pp->ftp_tps[i].fit_tp = tp;
2338 pp->ftp_tps[i].fit_id.fti_probe = pp;
2339 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2340 }
2341
2342 /*
2343 * If the arguments are shuffled around we set the argument remapping
2344 * table. Later, when the probe fires, we only remap the arguments
2345 * if the table is non-NULL.
2346 */
2347 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2348 if (dhpb->dthpb_args[i] != i) {
2349 pp->ftp_argmap = dhpb->dthpb_args;
2350 break;
2351 }
2352 }
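/*
 * For example, with dthpb_xargc == 3, a mapping of {1, 0, 2} swaps
 * arguments 0 and 1, so ftp_argmap is set and consulted when the probe
 * fires; the identity mapping {0, 1, 2} leaves ftp_argmap NULL and skips
 * the remapping work entirely.
 */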
2353
2354 /*
2355 * The probe is fully constructed -- register it with DTrace.
2356 */
2357 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2358 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2359
2360 lck_mtx_unlock(&provider->ftp_cmtx);
2361 }
2362
2363 /*ARGSUSED*/
2364 static void
2365 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2366 {
2367 #pragma unused(arg)
2368 /*
2369 * Clean up the USDT provider. There may be active consumers of the
2370 * provider busy adding probes, but no damage will actually befall the
2371 * provider until that count has dropped to zero. This just puts
2372 * the provider on death row.
2373 */
2374 fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
2375 }
2376
2377 static char*
2378 fasttrap_meta_provider_name(void *arg)
2379 {
2380 fasttrap_provider_t *fprovider = arg;
2381 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2382 return provider->dtpv_name;
2383 }
2384
2385 static dtrace_mops_t fasttrap_mops = {
2386 .dtms_create_probe = fasttrap_meta_create_probe,
2387 .dtms_provide_proc = fasttrap_meta_provide,
2388 .dtms_remove_proc = fasttrap_meta_remove,
2389 .dtms_provider_name = fasttrap_meta_provider_name
2390 };
2391
2392 /*
2393 * Validate a null-terminated string. If str is not null-terminated,
2394 * or not a valid UTF8 string, the function returns -1. Otherwise, 0 is
2395 * returned.
2396 *
2397 * str: string to validate.
2398 * maxlen: maximal length of the string, null-terminated byte included.
2399 */
2400 static int
2401 fasttrap_validatestr(char const* str, size_t maxlen) {
2402 size_t len;
2403
2404 assert(str);
2405 assert(maxlen != 0);
2406
2407 /* Check that the string is null-terminated; strnlen returns maxlen if no NUL appears within maxlen bytes. */
2408 len = strnlen(str, maxlen);
2409 if (len >= maxlen)
2410 return -1;
2411
2412 /* Finally, check for UTF8 validity. */
2413 return utf8_validatestr((unsigned const char*) str, len);
2414 }
2415
2416 /*
2417 * Checks that the provided credentials are allowed to debug the target process.
2418 */
2419 static int
2420 fasttrap_check_cred_priv(cred_t *cr, proc_t *p)
2421 {
2422 int err = 0;
2423
2424 /* Only root can use DTrace. */
2425 if (!kauth_cred_issuser(cr)) {
2426 err = EPERM;
2427 goto out;
2428 }
2429
2430 /* Process is marked as no attach. */
2431 if (ISSET(p->p_lflag, P_LNOATTACH)) {
2432 err = EBUSY;
2433 goto out;
2434 }
2435
2436 #if CONFIG_MACF
2437 /* Check with MAC framework when enabled. */
2438 struct proc_ident cur_ident = proc_ident(current_proc());
2439 struct proc_ident p_ident = proc_ident(p);
2440
2441 /* Do not hold ref to proc here to avoid deadlock. */
2442 proc_rele(p);
2443 err = mac_proc_check_debug(&cur_ident, cr, &p_ident);
2444
2445 if (proc_find_ident(&p_ident) == PROC_NULL) {
2446 err = ESRCH;
2447 goto out_no_proc;
2448 }
2449 #endif /* CONFIG_MACF */
2450
2451 out:
2452 proc_rele(p);
2453
2454 out_no_proc:
2455 return err;
2456 }
2457
2458 /*ARGSUSED*/
2459 static int
2460 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2461 {
2462 #pragma unused(dev, md, rv)
2463 if (!dtrace_attached())
2464 return (EAGAIN);
2465
2466 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2467 fasttrap_probe_spec_t *probe;
2468 uint64_t noffs;
2469 size_t size;
2470 int ret;
2471
2472 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2473 sizeof (probe->ftps_noffs)))
2474 return (EFAULT);
2475
2476 /*
2477 * Probes must have at least one tracepoint.
2478 */
2479 if (noffs == 0)
2480 return (EINVAL);
2481
2482 /*
2483 * We want to check the number of noffs before doing
2484 * sizing math, to prevent potential buffer overflows.
2485 */
2486 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2487 return (ENOMEM);
2488
2489 size = sizeof (fasttrap_probe_spec_t) +
2490 sizeof (probe->ftps_offs[0]) * (noffs - 1);
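/*
 * The (noffs - 1) accounts for the single ftps_offs element already
 * embedded in fasttrap_probe_spec_t; the remaining offsets are copied
 * in contiguously after it.
 */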
2491
2492 probe = kmem_alloc(size, KM_SLEEP);
2493
2494 if (copyin(arg, probe, size) != 0 ||
2495 probe->ftps_noffs != noffs) {
2496 kmem_free(probe, size);
2497 return (EFAULT);
2498 }
2499
2500 /*
2501 * Verify that the function and module strings contain no
2502 * funny characters.
2503 */
2504
2505 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2506 ret = EINVAL;
2507 goto err;
2508 }
2509
2510 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2511 ret = EINVAL;
2512 goto err;
2513 }
2514
2515 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2516 proc_t *p;
2517 pid_t pid = probe->ftps_pid;
2518
2519 /*
2520 * Report an error if the process doesn't exist
2521 * or is actively being birthed.
2522 */
2523 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2524 if (p != PROC_NULL)
2525 proc_rele(p);
2526 ret = ESRCH;
2527 goto err;
2528 }
2529
2530 ret = fasttrap_check_cred_priv(cr, p);
2531 if (ret != 0) {
2532 goto err;
2533 }
2534 }
2535
2536 ret = fasttrap_add_probe(probe);
2537
2538 err:
2539 kmem_free(probe, size);
2540
2541 return (ret);
2542
2543 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2544 fasttrap_instr_query_t instr;
2545 fasttrap_tracepoint_t *tp;
2546 uint_t index;
2547 int ret;
2548
2549 if (copyin(arg, &instr, sizeof (instr)) != 0)
2550 return (EFAULT);
2551
2552 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2553 proc_t *p;
2554 pid_t pid = instr.ftiq_pid;
2555
2556 /*
2557 * Report an error if the process doesn't exist
2558 * or is actively being birthed.
2559 */
2560 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2561 if (p != PROC_NULL)
2562 proc_rele(p);
2563 return (ESRCH);
2564 }
2565
2566 ret = fasttrap_check_cred_priv(cr, p);
2567 if (ret != 0) {
2568 return (ret);
2569 }
2570 }
2571
2572 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2573
2574 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2575 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2576 while (tp != NULL) {
2577 if (instr.ftiq_pid == tp->ftt_pid &&
2578 instr.ftiq_pc == tp->ftt_pc &&
2579 tp->ftt_proc->ftpc_acount != 0)
2580 break;
2581
2582 tp = tp->ftt_next;
2583 }
2584
2585 if (tp == NULL) {
2586 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2587 return (ENOENT);
2588 }
2589
2590 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2591 sizeof (instr.ftiq_instr));
2592 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2593
2594 if (copyout(&instr, arg, sizeof (instr)) != 0)
2595 return (EFAULT);
2596
2597 return (0);
2598 }
2599
2600 return (EINVAL);
2601 }
2602
2603 static void
2604 fasttrap_attach(void)
2605 {
2606 ulong_t nent;
2607 unsigned int i;
2608
2609 /*
2610 * Install our hooks into fork(2), exec(2), and exit(2).
2611 */
2612 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2613 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2614 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2615
2616 /*
2617 * APPLE NOTE: We size the maximum number of fasttrap probes
2618 * based on system memory. 100k probes per 256M of system memory.
2619 * Yes, this is a WAG.
2620 */
2621 fasttrap_max = (sane_size >> 28) * 100000;
2622
2623 if (fasttrap_max == 0)
2624 fasttrap_max = 50000;
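/*
 * For example, a machine with 8GB (2^33 bytes) of memory gets
 * (sane_size >> 28) == 32, i.e. fasttrap_max == 3,200,000 probes;
 * below 256MB the shift yields 0 and the 50,000 floor applies.
 */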
2625
2626 fasttrap_total = 0;
2627 fasttrap_retired = 0;
2628
2629 /*
2630 * Conjure up the tracepoints hashtable...
2631 */
2632 #ifdef illumos
2633 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2634 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2635 #else
2636 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2637 #endif
2638
2639 if (nent <= 0 || nent > 0x1000000)
2640 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2641
2642 if ((nent & (nent - 1)) == 0)
2643 fasttrap_tpoints.fth_nent = nent;
2644 else
2645 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2646 ASSERT(fasttrap_tpoints.fth_nent > 0);
2647 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
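/*
 * fth_nent is guaranteed to be a power of two (rounded via
 * fasttrap_highbit() when needed), so fth_mask enables the cheap
 * "hash & mask" indexing used by FASTTRAP_TPOINTS_INDEX() and the other
 * *_INDEX macros.
 */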
2648 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2649 sizeof (fasttrap_bucket_t), KM_SLEEP);
2650 ASSERT(fasttrap_tpoints.fth_table != NULL);
2651
2652 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
2653 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
2654 &fasttrap_lck_attr);
2655 }
2656
2657 /*
2658 * ... and the providers hash table...
2659 */
2660 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2661 if ((nent & (nent - 1)) == 0)
2662 fasttrap_provs.fth_nent = nent;
2663 else
2664 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2665 ASSERT(fasttrap_provs.fth_nent > 0);
2666 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2667 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2668 sizeof (fasttrap_bucket_t), KM_SLEEP);
2669 ASSERT(fasttrap_provs.fth_table != NULL);
2670
2671 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2672 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
2673 &fasttrap_lck_attr);
2674 }
2675
2676 /*
2677 * ... and the procs hash table.
2678 */
2679 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2680 if ((nent & (nent - 1)) == 0)
2681 fasttrap_procs.fth_nent = nent;
2682 else
2683 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2684 ASSERT(fasttrap_procs.fth_nent > 0);
2685 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2686 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2687 sizeof (fasttrap_bucket_t), KM_SLEEP);
2688 ASSERT(fasttrap_procs.fth_table != NULL);
2689
2690 #ifndef illumos
2691 for (i = 0; i < fasttrap_procs.fth_nent; i++) {
2692 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
2693 &fasttrap_lck_attr);
2694 }
2695 #endif
2696
2697 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2698 &fasttrap_meta_id);
2699 }
2700
2701 static int
2702 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2703 {
2704 #pragma unused(dev, flags, devtype, p)
2705 return 0;
2706 }
2707
2708 static int
2709 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2710 {
2711 int err, rv = 0;
2712 user_addr_t uaddrp;
2713
2714 if (proc_is64bit(p)) {
2715 uaddrp = *(user_addr_t *)data;
2716 } else {
2717 uaddrp = (user_addr_t) *(uint32_t *)data;
2718 }
2719
2720 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2721
2722 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2723 if (err != 0) {
2724 ASSERT( (err & 0xfffff000) == 0 );
2725 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2726 } else if (rv != 0) {
2727 ASSERT( (rv & 0xfff00000) == 0 );
2728 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2729 } else
2730 return 0;
2731 }
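/*
 * Worked example of the encoding above: a plain errno such as EINVAL (22)
 * comes back as-is in the low 12 bits, while a Solaris-style return value
 * of, say, 7 comes back as 7 << 12 == 28672. Userspace can tell the two
 * apart because genuine errnos are always less than 4096.
 */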
2732
2733 static int fasttrap_inited = 0;
2734
2735 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2736
2737 static const struct cdevsw fasttrap_cdevsw =
2738 {
2739 .d_open = _fasttrap_open,
2740 .d_close = eno_opcl,
2741 .d_read = eno_rdwrt,
2742 .d_write = eno_rdwrt,
2743 .d_ioctl = _fasttrap_ioctl,
2744 .d_stop = (stop_fcn_t *)nulldev,
2745 .d_reset = (reset_fcn_t *)nulldev,
2746 .d_select = eno_select,
2747 .d_mmap = eno_mmap,
2748 .d_strategy = eno_strat,
2749 .d_reserved_1 = eno_getc,
2750 .d_reserved_2 = eno_putc,
2751 };
2752
2753 void fasttrap_init(void);
2754
2755 void
2756 fasttrap_init( void )
2757 {
2758 /*
2759 * This method is now invoked from multiple places: any open of /dev/dtrace,
2760 * and also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2761 *
2762 * The reason is to delay allocating the (rather large) resources for as long as possible.
2763 */
2764 if (!fasttrap_inited) {
2765 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2766
2767 if (majdevno < 0) {
2768 // FIX ME! What kind of error reporting to do here?
2769 printf("fasttrap_init: failed to allocate a major number!\n");
2770 return;
2771 }
2772
2773 dev_t device = makedev( (uint32_t)majdevno, 0 );
2774 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2775 return;
2776 }
2777
2778 /*
2779 * fasttrap_probe_t's are variable in size. We use an array of zones to
2780 * cover the most common sizes.
2781 */
2782 int i;
2783 for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2784 fasttrap_probe_t_zones[i] =
2785 zone_create(fasttrap_probe_t_zone_names[i],
2786 offsetof(fasttrap_probe_t, ftp_tps[i]), ZC_NONE);
2787 }
2788
2789
2790 fasttrap_attach();
2791
2792 /*
2793 * Start the fasttrap cleanup thread
2794 */
2795 kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
2796 if (res != KERN_SUCCESS) {
2797 panic("Could not create fasttrap_cleanup_thread");
2798 }
2799 thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");
2800
2801 fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
2802 fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
2803 KM_SLEEP);
2804
2805 fasttrap_inited = 1;
2806 }
2807 }
2808
2809 #undef FASTTRAP_MAJOR