[apple/xnu.git] / bsd / dev / dtrace / fasttrap.c (xnu-6153.41.3)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/types.h>
28 #include <sys/time.h>
29
30 #include <sys/codesign.h>
31 #include <sys/errno.h>
32 #include <sys/stat.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/kauth.h>
36 #include <sys/utfconv.h>
37
38 #include <sys/fasttrap.h>
39 #include <sys/fasttrap_impl.h>
40 #include <sys/fasttrap_isa.h>
41 #include <sys/dtrace.h>
42 #include <sys/dtrace_impl.h>
43 #include <sys/proc.h>
44
45 #include <security/mac_framework.h>
46
47 #include <miscfs/devfs/devfs.h>
48 #include <sys/proc_internal.h>
49 #include <sys/dtrace_glue.h>
50 #include <sys/dtrace_ptss.h>
51
52 #include <kern/cs_blobs.h>
53 #include <kern/thread.h>
54 #include <kern/zalloc.h>
55
56 #include <mach/thread_act.h>
57
58 extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);
59
60 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
61 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
62
63 __private_extern__
64 void
65 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
66
67 /*
68 * User-Land Trap-Based Tracing
69 * ----------------------------
70 *
71 * The fasttrap provider allows DTrace consumers to instrument any user-level
72 * instruction to gather data; this includes probes with semantic
73 * significance like entry and return as well as simple offsets into the
74 * function. While the techniques used are highly ISA-specific, the
75 * methodology is generalizable to any architecture.
76 *
77 *
78 * The General Methodology
79 * -----------------------
80 *
81 * With the primary goal of tracing every user-land instruction and the
82 * limitation that we can't trust user space, so we don't want to rely on much
83 * information there, we begin by replacing the instructions we want to trace
84 * with trap instructions. Each instruction we overwrite is saved into a hash
85 * table keyed by process ID and pc address. When we enter the kernel due to
86 * this trap instruction, we need the effects of the replaced instruction to
87 * appear to have occurred before we proceed with the user thread's
88 * execution.
89 *
90 * Each user level thread is represented by a ulwp_t structure which is
91 * always easily accessible through a register. The most basic way to produce
92 * the effects of the instruction we replaced is to copy that instruction out
93 * to a bit of scratch space reserved in the user thread's ulwp_t structure
94 * (a sort of kernel-private thread local storage), set the PC to that
95 * scratch space and single step. When we reenter the kernel after single
96 * stepping the instruction we must then adjust the PC to point to what would
97 * normally be the next instruction. Of course, special care must be taken
98 * for branches and jumps, but these represent such a small fraction of any
99 * instruction set that writing the code to emulate these in the kernel is
100 * not too difficult.
101 *
102 * Return probes may require several tracepoints to trace every return site,
103 * and, conversely, each tracepoint may activate several probes (the entry
104 * and offset 0 probes, for example). To solve this multiplexing problem,
105 * tracepoints contain lists of probes to activate and probes contain lists
106 * of tracepoints to enable. If a probe is activated, it adds its ID to
107 * existing tracepoints or creates new ones as necessary.
108 *
109 * Most probes are activated _before_ the instruction is executed, but return
110 * probes are activated _after_ the effects of the last instruction of the
111 * function are visible. Return probes must be fired _after_ we have
112 * single-stepped the instruction whereas all other probes are fired
113 * beforehand.
114 *
115 *
116 * Lock Ordering
117 * -------------
118 *
119 * The lock ordering below -- both internally and with respect to the DTrace
120 * framework -- is a little tricky and bears some explanation. Each provider
121 * has a lock (ftp_mtx) that protects its members including reference counts
122 * for enabled probes (ftp_rcount), consumers actively creating probes
123 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
124 * from being freed. A provider is looked up by taking the bucket lock for the
125 * provider hash table, and is returned with its lock held. The provider lock
126 * may be taken in functions invoked by the DTrace framework, but may not be
127 * held while calling functions in the DTrace framework.
128 *
129 * To ensure consistency over multiple calls to the DTrace framework, the
130 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
131 * not be taken when holding the provider lock as that would create a cyclic
132 * lock ordering. In situations where one would naturally take the provider
133 * lock and then the creation lock, we instead bump a reference count to prevent
134 * the provider from disappearing, drop the provider lock, and acquire the
135 * creation lock.
136 *
137 * Briefly:
138 * bucket lock before provider lock
139 * DTrace before provider lock
140 * creation lock before DTrace
141 * never hold the provider lock and creation lock simultaneously
142 */
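/*
 * Illustrative sketch only -- not part of the original source. This shows
 * the reference-count dance described above: pin the provider with
 * ftp_ccount under ftp_mtx, drop ftp_mtx, and only then take the creation
 * lock before calling into the DTrace framework. The function name is
 * hypothetical; fasttrap_add_probe() later in this file follows this shape.
 */
#if 0 /* example only */
static void
fasttrap_example_lock_dance(fasttrap_provider_t *fp)
{
	lck_mtx_lock(&fp->ftp_mtx);	/* provider lock (bucket lock came first) */
	fp->ftp_ccount++;		/* pin the provider so it can't be freed */
	lck_mtx_unlock(&fp->ftp_mtx);	/* never held together with ftp_cmtx */

	lck_mtx_lock(&fp->ftp_cmtx);	/* creation lock, taken before DTrace calls */
	/* ... dtrace_probe_lookup() / dtrace_probe_create() ... */
	lck_mtx_unlock(&fp->ftp_cmtx);

	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount--;		/* unpin; cleanup may now reap the provider */
	lck_mtx_unlock(&fp->ftp_mtx);
}
#endif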
143
144 static dtrace_meta_provider_id_t fasttrap_meta_id;
145
146 static thread_t fasttrap_cleanup_thread;
147
148 static lck_mtx_t fasttrap_cleanup_mtx;
149
150
151 #define FASTTRAP_CLEANUP_PROVIDER 0x1
152 #define FASTTRAP_CLEANUP_TRACEPOINT 0x2
153
154 static uint32_t fasttrap_cleanup_work = 0;
155
156 /*
157 * Generation count on modifications to the global tracepoint lookup table.
158 */
159 static volatile uint64_t fasttrap_mod_gen;
160
161 /*
162 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
163 * based on system memory. Each time a probe is created, fasttrap_total is
164 * incremented by the number of tracepoints that may be associated with that
165 * probe; fasttrap_total is capped at fasttrap_max.
166 */
167
168 static uint32_t fasttrap_max;
169 static uint32_t fasttrap_retired;
170 static uint32_t fasttrap_total;
171
172
173 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
174 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
175 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
176
177 fasttrap_hash_t fasttrap_tpoints;
178 static fasttrap_hash_t fasttrap_provs;
179 static fasttrap_hash_t fasttrap_procs;
180
181 static uint64_t fasttrap_pid_count; /* pid ref count */
182 static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */
183
184 #define FASTTRAP_ENABLE_FAIL 1
185 #define FASTTRAP_ENABLE_PARTIAL 2
186
187 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
188 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
189
190 static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
191 const dtrace_pattr_t *);
192 static void fasttrap_provider_retire(proc_t*, const char *, int);
193 static void fasttrap_provider_free(fasttrap_provider_t *);
194
195 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
196 static void fasttrap_proc_release(fasttrap_proc_t *);
197
198 #define FASTTRAP_PROVS_INDEX(pid, name) \
199 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
200
201 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
202
203 /*
204 * APPLE NOTE: To save memory, some common memory allocations are given
205 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
206 * which means it would fall into the kalloc.128 bucket. With
207 * 20k elements allocated, the space saved is substantial.
208 */
209
210 struct zone *fasttrap_tracepoint_t_zone;
211
212 /*
213 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
214 * that the sweet spot for reducing memory footprint is covering the first
215 * three sizes. Everything larger goes into the common pool.
216 */
217 #define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4
218
219 struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];
220
221 static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
222 "",
223 "dtrace.fasttrap_probe_t[1]",
224 "dtrace.fasttrap_probe_t[2]",
225 "dtrace.fasttrap_probe_t[3]"
226 };
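/*
 * Illustrative sketch only -- not part of the original source. The zones
 * above are created during attach with zinit(); the max/alloc values below
 * are placeholder assumptions, not the figures xnu actually uses.
 */
#if 0 /* example only */
fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
    1024 * sizeof(fasttrap_tracepoint_t),	/* max: assumption */
    sizeof(fasttrap_tracepoint_t),		/* alloc chunk: assumption */
    "dtrace.fasttrap_tracepoint_t");
#endif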
227
228 /*
229 * APPLE NOTE: We have to manage locks explicitly
230 */
231 lck_grp_t* fasttrap_lck_grp;
232 lck_grp_attr_t* fasttrap_lck_grp_attr;
233 lck_attr_t* fasttrap_lck_attr;
234
235 static int
236 fasttrap_highbit(ulong_t i)
237 {
238 int h = 1;
239
240 if (i == 0)
241 return (0);
242 #ifdef _LP64
243 if (i & 0xffffffff00000000ul) {
244 h += 32; i >>= 32;
245 }
246 #endif
247 if (i & 0xffff0000) {
248 h += 16; i >>= 16;
249 }
250 if (i & 0xff00) {
251 h += 8; i >>= 8;
252 }
253 if (i & 0xf0) {
254 h += 4; i >>= 4;
255 }
256 if (i & 0xc) {
257 h += 2; i >>= 2;
258 }
259 if (i & 0x2) {
260 h += 1;
261 }
262 return (h);
263 }
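/*
 * fasttrap_highbit() returns the 1-based index of the highest set bit
 * (e.g. fasttrap_highbit(1) == 1, fasttrap_highbit(0x4000) == 15), which
 * makes rounding a requested table size up to a power of two a one-liner.
 * Hypothetical sketch of attach-time sizing, where nent is the requested
 * entry count (an assumption for illustration):
 */
#if 0 /* example only */
	if ((nent & (nent - 1)) == 0)		/* already a power of two */
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
#endif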
264
265 static uint_t
266 fasttrap_hash_str(const char *p)
267 {
268 unsigned int g;
269 uint_t hval = 0;
270
271 while (*p) {
272 hval = (hval << 4) + *p++;
273 if ((g = (hval & 0xf0000000)) != 0)
274 hval ^= g >> 24;
275 hval &= ~g;
276 }
277 return (hval);
278 }
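/*
 * This is a variant of the classic ELF/PJW string hash. Combined with the
 * pid, it selects a provider bucket: for example, the "pid" provider for
 * process 123 lives in bucket FASTTRAP_PROVS_INDEX(123, "pid"), i.e.
 * (fasttrap_hash_str("pid") + 123) & fasttrap_provs.fth_mask.
 */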
279
280 /*
281 * APPLE NOTE: fasttrap_sigtrap not implemented
282 */
283 void
284 fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
285 {
286 #pragma unused(p, t, pc)
287
288 #if !defined(__APPLE__)
289 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
290
291 sqp->sq_info.si_signo = SIGTRAP;
292 sqp->sq_info.si_code = TRAP_DTRACE;
293 sqp->sq_info.si_addr = (caddr_t)pc;
294
295 mutex_enter(&p->p_lock);
296 sigaddqa(p, t, sqp);
297 mutex_exit(&p->p_lock);
298
299 if (t != NULL)
300 aston(t);
301 #endif /* __APPLE__ */
302
303 printf("fasttrap_sigtrap called with no implementation.\n");
304 }
305
306 /*
307 * This function ensures that no threads are actively using the memory
308 * associated with probes that were formerly live.
309 */
310 static void
311 fasttrap_mod_barrier(uint64_t gen)
312 {
313 unsigned int i;
314
315 if (gen < fasttrap_mod_gen)
316 return;
317
318 fasttrap_mod_gen++;
319
320 for (i = 0; i < NCPU; i++) {
321 lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
322 lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
323 }
324 }
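/*
 * Hypothetical sketch (not from this file) of the consumer side this
 * barrier pairs with: the probe-fire path holds its CPU's cpuc_pid_lock
 * while it walks tracepoint data, so cycling every CPU's lock above
 * guarantees that any thread which could still see the old generation has
 * drained before the memory is reused.
 */
#if 0 /* example only */
	lck_mtx_lock(&cpu_core[cpu_number()].cpuc_pid_lock);
	/* ... look up the tracepoint for this pid/pc and fire its probes ... */
	lck_mtx_unlock(&cpu_core[cpu_number()].cpuc_pid_lock);
#endif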
325
326 static void fasttrap_pid_cleanup(uint32_t);
327
328 static unsigned int
329 fasttrap_pid_cleanup_providers(void)
330 {
331 fasttrap_provider_t **fpp, *fp;
332 fasttrap_bucket_t *bucket;
333 dtrace_provider_id_t provid;
334 unsigned int later = 0, i;
335
336 /*
337 * Iterate over all the providers trying to remove the marked
338 * ones. If a provider is marked but not retired, we just
339 * have to take a crack at removing it -- it's no big deal if
340 * we can't.
341 */
342 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
343 bucket = &fasttrap_provs.fth_table[i];
344 lck_mtx_lock(&bucket->ftb_mtx);
345 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
346
347 while ((fp = *fpp) != NULL) {
348 if (!fp->ftp_marked) {
349 fpp = &fp->ftp_next;
350 continue;
351 }
352
353 lck_mtx_lock(&fp->ftp_mtx);
354
355 /*
356 * If this provider has consumers actively
357 * creating probes (ftp_ccount) or is a USDT
358 * provider (ftp_mcount), we can't unregister
359 * or even condense.
360 */
361 if (fp->ftp_ccount != 0 ||
362 fp->ftp_mcount != 0) {
363 fp->ftp_marked = 0;
364 lck_mtx_unlock(&fp->ftp_mtx);
365 continue;
366 }
367
368 if (!fp->ftp_retired || fp->ftp_rcount != 0)
369 fp->ftp_marked = 0;
370
371 lck_mtx_unlock(&fp->ftp_mtx);
372
373 /*
374 * If we successfully unregister this
375 * provider we can remove it from the hash
376 * chain and free the memory. If our attempt
377 * to unregister fails and this is a retired
378 * provider, increment our flag to try again
379 * pretty soon. If we've consumed more than
380 * half of our total permitted number of
381 * probes call dtrace_condense() to try to
382 * clean out the unenabled probes.
383 */
384 provid = fp->ftp_provid;
385 if (dtrace_unregister(provid) != 0) {
386 if (fasttrap_total > fasttrap_max / 2)
387 (void) dtrace_condense(provid);
388 later += fp->ftp_marked;
389 fpp = &fp->ftp_next;
390 } else {
391 *fpp = fp->ftp_next;
392 fasttrap_provider_free(fp);
393 }
394 }
395 lck_mtx_unlock(&bucket->ftb_mtx);
396 }
397
398 return later;
399 }
400
401 typedef struct fasttrap_tracepoint_spec {
402 pid_t fttps_pid;
403 user_addr_t fttps_pc;
404 } fasttrap_tracepoint_spec_t;
405
406 static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
407 static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
408 static lck_mtx_t fasttrap_retired_mtx;
409
410 #define DEFAULT_RETIRED_SIZE 256
411
412 static void
413 fasttrap_tracepoint_cleanup(void)
414 {
415 size_t i;
416 pid_t pid = 0;
417 user_addr_t pc;
418 proc_t *p = PROC_NULL;
419 fasttrap_tracepoint_t *tp = NULL;
420 lck_mtx_lock(&fasttrap_retired_mtx);
421 fasttrap_bucket_t *bucket;
422 for (i = 0; i < fasttrap_cur_retired; i++) {
423 pc = fasttrap_retired_spec[i].fttps_pc;
424 if (fasttrap_retired_spec[i].fttps_pid != pid) {
425 pid = fasttrap_retired_spec[i].fttps_pid;
426 if (p != PROC_NULL) {
427 sprunlock(p);
428 }
429 if ((p = sprlock(pid)) == PROC_NULL) {
430 pid = 0;
431 continue;
432 }
433 }
434 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
435 lck_mtx_lock(&bucket->ftb_mtx);
436 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
437 if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
438 tp->ftt_proc->ftpc_acount != 0)
439 break;
440 }
441 /*
442 * Check that the tracepoint is not gone or has not been
443 * re-activated for another probe
444 */
445 if (tp == NULL || tp->ftt_retired == 0) {
446 lck_mtx_unlock(&bucket->ftb_mtx);
447 continue;
448 }
449 fasttrap_tracepoint_remove(p, tp);
450 lck_mtx_unlock(&bucket->ftb_mtx);
451 }
452 if (p != PROC_NULL) {
453 sprunlock(p);
454 }
455
456 fasttrap_cur_retired = 0;
457
458 lck_mtx_unlock(&fasttrap_retired_mtx);
459 }
460
461 void
462 fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
463 {
464 if (tp->ftt_retired)
465 return;
466 lck_mtx_lock(&fasttrap_retired_mtx);
467 fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
468 s->fttps_pid = p->p_pid;
469 s->fttps_pc = tp->ftt_pc;
470
471 if (fasttrap_cur_retired == fasttrap_retired_size) {
472 fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
473 fasttrap_retired_size * 2 *
474 sizeof(*fasttrap_retired_spec),
475 KM_SLEEP);
476 memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
477 kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
478 fasttrap_retired_size *= 2;
479 fasttrap_retired_spec = new_retired;
480 }
481
482 lck_mtx_unlock(&fasttrap_retired_mtx);
483
484 tp->ftt_retired = 1;
485
486 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
487 }
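/*
 * A worked example of the doubling above: with DEFAULT_RETIRED_SIZE == 256,
 * retiring the 256th tracepoint allocates a 512-entry array and copies the
 * old 256 entries, so n retirements cost O(n) copy work amortized.
 */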
488
489 static void
490 fasttrap_pid_cleanup_compute_priority(void)
491 {
492 if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
493 thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
494 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
495 }
496 else {
497 thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
498 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
499
500 }
501 }
502
503 /*
504 * This is the timeout's callback for cleaning up the providers and their
505 * probes.
506 */
507 /*ARGSUSED*/
508 __attribute__((noreturn))
509 static void
510 fasttrap_pid_cleanup_cb(void)
511 {
512 uint32_t work = 0;
513 lck_mtx_lock(&fasttrap_cleanup_mtx);
514 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
515 while (1) {
516 unsigned int later = 0;
517
518 work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
519 lck_mtx_unlock(&fasttrap_cleanup_mtx);
520 if (work & FASTTRAP_CLEANUP_PROVIDER) {
521 later = fasttrap_pid_cleanup_providers();
522 }
523 if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
524 fasttrap_tracepoint_cleanup();
525 }
526 lck_mtx_lock(&fasttrap_cleanup_mtx);
527
528 fasttrap_pid_cleanup_compute_priority();
529 if (!fasttrap_cleanup_work) {
530 /*
531 * If we were unable to remove a retired provider, try again after
532 * a second. This situation can occur in certain circumstances where
533 * providers cannot be unregistered even though they have no probes
534 * enabled because of an execution of dtrace -l or something similar.
535 * If the timeout has been disabled (set to 1 because we're trying
536 * to detach), we set fasttrap_cleanup_work to ensure that we'll
537 * get a chance to do that work if and when the timeout is reenabled
538 * (if detach fails).
539 */
540 if (later > 0) {
541 struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
542 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
543 }
544 else
545 msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
546 }
547 }
548
549 }
550
551 /*
552 * Activates the asynchronous cleanup mechanism.
553 */
554 static void
555 fasttrap_pid_cleanup(uint32_t work)
556 {
557 lck_mtx_lock(&fasttrap_cleanup_mtx);
558 os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
559 fasttrap_pid_cleanup_compute_priority();
560 wakeup(&fasttrap_pid_cleanup_cb);
561 lck_mtx_unlock(&fasttrap_cleanup_mtx);
562 }
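/*
 * Callers pass one or more FASTTRAP_CLEANUP_* bits: retiring a tracepoint
 * requests FASTTRAP_CLEANUP_TRACEPOINT, while retiring (or failing to
 * unregister) a provider requests FASTTRAP_CLEANUP_PROVIDER. Bits
 * accumulate in fasttrap_cleanup_work until the cleanup thread consumes
 * them all at once via the os_atomic_xchg() in fasttrap_pid_cleanup_cb().
 */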
563
564
565 /*
566 * This is called from cfork() via dtrace_fasttrap_fork(). The child
567 * process's address space is (roughly) a copy of the parent process's, so
568 * we have to remove all the instrumentation we had previously enabled in the
569 * parent.
570 */
571 static void
572 fasttrap_fork(proc_t *p, proc_t *cp)
573 {
574 pid_t ppid = p->p_pid;
575 unsigned int i;
576
577 ASSERT(current_proc() == p);
578 LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
579 ASSERT(p->p_dtrace_count > 0);
580 ASSERT(cp->p_dtrace_count == 0);
581
582 /*
583 * This would be simpler and faster if we maintained per-process
584 * hash tables of enabled tracepoints. It could, however, potentially
585 * slow down execution of a tracepoint since we'd need to go
586 * through two levels of indirection. In the future, we should
587 * consider either maintaining per-process ancillary lists of
588 * enabled tracepoints or hanging a pointer to a per-process hash
589 * table of enabled tracepoints off the proc structure.
590 */
591
592 /*
593 * We don't have to worry about the child process disappearing
594 * because we're in fork().
595 */
596 if (cp != sprlock(cp->p_pid)) {
597 printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
598 return;
599 }
600
601 /*
602 * Iterate over every tracepoint looking for ones that belong to the
603 * parent process, and remove each from the child process.
604 */
605 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
606 fasttrap_tracepoint_t *tp;
607 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
608
609 lck_mtx_lock(&bucket->ftb_mtx);
610 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
611 if (tp->ftt_pid == ppid &&
612 tp->ftt_proc->ftpc_acount != 0) {
613 fasttrap_tracepoint_remove(cp, tp);
614
615 /*
616 * The count of active providers can only be
617 * decremented (i.e. to zero) during exec,
618 * exit, and removal of a meta provider so it
619 * should be impossible to drop the count
620 * mid-fork.
621 */
622 ASSERT(tp->ftt_proc->ftpc_acount != 0);
623 }
624 }
625 lck_mtx_unlock(&bucket->ftb_mtx);
626 }
627
628 /*
629 * Free any ptss pages/entries in the child.
630 */
631 dtrace_ptss_fork(p, cp);
632
633 sprunlock(cp);
634 }
635
636 /*
637 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
638 * is set on the proc structure to indicate that there is a pid provider
639 * associated with this process.
640 */
641 static void
642 fasttrap_exec_exit(proc_t *p)
643 {
644 ASSERT(p == current_proc());
645 LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
646 LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
647
648
649 /* APPLE NOTE: Okay, the locking here is really odd and needs some
650 * explaining. This method is always called with the proc_lock held.
651 * We must drop the proc_lock before calling fasttrap_provider_retire
652 * to avoid a deadlock when it takes the bucket lock.
653 *
654 * Next, the dtrace_ptss_exec_exit function requires the sprlock
655 * be held, but not the proc_lock.
656 *
657 * Finally, we must re-acquire the proc_lock
658 */
659 proc_unlock(p);
660
661 /*
662 * We clean up the pid provider for this process here; user-land
663 * static probes are handled by the meta-provider remove entry point.
664 */
665 fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);
666
667 /*
668 * APPLE NOTE: We also need to remove any aliased providers.
669 * XXX optimization: track which provider types are instantiated
670 * and only retire as needed.
671 */
672 fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
673 fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);
674
675 /*
676 * This should be called after it is no longer possible for a user
677 * thread to execute (potentially dtrace instrumented) instructions.
678 */
679 lck_mtx_lock(&p->p_dtrace_sprlock);
680 dtrace_ptss_exec_exit(p);
681 lck_mtx_unlock(&p->p_dtrace_sprlock);
682
683 proc_lock(p);
684 }
685
686
687 /*ARGSUSED*/
688 static void
689 fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
690 {
691 #pragma unused(arg, desc)
692 /*
693 * There are no "default" pid probes.
694 */
695 }
696
697 static int
698 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
699 {
700 fasttrap_tracepoint_t *tp, *new_tp = NULL;
701 fasttrap_bucket_t *bucket;
702 fasttrap_id_t *id;
703 pid_t pid;
704 user_addr_t pc;
705
706 ASSERT(index < probe->ftp_ntps);
707
708 pid = probe->ftp_pid;
709 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
710 id = &probe->ftp_tps[index].fit_id;
711
712 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
713
714 //ASSERT(!(p->p_flag & SVFORK));
715
716 /*
717 * Before we make any modifications, make sure we've imposed a barrier
718 * on the generation in which this probe was last modified.
719 */
720 fasttrap_mod_barrier(probe->ftp_gen);
721
722 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
723
724 /*
725 * If the tracepoint has already been enabled, just add our id to the
726 * list of interested probes. This may be our second time through
727 * this path in which case we'll have constructed the tracepoint we'd
728 * like to install. If we can't find a match, and have an allocated
729 * tracepoint ready to go, enable that one now.
730 *
731 * A tracepoint whose process is defunct is also considered defunct.
732 */
733 again:
734 lck_mtx_lock(&bucket->ftb_mtx);
735 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
736 int rc = 0;
737 /*
738 * Note that it's safe to access the active count on the
739 * associated proc structure because we know that at least one
740 * provider (this one) will still be around throughout this
741 * operation.
742 */
743 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
744 tp->ftt_proc->ftpc_acount == 0)
745 continue;
746
747 /*
748 * Now that we've found a matching tracepoint, it would be
749 * a decent idea to confirm that the tracepoint is still
750 * enabled and the trap instruction hasn't been overwritten.
751 * Since this is a little hairy, we'll punt for now.
752 */
753 if (!tp->ftt_installed) {
754 if (fasttrap_tracepoint_install(p, tp) != 0)
755 rc = FASTTRAP_ENABLE_PARTIAL;
756 }
757 /*
758 * This can't be the first interested probe. We don't have
759 * to worry about another thread being in the midst of
760 * deleting this tracepoint (which would be the only valid
761 * reason for a tracepoint to have no interested probes)
762 * since we're holding P_PR_LOCK for this process.
763 */
764 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
765
766 switch (id->fti_ptype) {
767 case DTFTP_ENTRY:
768 case DTFTP_OFFSETS:
769 case DTFTP_IS_ENABLED:
770 id->fti_next = tp->ftt_ids;
771 dtrace_membar_producer();
772 tp->ftt_ids = id;
773 dtrace_membar_producer();
774 break;
775
776 case DTFTP_RETURN:
777 case DTFTP_POST_OFFSETS:
778 id->fti_next = tp->ftt_retids;
779 dtrace_membar_producer();
780 tp->ftt_retids = id;
781 dtrace_membar_producer();
782 break;
783
784 default:
785 ASSERT(0);
786 }
787
788 tp->ftt_retired = 0;
789
790 lck_mtx_unlock(&bucket->ftb_mtx);
791
792 if (new_tp != NULL) {
793 new_tp->ftt_ids = NULL;
794 new_tp->ftt_retids = NULL;
795 }
796
797 return rc;
798 }
799
800 /*
801 * If we have a good tracepoint ready to go, install it now while
802 * we have the lock held and no one can screw with us.
803 */
804 if (new_tp != NULL) {
805 int rc = 0;
806
807 new_tp->ftt_next = bucket->ftb_data;
808 dtrace_membar_producer();
809 bucket->ftb_data = new_tp;
810 dtrace_membar_producer();
811 lck_mtx_unlock(&bucket->ftb_mtx);
812
813 /*
814 * Activate the tracepoint in the ISA-specific manner.
815 * If this fails, we need to report the failure, but
816 * indicate that this tracepoint must still be disabled
817 * by calling fasttrap_tracepoint_disable().
818 */
819 if (fasttrap_tracepoint_install(p, new_tp) != 0)
820 rc = FASTTRAP_ENABLE_PARTIAL;
821 /*
822 * Increment the count of the number of tracepoints active in
823 * the victim process.
824 */
825 //ASSERT(p->p_proc_flag & P_PR_LOCK);
826 p->p_dtrace_count++;
827
828
829 return (rc);
830 }
831
832 lck_mtx_unlock(&bucket->ftb_mtx);
833
834 /*
835 * Initialize the tracepoint that's been preallocated with the probe.
836 */
837 new_tp = probe->ftp_tps[index].fit_tp;
838 new_tp->ftt_retired = 0;
839
840 ASSERT(new_tp->ftt_pid == pid);
841 ASSERT(new_tp->ftt_pc == pc);
842 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
843 ASSERT(new_tp->ftt_ids == NULL);
844 ASSERT(new_tp->ftt_retids == NULL);
845
846 switch (id->fti_ptype) {
847 case DTFTP_ENTRY:
848 case DTFTP_OFFSETS:
849 case DTFTP_IS_ENABLED:
850 id->fti_next = NULL;
851 new_tp->ftt_ids = id;
852 break;
853
854 case DTFTP_RETURN:
855 case DTFTP_POST_OFFSETS:
856 id->fti_next = NULL;
857 new_tp->ftt_retids = id;
858 break;
859
860 default:
861 ASSERT(0);
862 }
863
864 /*
865 * If the ISA-dependent initialization goes to plan, go back to the
866 * beginning and try to install this freshly made tracepoint.
867 */
868 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
869 goto again;
870
871 new_tp->ftt_ids = NULL;
872 new_tp->ftt_retids = NULL;
873
874 return (FASTTRAP_ENABLE_FAIL);
875 }
876
877 static void
878 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
879 {
880 fasttrap_bucket_t *bucket;
881 fasttrap_provider_t *provider = probe->ftp_prov;
882 fasttrap_tracepoint_t **pp, *tp;
883 fasttrap_id_t *id, **idp;
884 pid_t pid;
885 user_addr_t pc;
886
887 ASSERT(index < probe->ftp_ntps);
888
889 pid = probe->ftp_pid;
890 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
891 id = &probe->ftp_tps[index].fit_id;
892
893 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
894
895 /*
896 * Find the tracepoint and make sure that our id is one of the
897 * ones registered with it.
898 */
899 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
900 lck_mtx_lock(&bucket->ftb_mtx);
901 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
902 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
903 tp->ftt_proc == provider->ftp_proc)
904 break;
905 }
906
907 /*
908 * If we somehow lost this tracepoint, we're in a world of hurt.
909 */
910 ASSERT(tp != NULL);
911
912 switch (id->fti_ptype) {
913 case DTFTP_ENTRY:
914 case DTFTP_OFFSETS:
915 case DTFTP_IS_ENABLED:
916 ASSERT(tp->ftt_ids != NULL);
917 idp = &tp->ftt_ids;
918 break;
919
920 case DTFTP_RETURN:
921 case DTFTP_POST_OFFSETS:
922 ASSERT(tp->ftt_retids != NULL);
923 idp = &tp->ftt_retids;
924 break;
925
926 default:
927 /* Fix compiler warning... */
928 idp = NULL;
929 ASSERT(0);
930 }
931
932 while ((*idp)->fti_probe != probe) {
933 idp = &(*idp)->fti_next;
934 ASSERT(*idp != NULL);
935 }
936
937 id = *idp;
938 *idp = id->fti_next;
939 dtrace_membar_producer();
940
941 ASSERT(id->fti_probe == probe);
942
943 /*
944 * If there are other registered enablings of this tracepoint, we're
945 * all done, but if this was the last probe associated with this
946 * tracepoint, we need to remove and free it.
947 */
948 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
949
950 /*
951 * If the current probe's tracepoint is in use, swap it
952 * for an unused tracepoint.
953 */
954 if (tp == probe->ftp_tps[index].fit_tp) {
955 fasttrap_probe_t *tmp_probe;
956 fasttrap_tracepoint_t **tmp_tp;
957 uint_t tmp_index;
958
959 if (tp->ftt_ids != NULL) {
960 tmp_probe = tp->ftt_ids->fti_probe;
961 /* LINTED - alignment */
962 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
963 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
964 } else {
965 tmp_probe = tp->ftt_retids->fti_probe;
966 /* LINTED - alignment */
967 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
968 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
969 }
970
971 ASSERT(*tmp_tp != NULL);
972 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
973 ASSERT((*tmp_tp)->ftt_ids == NULL);
974 ASSERT((*tmp_tp)->ftt_retids == NULL);
975
976 probe->ftp_tps[index].fit_tp = *tmp_tp;
977 *tmp_tp = tp;
978
979 }
980
981 lck_mtx_unlock(&bucket->ftb_mtx);
982
983 /*
984 * Tag the modified probe with the generation in which it was
985 * changed.
986 */
987 probe->ftp_gen = fasttrap_mod_gen;
988 return;
989 }
990
991 lck_mtx_unlock(&bucket->ftb_mtx);
992
993 /*
994 * We can't safely remove the tracepoint from the set of active
995 * tracepoints until we've actually removed the fasttrap instruction
996 * from the process's text. We can, however, operate on this
997 * tracepoint secure in the knowledge that no other thread is going to
998 * be looking at it since we hold P_PR_LOCK on the process if it's
999 * live or we hold the provider lock on the process if it's dead and
1000 * gone.
1001 */
1002
1003 /*
1004 * We only need to remove the actual instruction if we're looking
1005 * at an existing process
1006 */
1007 if (p != NULL) {
1008 /*
1009 * If we fail to restore the instruction we need to kill
1010 * this process since it's in a completely unrecoverable
1011 * state.
1012 */
1013 if (fasttrap_tracepoint_remove(p, tp) != 0)
1014 fasttrap_sigtrap(p, NULL, pc);
1015
1016 /*
1017 * Decrement the count of the number of tracepoints active
1018 * in the victim process.
1019 */
1020 //ASSERT(p->p_proc_flag & P_PR_LOCK);
1021 p->p_dtrace_count--;
1022 }
1023
1024 /*
1025 * Remove the probe from the hash table of active tracepoints.
1026 */
1027 lck_mtx_lock(&bucket->ftb_mtx);
1028 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1029 ASSERT(*pp != NULL);
1030 while (*pp != tp) {
1031 pp = &(*pp)->ftt_next;
1032 ASSERT(*pp != NULL);
1033 }
1034
1035 *pp = tp->ftt_next;
1036 dtrace_membar_producer();
1037
1038 lck_mtx_unlock(&bucket->ftb_mtx);
1039
1040 /*
1041 * Tag the modified probe with the generation in which it was changed.
1042 */
1043 probe->ftp_gen = fasttrap_mod_gen;
1044 }
1045
1046 static void
1047 fasttrap_enable_callbacks(void)
1048 {
1049 /*
1050 * We don't have to play the rw lock game here because we're
1051 * providing something rather than taking something away --
1052 * we can be sure that no threads have tried to follow this
1053 * function pointer yet.
1054 */
1055 lck_mtx_lock(&fasttrap_count_mtx);
1056 if (fasttrap_pid_count == 0) {
1057 ASSERT(dtrace_pid_probe_ptr == NULL);
1058 ASSERT(dtrace_return_probe_ptr == NULL);
1059 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
1060 dtrace_return_probe_ptr = &fasttrap_return_probe;
1061 }
1062 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
1063 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
1064 fasttrap_pid_count++;
1065 lck_mtx_unlock(&fasttrap_count_mtx);
1066 }
1067
1068 static void
1069 fasttrap_disable_callbacks(void)
1070 {
1071 //ASSERT(MUTEX_HELD(&cpu_lock));
1072
1073 lck_mtx_lock(&fasttrap_count_mtx);
1074 ASSERT(fasttrap_pid_count > 0);
1075 fasttrap_pid_count--;
1076 if (fasttrap_pid_count == 0) {
1077 dtrace_cpu_t *cur, *cpu = CPU;
1078
1079 /*
1080 * APPLE NOTE: This loop seems broken, it touches every CPU
1081 * but the one we're actually running on. Need to ask Sun folks
1082 * if that is safe. Scenario is this: We're running on CPU A,
1083 * and lock all but A. Then we get preempted, and start running
1084 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
1085 */
1086 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
1087 lck_rw_lock_exclusive(&cur->cpu_ft_lock);
1088 // rw_enter(&cur->cpu_ft_lock, RW_WRITER);
1089 }
1090
1091 dtrace_pid_probe_ptr = NULL;
1092 dtrace_return_probe_ptr = NULL;
1093
1094 for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
1095 lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
1096 // rw_exit(&cur->cpu_ft_lock);
1097 }
1098 }
1099 lck_mtx_unlock(&fasttrap_count_mtx);
1100 }
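/*
 * Hypothetical sketch (not from this file) of what the writer loop above
 * pairs with: the trap path takes its CPU's cpu_ft_lock as a reader around
 * the indirect call, so holding every lock exclusively guarantees no CPU is
 * mid-call when the pointers are cleared. regs stands in for the trap frame.
 */
#if 0 /* example only */
	lck_rw_lock_shared(&CPU->cpu_ft_lock);
	if (dtrace_pid_probe_ptr != NULL)
		(void) (*dtrace_pid_probe_ptr)(regs);
	lck_rw_unlock_shared(&CPU->cpu_ft_lock);
#endif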
1101
1102 /*ARGSUSED*/
1103 static int
1104 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
1105 {
1106 #pragma unused(arg, id)
1107 fasttrap_probe_t *probe = parg;
1108 proc_t *p;
1109 int i, rc;
1110
1111 ASSERT(probe != NULL);
1112 ASSERT(!probe->ftp_enabled);
1113 ASSERT(id == probe->ftp_id);
1114 // ASSERT(MUTEX_HELD(&cpu_lock));
1115
1116 /*
1117 * Increment the count of enabled probes on this probe's provider;
1118 * the provider can't go away while the probe still exists. We
1119 * must increment this even if we aren't able to properly enable
1120 * this probe.
1121 */
1122 lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
1123 probe->ftp_prov->ftp_rcount++;
1124 lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);
1125
1126 /*
1127 * If this probe's provider is retired (meaning it was valid in a
1128 * previously exec'ed incarnation of this address space), bail out. The
1129 * provider can't go away while we're in this code path.
1130 */
1131 if (probe->ftp_prov->ftp_retired)
1132 return(0);
1133
1134 /*
1135 * If we can't find the process, it may be that we're in the context of
1136 * a fork in which the traced process is being born and we're copying
1137 * USDT probes. Otherwise, the process is gone so bail.
1138 */
1139 if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
1140 /*
1141 * APPLE NOTE: We should never end up here. The Solaris sprlock()
1142 * does not return processes with SIDL set, but we always return
1143 * the child process.
1144 */
1145 return(0);
1146 }
1147
1148 proc_lock(p);
1149
1150 if ((p->p_csflags & (CS_KILL|CS_HARD))) {
1151 proc_unlock(p);
1152 for (i = 0; i < DTRACE_NCLIENTS; i++) {
1153 dtrace_state_t *state = dtrace_state_get(i);
1154 if (state == NULL)
1155 continue;
1156 if (state->dts_cred.dcr_cred == NULL)
1157 continue;
1158 mac_proc_check_get_task(state->dts_cred.dcr_cred, p);
1159 }
1160 rc = cs_allow_invalid(p);
1161 if (rc == 0) {
1162 sprunlock(p);
1163 cmn_err(CE_WARN, "process doesn't allow invalid code pages, failing to install fasttrap probe\n");
1164 return (0);
1165 }
1166 proc_lock(p);
1167 }
1168
1169 /*
1170 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
1171 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
1172 * To mimic this, we allocate on demand scratch space. If this is the first
1173 * time a probe has been enabled in this process, we need to allocate scratch
1174 * space for each already existing thread. Now is a good time to do this, as
1175 * the target process is suspended and the proc_lock is held.
1176 */
1177 if (p->p_dtrace_ptss_pages == NULL) {
1178 dtrace_ptss_enable(p);
1179 }
1180
1181 // ASSERT(!(p->p_flag & SVFORK));
1182 proc_unlock(p);
1183
1184 /*
1185 * We have to enable the trap entry point before any user threads have
1186 * the chance to execute the trap instruction we're about to place
1187 * in their process's text.
1188 */
1189 fasttrap_enable_callbacks();
1190
1191 /*
1192 * Enable all the tracepoints and add this probe's id to each
1193 * tracepoint's list of active probes.
1194 */
1195 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1196 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1197 /*
1198 * If enabling the tracepoint failed completely,
1199 * we don't have to disable it; if the failure
1200 * was only partial we must disable it.
1201 */
1202 if (rc == FASTTRAP_ENABLE_FAIL)
1203 i--;
1204 else
1205 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1206
1207 /*
1208 * Back up and pull out all the tracepoints we've
1209 * created so far for this probe.
1210 */
1211 while (i >= 0) {
1212 fasttrap_tracepoint_disable(p, probe, i);
1213 i--;
1214 }
1215
1216 sprunlock(p);
1217
1218 /*
1219 * Since we're not actually enabling this probe,
1220 * drop our reference on the trap table entry.
1221 */
1222 fasttrap_disable_callbacks();
1223 return(0);
1224 }
1225 }
1226
1227 sprunlock(p);
1228
1229 probe->ftp_enabled = 1;
1230 return (0);
1231 }
1232
1233 /*ARGSUSED*/
1234 static void
1235 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1236 {
1237 #pragma unused(arg, id)
1238 fasttrap_probe_t *probe = parg;
1239 fasttrap_provider_t *provider = probe->ftp_prov;
1240 proc_t *p;
1241 int i, whack = 0;
1242
1243 ASSERT(id == probe->ftp_id);
1244
1245 /*
1246 * We won't be able to acquire a /proc-esque lock on the process
1247 * iff the process is dead and gone. In this case, we rely on the
1248 * provider lock as a point of mutual exclusion to prevent other
1249 * DTrace consumers from disabling this probe.
1250 */
1251 if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
1252 // ASSERT(!(p->p_flag & SVFORK));
1253 }
1254
1255 lck_mtx_lock(&provider->ftp_mtx);
1256
1257 /*
1258 * Disable all the associated tracepoints (for fully enabled probes).
1259 */
1260 if (probe->ftp_enabled) {
1261 for (i = 0; i < (int)probe->ftp_ntps; i++) {
1262 fasttrap_tracepoint_disable(p, probe, i);
1263 }
1264 }
1265
1266 ASSERT(provider->ftp_rcount > 0);
1267 provider->ftp_rcount--;
1268
1269 if (p != NULL) {
1270 /*
1271 * Even though we may not be able to remove it entirely, we
1272 * mark this retired provider to get a chance to remove some
1273 * of the associated probes.
1274 */
1275 if (provider->ftp_retired && !provider->ftp_marked)
1276 whack = provider->ftp_marked = 1;
1277 lck_mtx_unlock(&provider->ftp_mtx);
1278
1279 sprunlock(p);
1280 } else {
1281 /*
1282 * If the process is dead, we're just waiting for the
1283 * last probe to be disabled to be able to free it.
1284 */
1285 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1286 whack = provider->ftp_marked = 1;
1287 lck_mtx_unlock(&provider->ftp_mtx);
1288 }
1289
1290 if (whack) {
1291 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
1292 }
1293
1294 if (!probe->ftp_enabled)
1295 return;
1296
1297 probe->ftp_enabled = 0;
1298
1299 // ASSERT(MUTEX_HELD(&cpu_lock));
1300 fasttrap_disable_callbacks();
1301 }
1302
1303 /*ARGSUSED*/
1304 static void
1305 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1306 dtrace_argdesc_t *desc)
1307 {
1308 #pragma unused(arg, id)
1309 fasttrap_probe_t *probe = parg;
1310 char *str;
1311 int i, ndx;
1312
1313 desc->dtargd_native[0] = '\0';
1314 desc->dtargd_xlate[0] = '\0';
1315
1316 if (probe->ftp_prov->ftp_retired != 0 ||
1317 desc->dtargd_ndx >= probe->ftp_nargs) {
1318 desc->dtargd_ndx = DTRACE_ARGNONE;
1319 return;
1320 }
1321
1322 ndx = (probe->ftp_argmap != NULL) ?
1323 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1324
1325 str = probe->ftp_ntypes;
1326 for (i = 0; i < ndx; i++) {
1327 str += strlen(str) + 1;
1328 }
1329
1330 (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));
1331
1332 if (probe->ftp_xtypes == NULL)
1333 return;
1334
1335 str = probe->ftp_xtypes;
1336 for (i = 0; i < desc->dtargd_ndx; i++) {
1337 str += strlen(str) + 1;
1338 }
1339
1340 (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
1341 }
1342
1343 /*ARGSUSED*/
1344 static void
1345 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1346 {
1347 #pragma unused(arg, id)
1348 fasttrap_probe_t *probe = parg;
1349 unsigned int i;
1350
1351 ASSERT(probe != NULL);
1352 ASSERT(!probe->ftp_enabled);
1353 ASSERT(fasttrap_total >= probe->ftp_ntps);
1354
1355 os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
1356 os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);
1357
1358 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1359 fasttrap_mod_barrier(probe->ftp_gen);
1360
1361 for (i = 0; i < probe->ftp_ntps; i++) {
1362 zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
1363 }
1364
1365 if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1366 zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
1367 } else {
1368 size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1369 kmem_free(probe, size);
1370 }
1371 }
1372
1373
1374 static const dtrace_pattr_t pid_attr = {
1375 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1376 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1377 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1378 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1379 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1380 };
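/*
 * The five rows of a dtrace_pattr_t describe, in order, the stability of
 * the provider, module, function, and name components and of the probe
 * arguments. Here the provider and name are Evolving with ISA dependency;
 * the module, function, and argument data are Private/Unknown.
 */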
1381
1382 static dtrace_pops_t pid_pops = {
1383 .dtps_provide = fasttrap_pid_provide,
1384 .dtps_provide_module = NULL,
1385 .dtps_enable = fasttrap_pid_enable,
1386 .dtps_disable = fasttrap_pid_disable,
1387 .dtps_suspend = NULL,
1388 .dtps_resume = NULL,
1389 .dtps_getargdesc = fasttrap_pid_getargdesc,
1390 .dtps_getargval = fasttrap_pid_getarg,
1391 .dtps_usermode = NULL,
1392 .dtps_destroy = fasttrap_pid_destroy
1393 };
1394
1395 static dtrace_pops_t usdt_pops = {
1396 .dtps_provide = fasttrap_pid_provide,
1397 .dtps_provide_module = NULL,
1398 .dtps_enable = fasttrap_pid_enable,
1399 .dtps_disable = fasttrap_pid_disable,
1400 .dtps_suspend = NULL,
1401 .dtps_resume = NULL,
1402 .dtps_getargdesc = fasttrap_pid_getargdesc,
1403 .dtps_getargval = fasttrap_usdt_getarg,
1404 .dtps_usermode = NULL,
1405 .dtps_destroy = fasttrap_pid_destroy
1406 };
1407
1408 static fasttrap_proc_t *
1409 fasttrap_proc_lookup(pid_t pid)
1410 {
1411 fasttrap_bucket_t *bucket;
1412 fasttrap_proc_t *fprc, *new_fprc;
1413
1414 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1415 lck_mtx_lock(&bucket->ftb_mtx);
1416
1417 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1418 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1419 lck_mtx_lock(&fprc->ftpc_mtx);
1420 lck_mtx_unlock(&bucket->ftb_mtx);
1421 fprc->ftpc_rcount++;
1422 os_atomic_inc(&fprc->ftpc_acount, relaxed);
1423 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1424 lck_mtx_unlock(&fprc->ftpc_mtx);
1425
1426 return (fprc);
1427 }
1428 }
1429
1430 /*
1431 * Drop the bucket lock so we don't try to perform a sleeping
1432 * allocation under it.
1433 */
1434 lck_mtx_unlock(&bucket->ftb_mtx);
1435
1436 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1437 ASSERT(new_fprc != NULL);
1438 new_fprc->ftpc_pid = pid;
1439 new_fprc->ftpc_rcount = 1;
1440 new_fprc->ftpc_acount = 1;
1441
1442 lck_mtx_lock(&bucket->ftb_mtx);
1443
1444 /*
1445 * Take another lap through the list to make sure a proc hasn't
1446 * been created for this pid while we weren't under the bucket lock.
1447 */
1448 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1449 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1450 lck_mtx_lock(&fprc->ftpc_mtx);
1451 lck_mtx_unlock(&bucket->ftb_mtx);
1452 fprc->ftpc_rcount++;
1453 os_atomic_inc(&fprc->ftpc_acount, relaxed);
1454 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1455 lck_mtx_unlock(&fprc->ftpc_mtx);
1456
1457 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1458
1459 return (fprc);
1460 }
1461 }
1462
1463 /*
1464 * APPLE NOTE: We have to initialize all locks explicitly
1465 */
1466 lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1467
1468 new_fprc->ftpc_next = bucket->ftb_data;
1469 bucket->ftb_data = new_fprc;
1470
1471 lck_mtx_unlock(&bucket->ftb_mtx);
1472
1473 return (new_fprc);
1474 }
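/*
 * The shape above -- drop the bucket lock, perform the sleeping
 * kmem_zalloc(), retake the lock, and re-scan before publishing -- is the
 * classic double-checked insertion pattern: the loser of a race simply
 * frees its candidate fasttrap_proc_t and returns the winner's entry.
 */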
1475
1476 static void
1477 fasttrap_proc_release(fasttrap_proc_t *proc)
1478 {
1479 fasttrap_bucket_t *bucket;
1480 fasttrap_proc_t *fprc, **fprcp;
1481 pid_t pid = proc->ftpc_pid;
1482
1483 lck_mtx_lock(&proc->ftpc_mtx);
1484
1485 ASSERT(proc->ftpc_rcount != 0);
1486 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1487
1488 if (--proc->ftpc_rcount != 0) {
1489 lck_mtx_unlock(&proc->ftpc_mtx);
1490 return;
1491 }
1492
1493 lck_mtx_unlock(&proc->ftpc_mtx);
1494
1495 /*
1496 * There should definitely be no live providers associated with this
1497 * process at this point.
1498 */
1499 ASSERT(proc->ftpc_acount == 0);
1500
1501 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1502 lck_mtx_lock(&bucket->ftb_mtx);
1503
1504 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1505 while ((fprc = *fprcp) != NULL) {
1506 if (fprc == proc)
1507 break;
1508
1509 fprcp = &fprc->ftpc_next;
1510 }
1511
1512 /*
1513 * Something strange has happened if we can't find the proc.
1514 */
1515 ASSERT(fprc != NULL);
1516
1517 *fprcp = fprc->ftpc_next;
1518
1519 lck_mtx_unlock(&bucket->ftb_mtx);
1520
1521 /*
1522 * APPLE NOTE: explicit lock management. Not 100% certain we need this; the
1523 * memory is freed even without the destroy. Maybe accounting cleanup?
1524 */
1525 lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
1526
1527 kmem_free(fprc, sizeof (fasttrap_proc_t));
1528 }
1529
1530 /*
1531 * Lookup a fasttrap-managed provider based on its name and associated proc.
1532 * A reference to the proc must be held for the duration of the call.
1533 * If the pattr argument is non-NULL, this function instantiates the provider
1534 * if it doesn't exist; otherwise it returns NULL. The provider is returned
1535 * with its lock held.
1536 */
1537 static fasttrap_provider_t *
1538 fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
1539 const dtrace_pattr_t *pattr)
1540 {
1541 pid_t pid = p->p_pid;
1542 fasttrap_provider_t *fp, *new_fp = NULL;
1543 fasttrap_bucket_t *bucket;
1544 char provname[DTRACE_PROVNAMELEN];
1545 cred_t *cred;
1546
1547 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1548 ASSERT(pattr != NULL);
1549
1550 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1551 lck_mtx_lock(&bucket->ftb_mtx);
1552
1553 /*
1554 * Take a lap through the list and return the match if we find it.
1555 */
1556 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1557 if (fp->ftp_pid == pid &&
1558 fp->ftp_provider_type == provider_type &&
1559 strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1560 !fp->ftp_retired) {
1561 lck_mtx_lock(&fp->ftp_mtx);
1562 lck_mtx_unlock(&bucket->ftb_mtx);
1563 return (fp);
1564 }
1565 }
1566
1567 /*
1568 * Drop the bucket lock so we don't try to perform a sleeping
1569 * allocation under it.
1570 */
1571 lck_mtx_unlock(&bucket->ftb_mtx);
1572
1573 /*
1574 * Make sure the process isn't a child created as the result
1575 * of a vfork(2), and isn't a zombie (but may be in fork).
1576 */
1577 proc_lock(p);
1578 if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
1579 proc_unlock(p);
1580 return (NULL);
1581 }
1582
1583 /*
1584 * Increment p_dtrace_probes so that the process knows to inform us
1585 * when it exits or execs. fasttrap_provider_free() decrements this
1586 * when we're done with this provider.
1587 */
1588 p->p_dtrace_probes++;
1589
1590 /*
1591 * Grab the credentials for this process so we have
1592 * something to pass to dtrace_register().
1593 * APPLE NOTE: We have no equivalent to crhold,
1594 * even though there is a cr_ref field in ucred.
1595 */
1596 cred = kauth_cred_proc_ref(p);
1597 proc_unlock(p);
1598
1599 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1600 ASSERT(new_fp != NULL);
1601 new_fp->ftp_pid = p->p_pid;
1602 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1603 new_fp->ftp_provider_type = provider_type;
1604
1605 /*
1606 * APPLE NOTE: locks require explicit init
1607 */
1608 lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
1609 lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
1610
1611 ASSERT(new_fp->ftp_proc != NULL);
1612
1613 lck_mtx_lock(&bucket->ftb_mtx);
1614
1615 /*
1616 * Take another lap through the list to make sure a provider hasn't
1617 * been created for this pid while we weren't under the bucket lock.
1618 */
1619 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1620 if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1621 !fp->ftp_retired) {
1622 lck_mtx_lock(&fp->ftp_mtx);
1623 lck_mtx_unlock(&bucket->ftb_mtx);
1624 fasttrap_provider_free(new_fp);
1625 kauth_cred_unref(&cred);
1626 return (fp);
1627 }
1628 }
1629
1630 (void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));
1631
1632 /*
1633 * Fail and return NULL if either the provider name is too long
1634 * or we fail to register this new provider with the DTrace
1635 * framework. Note that this is the only place we ever construct
1636 * the full provider name -- we keep it in pieces in the provider
1637 * structure.
1638 */
1639 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1640 (int)sizeof (provname) ||
1641 dtrace_register(provname, pattr,
1642 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1643 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1644 &new_fp->ftp_provid) != 0) {
1645 lck_mtx_unlock(&bucket->ftb_mtx);
1646 fasttrap_provider_free(new_fp);
1647 kauth_cred_unref(&cred);
1648 return (NULL);
1649 }
1650
1651 new_fp->ftp_next = bucket->ftb_data;
1652 bucket->ftb_data = new_fp;
1653
1654 lck_mtx_lock(&new_fp->ftp_mtx);
1655 lck_mtx_unlock(&bucket->ftb_mtx);
1656
1657 kauth_cred_unref(&cred);
1658
1659 return (new_fp);
1660 }
1661
1662 static void
1663 fasttrap_provider_free(fasttrap_provider_t *provider)
1664 {
1665 pid_t pid = provider->ftp_pid;
1666 proc_t *p;
1667
1668 /*
1669 * There need to be no associated enabled probes, no consumers
1670 * creating probes, and no meta providers referencing this provider.
1671 */
1672 ASSERT(provider->ftp_rcount == 0);
1673 ASSERT(provider->ftp_ccount == 0);
1674 ASSERT(provider->ftp_mcount == 0);
1675
1676 /*
1677 * If this provider hasn't been retired, we need to explicitly drop the
1678 * count of active providers on the associated process structure.
1679 */
1680 if (!provider->ftp_retired) {
1681 os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
1682 ASSERT(provider->ftp_proc->ftpc_acount <
1683 provider->ftp_proc->ftpc_rcount);
1684 }
1685
1686 fasttrap_proc_release(provider->ftp_proc);
1687
1688 /*
1689 * APPLE NOTE: explicit lock management. Not 100% certain we need this; the
1690 * memory is freed even without the destroy. Maybe accounting cleanup?
1691 */
1692 lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
1693 lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
1694
1695 kmem_free(provider, sizeof (fasttrap_provider_t));
1696
1697 /*
1698 * Decrement p_dtrace_probes on the process whose provider we're
1699 * freeing. We don't have to worry about clobbering someone else's
1700 * modifications to it because we have locked the bucket that
1701 * corresponds to this process's hash chain in the provider hash
1702 * table. Don't sweat it if we can't find the process.
1703 */
1704 if ((p = proc_find(pid)) == NULL) {
1705 return;
1706 }
1707
1708 proc_lock(p);
1709 p->p_dtrace_probes--;
1710 proc_unlock(p);
1711
1712 proc_rele(p);
1713 }
1714
1715 static void
1716 fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
1717 {
1718 fasttrap_provider_t *fp;
1719 fasttrap_bucket_t *bucket;
1720 dtrace_provider_id_t provid;
1721 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1722
1723 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
1724 lck_mtx_lock(&bucket->ftb_mtx);
1725
1726 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1727 if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
1728 !fp->ftp_retired)
1729 break;
1730 }
1731
1732 if (fp == NULL) {
1733 lck_mtx_unlock(&bucket->ftb_mtx);
1734 return;
1735 }
1736
1737 lck_mtx_lock(&fp->ftp_mtx);
1738 ASSERT(!mprov || fp->ftp_mcount > 0);
1739 if (mprov && --fp->ftp_mcount != 0) {
1740 lck_mtx_unlock(&fp->ftp_mtx);
1741 lck_mtx_unlock(&bucket->ftb_mtx);
1742 return;
1743 }
1744
1745 /*
1746 * Mark the provider to be removed in our post-processing step, mark it
1747 * retired, and drop the active count on its proc. Marking it indicates
1748 * that we should try to remove it; setting the retired flag indicates
1749 * that we're done with this provider; dropping the active count on the proc
1750 * releases our hold, and when this reaches zero (as it will during
1751 * exit or exec) the proc and associated providers become defunct.
1752 *
1753 * We obviously need to take the bucket lock before the provider lock
1754 * to perform the lookup, but we need to drop the provider lock
1755 * before calling into the DTrace framework since we acquire the
1756 * provider lock in callbacks invoked from the DTrace framework. The
1757 * bucket lock therefore protects the integrity of the provider hash
1758 * table.
1759 */
1760 os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
1761 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1762
1763 /*
1764 * Add this provider's probes to the retired count and
1765 * make sure we don't add them twice
1766 */
1767 os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
1768 fp->ftp_pcount = 0;
1769
1770 fp->ftp_retired = 1;
1771 fp->ftp_marked = 1;
1772 provid = fp->ftp_provid;
1773 lck_mtx_unlock(&fp->ftp_mtx);
1774
1775 /*
1776 * We don't have to worry about invalidating the same provider twice
1777 * since fasttrap_provider_lookup() will ignore providers that have
1778 * been marked as retired.
1779 */
1780 dtrace_invalidate(provid);
1781
1782 lck_mtx_unlock(&bucket->ftb_mtx);
1783
1784 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
1785 }
1786
1787 static int
1788 fasttrap_uint32_cmp(const void *ap, const void *bp)
1789 {
1790 	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1791 }
1792
1793 static int
1794 fasttrap_uint64_cmp(const void *ap, const void *bp)
1795 {
1796 	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1797 }
1798
1799 static int
1800 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1801 {
1802 proc_t *p;
1803 fasttrap_provider_t *provider;
1804 fasttrap_probe_t *pp;
1805 fasttrap_tracepoint_t *tp;
1806 const char *name;
1807 unsigned int i, aframes, whack;
1808
1809 /*
1810 * There needs to be at least one desired trace point.
1811 */
1812 if (pdata->ftps_noffs == 0)
1813 return (EINVAL);
1814
1815 switch (pdata->ftps_probe_type) {
1816 case DTFTP_ENTRY:
1817 name = "entry";
1818 aframes = FASTTRAP_ENTRY_AFRAMES;
1819 break;
1820 case DTFTP_RETURN:
1821 name = "return";
1822 aframes = FASTTRAP_RETURN_AFRAMES;
1823 break;
1824 case DTFTP_OFFSETS:
1825 aframes = 0;
1826 name = NULL;
1827 break;
1828 default:
1829 return (EINVAL);
1830 }
1831
1832 const char* provider_name;
1833 switch (pdata->ftps_provider_type) {
1834 case DTFTP_PROVIDER_PID:
1835 provider_name = FASTTRAP_PID_NAME;
1836 break;
1837 case DTFTP_PROVIDER_OBJC:
1838 provider_name = FASTTRAP_OBJC_NAME;
1839 break;
1840 case DTFTP_PROVIDER_ONESHOT:
1841 provider_name = FASTTRAP_ONESHOT_NAME;
1842 break;
1843 default:
1844 return (EINVAL);
1845 }
1846
1847 p = proc_find(pdata->ftps_pid);
1848 if (p == PROC_NULL)
1849 return (ESRCH);
1850
1851 if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
1852 provider_name, &pid_attr)) == NULL) {
1853 proc_rele(p);
1854 return (ESRCH);
1855 }
1856
1857 proc_rele(p);
1858 /*
1859 * Increment this reference count to indicate that a consumer is
1860 * actively adding a new probe associated with this provider. This
1861 * prevents the provider from being deleted -- we'll need to check
1862 * for pending deletions when we drop this reference count.
1863 */
1864 provider->ftp_ccount++;
1865 lck_mtx_unlock(&provider->ftp_mtx);
1866
1867 /*
1868 * Grab the creation lock to ensure consistency between calls to
1869 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1870 * other threads creating probes. We must drop the provider lock
1871 * before taking this lock to avoid a three-way deadlock with the
1872 * DTrace framework.
1873 */
1874 lck_mtx_lock(&provider->ftp_cmtx);
1875
1876 if (name == NULL) {
1877 for (i = 0; i < pdata->ftps_noffs; i++) {
1878 char name_str[17];
1879
1880 (void) snprintf(name_str, sizeof(name_str), "%llx",
1881 (uint64_t)pdata->ftps_offs[i]);
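/*
 * Offset probes are thus named by their hex offset alone; e.g.
 * (illustrative) instrumenting offset 0x1c4 of function "foo" in
 * module "a.out" of pid 123 yields the probe:
 *
 *     pid123:a.out:foo:1c4
 */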
1882
1883 if (dtrace_probe_lookup(provider->ftp_provid,
1884 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1885 continue;
1886
1887 os_atomic_inc(&fasttrap_total, relaxed);
1888 if (fasttrap_total > fasttrap_max) {
1889 os_atomic_dec(&fasttrap_total, relaxed);
1890 goto no_mem;
1891 }
1892 provider->ftp_pcount++;
1893
1894 pp = zalloc(fasttrap_probe_t_zones[1]);
1895 bzero(pp, sizeof (fasttrap_probe_t));
1896
1897 pp->ftp_prov = provider;
1898 pp->ftp_faddr = pdata->ftps_pc;
1899 pp->ftp_fsize = pdata->ftps_size;
1900 pp->ftp_pid = pdata->ftps_pid;
1901 pp->ftp_ntps = 1;
1902
1903 tp = zalloc(fasttrap_tracepoint_t_zone);
1904 bzero(tp, sizeof (fasttrap_tracepoint_t));
1905
1906 tp->ftt_proc = provider->ftp_proc;
1907 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1908 tp->ftt_pid = pdata->ftps_pid;
1909
1910 #if defined(__arm__) || defined(__arm64__)
1911 /*
1912 * On arm the subinfo is used to distinguish between arm
1913 * and thumb modes. On arm64 there is no thumb mode, so
1914 * this field is simply initialized to 0 on its way
1915 * into the kernel.
1916 */
1917 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1918 #endif
1919
1920 pp->ftp_tps[0].fit_tp = tp;
1921 pp->ftp_tps[0].fit_id.fti_probe = pp;
1922 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
1923 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1924 pdata->ftps_mod, pdata->ftps_func, name_str,
1925 FASTTRAP_OFFSET_AFRAMES, pp);
1926 }
1927
1928 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1929 pdata->ftps_func, name) == 0) {
1930 os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);
1931
1932 if (fasttrap_total > fasttrap_max) {
1933 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
1934 goto no_mem;
1935 }
1936
1937 /*
1938 * Make sure all tracepoint program counter values are unique.
1939 * We later assume that each probe has exactly one tracepoint
1940 * for a given pc.
1941 */
1942 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1943 sizeof (uint64_t), fasttrap_uint64_cmp);
1944 for (i = 1; i < pdata->ftps_noffs; i++) {
1945 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1946 continue;
1947
1948 os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
1949 goto no_mem;
1950 }
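/*
 * Worked example: a request with offsets {0x10, 0x4, 0x10} sorts to
 * {0x4, 0x10, 0x10}; the repeated 0x10 fails the strictly-increasing
 * check above, so the whole request is rejected.
 */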
1951 provider->ftp_pcount += pdata->ftps_noffs;
1952 ASSERT(pdata->ftps_noffs > 0);
1953 if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
1954 pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
1955 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
1956 } else {
1957 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1958 }
1959
1960 pp->ftp_prov = provider;
1961 pp->ftp_faddr = pdata->ftps_pc;
1962 pp->ftp_fsize = pdata->ftps_size;
1963 pp->ftp_pid = pdata->ftps_pid;
1964 pp->ftp_ntps = pdata->ftps_noffs;
1965
1966 for (i = 0; i < pdata->ftps_noffs; i++) {
1967 tp = zalloc(fasttrap_tracepoint_t_zone);
1968 bzero(tp, sizeof (fasttrap_tracepoint_t));
1969 tp->ftt_proc = provider->ftp_proc;
1970 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1971 tp->ftt_pid = pdata->ftps_pid;
1972
1973 #if defined(__arm__) || defined (__arm64__)
1974 /*
1975 * On arm the subinfo is used to distinguish between arm
1976 * and thumb modes. On arm64 there is no thumb mode, so
1977 * this field is simply initialized to 0 on its way
1978 * into the kernel.
1979 */
1980
1981 tp->ftt_fntype = pdata->ftps_arch_subinfo;
1982 #endif
1983 pp->ftp_tps[i].fit_tp = tp;
1984 pp->ftp_tps[i].fit_id.fti_probe = pp;
1985 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
1986 }
1987
1988 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1989 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1990 }
1991
1992 lck_mtx_unlock(&provider->ftp_cmtx);
1993
1994 /*
1995 * We know that the provider is still valid since we incremented the
1996 * creation reference count. If someone tried to clean up this provider
1997 * while we were using it (e.g. because the process called exec(2) or
1998 * exit(2)), take note of that and try to clean it up now.
1999 */
2000 lck_mtx_lock(&provider->ftp_mtx);
2001 provider->ftp_ccount--;
2002 whack = provider->ftp_retired;
2003 lck_mtx_unlock(&provider->ftp_mtx);
2004
2005 if (whack)
2006 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2007
2008 return (0);
2009
2010 no_mem:
2011 /*
2012 * If we've exhausted the allowable resources, we'll try to remove
2013 * this provider to free some up. This is to cover the case where
2014 * the user has accidentally created many more probes than was
2015 * intended (e.g. pid123:::).
2016 */
2017 lck_mtx_unlock(&provider->ftp_cmtx);
2018 lck_mtx_lock(&provider->ftp_mtx);
2019 provider->ftp_ccount--;
2020 provider->ftp_marked = 1;
2021 lck_mtx_unlock(&provider->ftp_mtx);
2022
2023 fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
2024
2025 return (ENOMEM);
2026 }
2027
2028 /*ARGSUSED*/
2029 static void *
2030 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2031 {
2032 #pragma unused(arg)
2033 fasttrap_provider_t *provider;
2034
2035 /*
2036 * A 32-bit unsigned integer (like a pid for example) can be
2037 * expressed in 10 or fewer decimal digits. Make sure that we'll
2038 * have enough space for the provider name.
2039 */
2040 if (strlen(dhpv->dthpv_provname) + 10 >=
2041 sizeof (provider->ftp_name)) {
2042 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2043 "name too long to accomodate pid", dhpv->dthpv_provname);
2044 return (NULL);
2045 }
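/*
 * (The pid is appended to the meta provider's name when the provider
 * is created -- e.g. a hypothetical USDT provider "mysql" registered
 * by pid 12345 becomes "mysql12345" -- hence the 10 bytes reserved
 * above.)
 */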
2046
2047 /*
2048 * Don't let folks spoof the true pid provider.
2049 */
2050 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
2051 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2052 "%s is an invalid name", dhpv->dthpv_provname,
2053 FASTTRAP_PID_NAME);
2054 return (NULL);
2055 }
2056
2057 /*
2058 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2059 */
2060 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
2061 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2062 "%s is an invalid name", dhpv->dthpv_provname,
2063 FASTTRAP_OBJC_NAME);
2064 return (NULL);
2065 }
2066 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
2067 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2068 "%s is an invalid name", dhpv->dthpv_provname,
2069 FASTTRAP_ONESHOT_NAME);
2070 return (NULL);
2071 }
2072
2073 /*
2074 * The highest stability class that fasttrap supports is ISA; cap
2075 * the stability of the new provider accordingly.
2076 */
2077 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2078 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2079 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2080 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2081 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2082 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2083 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2084 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2085 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2086 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2087
2088 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2089 &dhpv->dthpv_pattr)) == NULL) {
2090 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2091 "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
2092 return (NULL);
2093 }
2094
2095 /*
2096 * APPLE NOTE!
2097 *
2098 * USDT probes (fasttrap meta probes) are very expensive to create.
2099 * Profiling has shown that the largest single cost is verifying that
2100 * dtrace hasn't already created a given meta_probe. The reason for
2101 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2102 * each static probe being created. We want to get rid of that check.
2103 * The simplest way of eliminating it is to deny the ability to add
2104 * probes to an existing provider. If the provider already exists, BZZT!
2105 * This still leaves the possibility of intentionally malformed DOF
2106 * having duplicate probes. However, duplicate probes are not fatal,
2107 * and there is no way to get that by accident, so we will not check
2108 * for that case.
2109 *
2110 * UPDATE: It turns out there are several use cases that require adding
2111 * probes to existing providers. Disabling the dtrace_probe_lookup()
2112 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2113 */
2114
2115 /*
2116 * Up the meta provider count so this provider isn't removed until
2117 * the meta provider has been told to remove it.
2118 */
2119 provider->ftp_mcount++;
2120
2121 lck_mtx_unlock(&provider->ftp_mtx);
2122
2123 return (provider);
2124 }
2125
2126 /*ARGSUSED*/
2127 static void
2128 fasttrap_meta_create_probe(void *arg, void *parg,
2129 dtrace_helper_probedesc_t *dhpb)
2130 {
2131 #pragma unused(arg)
2132 fasttrap_provider_t *provider = parg;
2133 fasttrap_probe_t *pp;
2134 fasttrap_tracepoint_t *tp;
2135 unsigned int i, j;
2136 uint32_t ntps;
2137
2138 /*
2139 * Since the meta provider count is non-zero we don't have to worry
2140 * about this provider disappearing.
2141 */
2142 ASSERT(provider->ftp_mcount > 0);
2143
2144 /*
2145 * The offsets must be unique.
2146 */
2147 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2148 fasttrap_uint32_cmp);
2149 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2150 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2151 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2152 return;
2153 }
2154
2155 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2156 fasttrap_uint32_cmp);
2157 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2158 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2159 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2160 return;
2161 }
2162
2163 /*
2164 * Grab the creation lock to ensure consistency between calls to
2165 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2166 * other threads creating probes.
2167 */
2168 lck_mtx_lock(&provider->ftp_cmtx);
2169
2170 #if 0
2171 /*
2172 * APPLE NOTE: This is hideously expensive. See note in
2173 * fasttrap_meta_provide() for why we can get away without
2174 * checking here.
2175 */
2176 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2177 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2178 lck_mtx_unlock(&provider->ftp_cmtx);
2179 return;
2180 }
2181 #endif
2182
2183 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2184 ASSERT(ntps > 0);
2185
2186 os_atomic_add(&fasttrap_total, ntps, relaxed);
2187
2188 if (fasttrap_total > fasttrap_max) {
2189 os_atomic_sub(&fasttrap_total, ntps, relaxed);
2190 lck_mtx_unlock(&provider->ftp_cmtx);
2191 return;
2192 }
2193
2194 provider->ftp_pcount += ntps;
2195
2196 if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2197 pp = zalloc(fasttrap_probe_t_zones[ntps]);
2198 bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2199 } else {
2200 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2201 }
2202
2203 pp->ftp_prov = provider;
2204 pp->ftp_pid = provider->ftp_pid;
2205 pp->ftp_ntps = ntps;
2206 pp->ftp_nargs = dhpb->dthpb_xargc;
2207 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2208 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2209
2210 /*
2211 * First create a tracepoint for each actual point of interest.
2212 */
2213 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2214 tp = zalloc(fasttrap_tracepoint_t_zone);
2215 bzero(tp, sizeof (fasttrap_tracepoint_t));
2216
2217 tp->ftt_proc = provider->ftp_proc;
2218
2219 /*
2220 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2221 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2222 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2223 */
2224 #if defined(__x86_64__)
2225 /*
2226 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2227 */
2228 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2229 #elif defined(__arm__) || defined(__arm64__)
2230 /*
2231 * All ARM and ARM64 probes are zero offset. We need to zero out the
2232 * thumb bit because we still support 32bit user processes.
2233 * On 64bit user processes, bit zero won't be set anyway.
2234 */
2235 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
2236 tp->ftt_fntype = FASTTRAP_FN_USDT;
2237 #else
2238 #error "Architecture not supported"
2239 #endif
2240
2241 tp->ftt_pid = provider->ftp_pid;
2242
2243 pp->ftp_tps[i].fit_tp = tp;
2244 pp->ftp_tps[i].fit_id.fti_probe = pp;
2245 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2246 }
2247
2248 /*
2249 * Then create a tracepoint for each is-enabled point.
2250 */
2251 for (j = 0; i < ntps; i++, j++) { /* i continues where the offset loop above stopped */
2252 tp = zalloc(fasttrap_tracepoint_t_zone);
2253 bzero(tp, sizeof (fasttrap_tracepoint_t));
2254
2255 tp->ftt_proc = provider->ftp_proc;
2256
2257 /*
2258 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2259 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2260 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2261 */
2262 #if defined(__x86_64__)
2263 /*
2264 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2265 */
2266 tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2267 #elif defined(__arm__) || defined(__arm64__)
2268 /*
2269 * All ARM and ARM64 probes are zero offset. We need to zero out the
2270 * thumb bit because we still support 32bit user processes.
2271 * On 64bit user processes, bit zero won't be set anyway.
2272 */
2273 tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
2274 tp->ftt_fntype = FASTTRAP_FN_USDT;
2275 #else
2276 #error "Architecture not supported"
2277 #endif
2278
2279 tp->ftt_pid = provider->ftp_pid;
2280
2281 pp->ftp_tps[i].fit_tp = tp;
2282 pp->ftp_tps[i].fit_id.fti_probe = pp;
2283 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2284 }
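/*
 * (These is-enabled tracepoints back the *_ENABLED() macros that
 * "dtrace -h" generates for USDT consumers; a hypothetical sketch of
 * their use, with MYAPP_REQUEST_START as a placeholder probe:
 *
 *     if (MYAPP_REQUEST_START_ENABLED())
 *         MYAPP_REQUEST_START(compute_expensive_arg());
 *
 * so the expensive argument setup only runs while the probe is live.)
 */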
2285
2286 /*
2287 * If the arguments are shuffled around we set the argument remapping
2288 * table. Later, when the probe fires, we only remap the arguments
2289 * if the table is non-NULL.
2290 */
2291 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2292 if (dhpb->dthpb_args[i] != i) {
2293 pp->ftp_argmap = dhpb->dthpb_args;
2294 break;
2295 }
2296 }
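/*
 * Illustrative sketch: a probe whose native arguments arrive as (b, a)
 * but should present as (a, b) would carry dthpb_args = { 1, 0 }, so a
 * request for arg0 is satisfied from native argument 1 when the probe
 * fires.
 */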
2297
2298 /*
2299 * The probe is fully constructed -- register it with DTrace.
2300 */
2301 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2302 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2303
2304 lck_mtx_unlock(&provider->ftp_cmtx);
2305 }
2306
2307 /*ARGSUSED*/
2308 static void
2309 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2310 {
2311 #pragma unused(arg)
2312 /*
2313 * Clean up the USDT provider. There may be active consumers of the
2314 * provider busy adding probes; no damage will actually befall the
2315 * provider until that count has dropped to zero. This just puts
2316 * the provider on death row.
2317 */
2318 fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
2319 }
2320
2321 static char*
2322 fasttrap_meta_provider_name(void *arg)
2323 {
2324 fasttrap_provider_t *fprovider = arg;
2325 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2326 return provider->dtpv_name;
2327 }
2328
2329 static dtrace_mops_t fasttrap_mops = {
2330 .dtms_create_probe = fasttrap_meta_create_probe,
2331 .dtms_provide_proc = fasttrap_meta_provide,
2332 .dtms_remove_proc = fasttrap_meta_remove,
2333 .dtms_provider_name = fasttrap_meta_provider_name
2334 };
2335
2336 /*
2337 * Validate a null-terminated string. If str is not null-terminated,
2338 * or not a valid UTF8 string, the function returns -1. Otherwise, 0 is
2339 * returned.
2340 *
2341 * str: string to validate.
2342 * maxlen: maximum length of the string, terminating NUL byte included.
2343 */
2344 static int
2345 fasttrap_validatestr(char const* str, size_t maxlen) {
2346 size_t len;
2347
2348 assert(str);
2349 assert(maxlen != 0);
2350
2351 /* Check if the string is null-terminated. */
2352 len = strnlen(str, maxlen);
2353 if (len >= maxlen)
2354 return -1;
2355
2356 /* Finally, check for UTF8 validity. */
2357 return utf8_validatestr((unsigned const char*) str, len);
2358 }
2359
2360 /*ARGSUSED*/
2361 static int
2362 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2363 {
2364 #pragma unused(dev, md, rv)
2365 if (!dtrace_attached())
2366 return (EAGAIN);
2367
2368 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2369 fasttrap_probe_spec_t *probe;
2370 uint64_t noffs;
2371 size_t size;
2372 int ret;
2373
2374 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2375 sizeof (probe->ftps_noffs)))
2376 return (EFAULT);
2377
2378 /*
2379 * Probes must have at least one tracepoint.
2380 */
2381 if (noffs == 0)
2382 return (EINVAL);
2383
2384 /*
2385 * We want to check the number of noffs before doing
2386 * sizing math, to prevent potential buffer overflows.
2387 */
2388 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2389 return (ENOMEM);
2390
2391 size = sizeof (fasttrap_probe_spec_t) +
2392 sizeof (probe->ftps_offs[0]) * (noffs - 1);
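/*
 * (fasttrap_probe_spec_t already carries storage for one ftps_offs
 * element, hence the "noffs - 1" above: a request with noffs == 3,
 * say, allocates the base structure plus room for two more offsets.)
 */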
2393
2394 probe = kmem_alloc(size, KM_SLEEP);
2395
2396 if (copyin(arg, probe, size) != 0 ||
2397 probe->ftps_noffs != noffs) {
2398 kmem_free(probe, size);
2399 return (EFAULT);
2400 }
2401
2402 /*
2403 * Verify that the function and module strings contain no
2404 * funny characters.
2405 */
2406
2407 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2408 ret = EINVAL;
2409 goto err;
2410 }
2411
2412 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2413 ret = EINVAL;
2414 goto err;
2415 }
2416
2417 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2418 proc_t *p;
2419 pid_t pid = probe->ftps_pid;
2420
2421 /*
2422 * Report an error if the process doesn't exist
2423 * or is actively being birthed.
2424 */
2425 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2426 if (p != PROC_NULL)
2427 proc_rele(p);
2428 ret = ESRCH;
2429 goto err;
2430 }
2431 // proc_lock(p);
2432 // FIXME! How is this done on OS X?
2433 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2434 // VREAD | VWRITE)) != 0) {
2435 // mutex_exit(&p->p_lock);
2436 // return (ret);
2437 // }
2438 // proc_unlock(p);
2439 proc_rele(p);
2440 }
2441
2442 ret = fasttrap_add_probe(probe);
2443
2444 err:
2445 kmem_free(probe, size);
2446
2447 return (ret);
2448
2449 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2450 fasttrap_instr_query_t instr;
2451 fasttrap_tracepoint_t *tp;
2452 uint_t index;
2453 // int ret;
2454
2455 if (copyin(arg, &instr, sizeof (instr)) != 0)
2456 return (EFAULT);
2457
2458 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2459 proc_t *p;
2460 pid_t pid = instr.ftiq_pid;
2461
2462 /*
2463 * Report an error if the process doesn't exist
2464 * or is actively being birthed.
2465 */
2466 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2467 if (p != PROC_NULL)
2468 proc_rele(p);
2469 return (ESRCH);
2470 }
2471 //proc_lock(p);
2472 // FIXME! How is this done on OS X?
2473 // if ((ret = priv_proc_cred_perm(cr, p, NULL,
2474 // VREAD)) != 0) {
2475 // mutex_exit(&p->p_lock);
2476 // return (ret);
2477 // }
2478 // proc_unlock(p);
2479 proc_rele(p);
2480 }
2481
2482 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2483
2484 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2485 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2486 while (tp != NULL) {
2487 if (instr.ftiq_pid == tp->ftt_pid &&
2488 instr.ftiq_pc == tp->ftt_pc &&
2489 tp->ftt_proc->ftpc_acount != 0)
2490 break;
2491
2492 tp = tp->ftt_next;
2493 }
2494
2495 if (tp == NULL) {
2496 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2497 return (ENOENT);
2498 }
2499
2500 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2501 sizeof (instr.ftiq_instr));
2502 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2503
2504 if (copyout(&instr, arg, sizeof (instr)) != 0)
2505 return (EFAULT);
2506
2507 return (0);
2508 }
2509
2510 return (EINVAL);
2511 }
2512
2513 static void
2514 fasttrap_attach(void)
2515 {
2516 ulong_t nent;
2517 unsigned int i;
2518
2519 /*
2520 * Install our hooks into fork(2), exec(2), and exit(2).
2521 */
2522 dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2523 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2524 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2525
2526 /*
2527 * APPLE NOTE: We size the maximum number of fasttrap probes
2528 * based on system memory. 100k probes per 256M of system memory.
2529 * Yes, this is a WAG.
2530 */
2531 fasttrap_max = (sane_size >> 28) * 100000;
2532
2533 if (fasttrap_max == 0)
2534 fasttrap_max = 50000;
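/*
 * Worked example: with 8GB of memory, sane_size >> 28 == 32, so
 * fasttrap_max == 3,200,000; systems with less than 256M of memory
 * fall back to the 50,000-probe floor above.
 */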
2535
2536 fasttrap_total = 0;
2537 fasttrap_retired = 0;
2538
2539 /*
2540 * Conjure up the tracepoints hashtable...
2541 */
2542 #ifdef illumos
2543 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2544 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2545 #else
2546 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2547 #endif
2548
2549 if (nent <= 0 || nent > 0x1000000)
2550 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2551
2552 if ((nent & (nent - 1)) == 0)
2553 fasttrap_tpoints.fth_nent = nent;
2554 else
2555 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2556 ASSERT(fasttrap_tpoints.fth_nent > 0);
2557 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
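/*
 * (With fth_nent a power of two, bucket selection reduces to a mask:
 * index == hash & fth_mask -- e.g. 0x4000 buckets give a mask of
 * 0x3fff -- rather than a modulo.)
 */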
2558 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2559 sizeof (fasttrap_bucket_t), KM_SLEEP);
2560 ASSERT(fasttrap_tpoints.fth_table != NULL);
2561
2562 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
2563 lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2564 }
2565
2566 /*
2567 * ... and the providers hash table...
2568 */
2569 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2570 if ((nent & (nent - 1)) == 0)
2571 fasttrap_provs.fth_nent = nent;
2572 else
2573 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2574 ASSERT(fasttrap_provs.fth_nent > 0);
2575 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2576 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2577 sizeof (fasttrap_bucket_t), KM_SLEEP);
2578 ASSERT(fasttrap_provs.fth_table != NULL);
2579
2580 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2581 lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2582 }
2583
2584 /*
2585 * ... and the procs hash table.
2586 */
2587 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2588 if ((nent & (nent - 1)) == 0)
2589 fasttrap_procs.fth_nent = nent;
2590 else
2591 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2592 ASSERT(fasttrap_procs.fth_nent > 0);
2593 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2594 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2595 sizeof (fasttrap_bucket_t), KM_SLEEP);
2596 ASSERT(fasttrap_procs.fth_table != NULL);
2597
2598 #ifndef illumos
2599 for (i = 0; i < fasttrap_procs.fth_nent; i++) {
2600 lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2601 }
2602 #endif
2603
2604 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2605 &fasttrap_meta_id);
2606 }
2607
2608 static int
2609 _fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2610 {
2611 #pragma unused(dev, flags, devtype, p)
2612 return 0;
2613 }
2614
2615 static int
2616 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2617 {
2618 int err, rv = 0;
2619 user_addr_t uaddrp;
2620
2621 if (proc_is64bit(p))
2622 uaddrp = *(user_addr_t *)data;
2623 else
2624 uaddrp = (user_addr_t) *(uint32_t *)data;
2625
2626 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2627
2628 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2629 if (err != 0) {
2630 ASSERT( (err & 0xfffff000) == 0 );
2631 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2632 } else if (rv != 0) {
2633 ASSERT( (rv & 0xfff00000) == 0 );
2634 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2635 } else
2636 return 0;
2637 }
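/*
 * A hypothetical user-level caller would invert this encoding roughly
 * as follows ("fd", "cmd", and "arg" are placeholders):
 *
 *     if (ioctl(fd, cmd, &arg) == -1) {
 *         if (errno < 4096)
 *             err = errno;        // genuine error code
 *         else
 *             rv = errno >> 12;   // Solaris-style return value
 *     }
 */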
2638
2639 static int fasttrap_inited = 0;
2640
2641 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2642
2643 /*
2644 * A struct describing which functions will get invoked for certain
2645 * actions.
2646 */
2647
2648 static struct cdevsw fasttrap_cdevsw =
2649 {
2650 _fasttrap_open, /* open */
2651 eno_opcl, /* close */
2652 eno_rdwrt, /* read */
2653 eno_rdwrt, /* write */
2654 _fasttrap_ioctl, /* ioctl */
2655 (stop_fcn_t *)nulldev, /* stop */
2656 (reset_fcn_t *)nulldev, /* reset */
2657 NULL, /* tty's */
2658 eno_select, /* select */
2659 eno_mmap, /* mmap */
2660 eno_strat, /* strategy */
2661 eno_getc, /* getc */
2662 eno_putc, /* putc */
2663 0 /* type */
2664 };
2665
2666 void fasttrap_init(void);
2667
2668 void
2669 fasttrap_init( void )
2670 {
2671 /*
2672 * This method is now invoked from multiple places: any open of /dev/dtrace,
2673 * and also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2674 *
2675 * The reason is to delay allocating the (rather large) resources as late as possible.
2676 */
2677 if (!fasttrap_inited) {
2678 int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2679
2680 if (majdevno < 0) {
2681 // FIX ME! What kind of error reporting to do here?
2682 printf("fasttrap_init: failed to allocate a major number!\n");
2683 return;
2684 }
2685
2686 dev_t device = makedev( (uint32_t)majdevno, 0 );
2687 if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2688 return;
2689 }
2690
2691 /*
2692 * Allocate the fasttrap_tracepoint_t zone
2693 */
2694 fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2695 1024 * sizeof(fasttrap_tracepoint_t),
2696 sizeof(fasttrap_tracepoint_t),
2697 "dtrace.fasttrap_tracepoint_t");
2698
2699 /*
2700 * fasttrap_probe_t's are variable in size. We use an array of zones to
2701 * cover the most common sizes.
2702 */
2703 int i;
2704 for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2705 size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2706 fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2707 1024 * zone_element_size,
2708 zone_element_size,
2709 fasttrap_probe_t_zone_names[i]);
2710 }
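/*
 * (So a probe with, say, three tracepoints is allocated from
 * fasttrap_probe_t_zones[3]; only requests of
 * FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS or more tracepoints fall
 * through to kmem_zalloc(), as in fasttrap_add_probe() above.)
 */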
2711
2712
2713 /*
2714 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2715 */
2716 fasttrap_lck_attr = lck_attr_alloc_init();
2717 fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
2718 fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2719
2720 /*
2721 * Initialize global locks
2722 */
2723 lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2724 lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2725
2726 fasttrap_attach();
2727
2728 /*
2729 * Start the fasttrap cleanup thread
2730 */
2731 kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
2732 if (res != KERN_SUCCESS) {
2733 panic("Could not create fasttrap_cleanup_thread");
2734 }
2735 thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");
2736
2737 fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
2738 fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
2739 KM_SLEEP);
2740 lck_mtx_init(&fasttrap_retired_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2741
2742 fasttrap_inited = 1;
2743 }
2744 }
2745
2746 #undef FASTTRAP_MAJOR