1 /*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/thread.h>
30
31 #include <sys/time.h>
32 #include <sys/proc.h>
33 #include <sys/kauth.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/dtrace.h>
37 #include <sys/dtrace_impl.h>
38 #include <machine/atomic.h>
39 #include <libkern/OSKextLibPrivate.h>
40 #include <kern/kern_types.h>
41 #include <kern/timer_call.h>
42 #include <kern/thread_call.h>
43 #include <kern/task.h>
44 #include <kern/sched_prim.h>
45 #include <miscfs/devfs/devfs.h>
46 #include <kern/kalloc.h>
47
48 #include <mach/vm_param.h>
49 #include <mach/mach_vm.h>
50 #include <mach/task.h>
51 #include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
52
53 /*
54 * pid/proc
55 */
56 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
57 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
58
59 void
60 dtrace_sprlock(proc_t *p)
61 {
62 lck_mtx_lock(&p->p_dtrace_sprlock);
63 }
64
65 void
66 dtrace_sprunlock(proc_t *p)
67 {
68 lck_mtx_unlock(&p->p_dtrace_sprlock);
69 }
70
71 /* Not called from probe context */
72 proc_t *
73 sprlock(pid_t pid)
74 {
75 proc_t* p;
76
77 if ((p = proc_find(pid)) == PROC_NULL) {
78 return PROC_NULL;
79 }
80
81 task_suspend_internal(p->task);
82
83 dtrace_sprlock(p);
84
85 return p;
86 }
87
88 /* Not called from probe context */
89 void
90 sprunlock(proc_t *p)
91 {
92 if (p != PROC_NULL) {
93 dtrace_sprunlock(p);
94
95 task_resume_internal(p->task);
96
97 proc_rele(p);
98 }
99 }
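#if 0
/*
 * Illustrative sketch only (not part of the original file): the usual pairing
 * of sprlock()/sprunlock() around work on a stopped process. The function
 * name and body are hypothetical.
 */
static void
example_with_stopped_proc(pid_t pid)
{
    proc_t *p = sprlock(pid);       /* proc_find + task_suspend_internal + dtrace_sprlock */
    if (p != PROC_NULL) {
        /* ... inspect or patch the suspended process, e.g. via uread()/uwrite() ... */
        sprunlock(p);               /* dtrace_sprunlock + task_resume_internal + proc_rele */
    }
}
#endif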
100
101 /*
102 * uread/uwrite
103 */
104
105 // These are not exported from vm_map.h.
106 extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
107 extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
108
109 /* Not called from probe context */
110 int
111 uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
112 {
113 kern_return_t ret;
114
115 ASSERT(p != PROC_NULL);
116 ASSERT(p->task != NULL);
117
118 task_t task = p->task;
119
120 /*
121 * Grab a reference to the task vm_map_t to make sure
122 * the map isn't pulled out from under us.
123 *
124 * Because the proc_lock is not held at all times on all code
125 * paths leading here, it is possible for the proc to have
126 * exited. If the map is null, fail.
127 */
128 vm_map_t map = get_task_map_reference(task);
129 if (map) {
130 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
131 vm_map_deallocate(map);
132 } else {
133 ret = KERN_TERMINATED;
134 }
135
136 return (int)ret;
137 }
138
139
140 /* Not called from probe context */
141 int
142 uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
143 {
144 kern_return_t ret;
145
146 ASSERT(p != NULL);
147 ASSERT(p->task != NULL);
148
149 task_t task = p->task;
150
151 /*
152 * Grab a reference to the task vm_map_t to make sure
153 * the map isn't pulled out from under us.
154 *
155 * Because the proc_lock is not held at all times on all code
156 * paths leading here, it is possible for the proc to have
157 * exited. If the map is null, fail.
158 */
159 vm_map_t map = get_task_map_reference(task);
160 if (map) {
161 /* Find the memory permissions. */
162 uint32_t nestingDepth = 999999;
163 vm_region_submap_short_info_data_64_t info;
164 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
165 mach_vm_address_t address = (mach_vm_address_t)a;
166 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
167
168 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
169 if (ret != KERN_SUCCESS) {
170 goto done;
171 }
172
173 vm_prot_t reprotect;
174
175 if (!(info.protection & VM_PROT_WRITE)) {
176 /* Save the original protection values for restoration later */
177 reprotect = info.protection;
178
179 if (info.max_protection & VM_PROT_WRITE) {
180 /* The memory is not currently writable, but can be made writable. */
181 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
182 } else {
183 /*
184 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
185 *
186 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
187 */
188 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
189 }
190
191 if (ret != KERN_SUCCESS) {
192 goto done;
193 }
194 } else {
195 /* The memory was already writable. */
196 reprotect = VM_PROT_NONE;
197 }
198
199 ret = vm_map_write_user( map,
200 buf,
201 (vm_map_address_t)a,
202 (vm_size_t)len);
203
204 dtrace_flush_caches();
205
206 if (ret != KERN_SUCCESS) {
207 goto done;
208 }
209
210 if (reprotect != VM_PROT_NONE) {
211 ASSERT(reprotect & VM_PROT_EXECUTE);
212 ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
213 }
214
215 done:
216 vm_map_deallocate(map);
217 } else {
218 ret = KERN_TERMINATED;
219 }
220
221 return (int)ret;
222 }
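/*
 * Summary of the protection dance above: if the target region is not
 * currently writable but its max_protection allows writing, the region is
 * temporarily granted VM_PROT_WRITE (with VM_PROT_EXECUTE dropped); if it can
 * never be made writable in place, VM_PROT_COPY forces a private
 * copy-on-write mapping instead. Whatever original protection was saved in
 * `reprotect` is restored once the write and the cache flush have completed.
 */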
223
224 /*
225 * cpuvar
226 */
227 lck_mtx_t cpu_lock;
228 lck_mtx_t cyc_lock;
229 lck_mtx_t mod_lock;
230
231 dtrace_cpu_t *cpu_list;
232 cpu_core_t *cpu_core; /* XXX TLB lockdown? */
233
234 /*
235 * cred_t
236 */
237
238 /*
239 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
240 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
241 */
242 cred_t *
243 dtrace_CRED(void)
244 {
245 struct uthread *uthread = get_bsdthread_info(current_thread());
246
247 if (uthread == NULL) {
248 return NULL;
249 } else {
250 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
251 }
252 }
253
254 int
255 PRIV_POLICY_CHOICE(void* cred, int priv, int all)
256 {
257 #pragma unused(priv, all)
258 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
259 }
260
261 int
262 PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
263 {
264 #pragma unused(priv, boolean)
265 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
266 }
267
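/*
 * crgetuid() takes a const credential, but kauth_cred_getuid() is not
 * declared to take one, so the implementation below works on a stack copy
 * rather than casting away the caller's const qualifier.
 */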
268 uid_t
269 crgetuid(const cred_t *cr)
270 {
271 cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr);
272 }
273
274 /*
275 * "cyclic"
276 */
277
278 typedef struct wrap_timer_call {
279 /* node attributes */
280 cyc_handler_t hdlr;
281 cyc_time_t when;
282 uint64_t deadline;
283 int cpuid;
284 boolean_t suspended;
285 struct timer_call call;
286
287 /* next item in the linked list */
288 LIST_ENTRY(wrap_timer_call) entries;
289 } wrap_timer_call_t;
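/*
 * wrap_timer_call_t marries a Solaris-style cyclic handler (hdlr/when) to a
 * Mach timer_call. Each wrapper records the CPU it is bound to and is linked
 * onto that CPU's cpu_cyc_list so dtrace_cpu_state_changed() can suspend and
 * re-arm it when the CPU goes offline or comes back online.
 */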
290
291 #define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
292 #define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
293
294
295 typedef struct cyc_list {
296 cyc_omni_handler_t cyl_omni;
297 wrap_timer_call_t cyl_wrap_by_cpus[];
298 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
299 } __attribute__ ((aligned(8))) cyc_list_t;
300 #else
301 } cyc_list_t;
302 #endif
303
304 /* CPU going online/offline notifications */
305 void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
306 void dtrace_cpu_state_changed(int, boolean_t);
307
308 void
309 dtrace_install_cpu_hooks(void)
310 {
311 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
312 }
313
314 void
315 dtrace_cpu_state_changed(int cpuid, boolean_t is_running)
316 {
317 #pragma unused(cpuid)
318 wrap_timer_call_t *wrapTC = NULL;
319 boolean_t suspend = (is_running ? FALSE : TRUE);
320 dtrace_icookie_t s;
321
322 /* Ensure that we're not going to leave the CPU */
323 s = dtrace_interrupt_disable();
324 assert(cpuid == cpu_number());
325
326 LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
327 assert(wrapTC->cpuid == cpu_number());
328 if (suspend) {
329 assert(!wrapTC->suspended);
330 /* If this fails, we'll panic anyway, so let's do this now. */
331 if (!timer_call_cancel(&wrapTC->call)) {
332 panic("timer_call_set_suspend() failed to cancel a timer call");
333 }
334 wrapTC->suspended = TRUE;
335 } else {
336 /* Rearm the timer, but ensure it was suspended first. */
337 assert(wrapTC->suspended);
338 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
339 &wrapTC->deadline);
340 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
341 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
342 wrapTC->suspended = FALSE;
343 }
344 }
345
346 /* Restore the previous interrupt state. */
347 dtrace_interrupt_enable(s);
348 }
349
350 static void
351 _timer_call_apply_cyclic( void *ignore, void *vTChdl )
352 {
353 #pragma unused(ignore)
354 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
355
356 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
357
358 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
359 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
360 }
361
362 static cyclic_id_t
363 timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
364 {
365 uint64_t now;
366 dtrace_icookie_t s;
367
368 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
369 wrapTC->hdlr = *handler;
370 wrapTC->when = *when;
371
372 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
373
374 now = mach_absolute_time();
375 wrapTC->deadline = now;
376
377 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
378
379 /* Insert the timer into the list of running timers on this CPU, and start it. */
380 s = dtrace_interrupt_disable();
381 wrapTC->cpuid = cpu_number();
382 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
383 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
384 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
385 wrapTC->suspended = FALSE;
386 dtrace_interrupt_enable(s);
387
388 return (cyclic_id_t)wrapTC;
389 }
390
391 /*
392 * Executed on the CPU the timer is running on.
393 */
394 static void
395 timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
396 {
397 assert(wrapTC);
398 assert(cpu_number() == wrapTC->cpuid);
399
400 if (!timer_call_cancel(&wrapTC->call)) {
401 panic("timer_call_remove_cyclic() failed to cancel a timer call");
402 }
403
404 LIST_REMOVE(wrapTC, entries);
405 }
406
407 static void *
408 timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
409 {
410 return wrapTC ? wrapTC->hdlr.cyh_arg : NULL;
411 }
412
413 cyclic_id_t
414 cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
415 {
416 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
417 if (NULL == wrapTC) {
418 return CYCLIC_NONE;
419 } else {
420 return timer_call_add_cyclic( wrapTC, handler, when );
421 }
422 }
423
424 void
425 cyclic_timer_remove(cyclic_id_t cyclic)
426 {
427 ASSERT( cyclic != CYCLIC_NONE );
428
429 /* Removing a timer call must be done on the CPU the timer is running on. */
430 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
431 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
432
433 _FREE((void *)cyclic, M_TEMP);
434 }
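#if 0
/*
 * Illustrative sketch only: registering a periodic handler with
 * cyclic_timer_add() and tearing it down again. The handler name and the
 * 10 ms interval are hypothetical.
 */
extern void example_tick(void *arg);

static void
example_cyclic_timer(void)
{
    cyc_handler_t hdlr = { .cyh_func = example_tick, .cyh_arg = NULL };
    cyc_time_t when = { .cyt_when = 0, .cyt_interval = 10 * 1000 * 1000 };  /* ns */
    cyclic_id_t id = cyclic_timer_add(&hdlr, &when);

    if (id != CYCLIC_NONE) {
        /* cyclic_timer_remove() cross-calls to the owning CPU on our behalf. */
        cyclic_timer_remove(id);
    }
}
#endif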
435
436 static void
437 _cyclic_add_omni(cyc_list_t *cyc_list)
438 {
439 cyc_time_t cT;
440 cyc_handler_t cH;
441 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
442
443 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
444
445 wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
446 timer_call_add_cyclic(wrapTC, &cH, &cT);
447 }
448
449 cyclic_id_list_t
450 cyclic_add_omni(cyc_omni_handler_t *omni)
451 {
452 cyc_list_t *cyc_list =
453 _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
454
455 if (NULL == cyc_list) {
456 return NULL;
457 }
458
459 cyc_list->cyl_omni = *omni;
460
461 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
462
463 return (cyclic_id_list_t)cyc_list;
464 }
465
466 static void
467 _cyclic_remove_omni(cyc_list_t *cyc_list)
468 {
469 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
470 void *oarg;
471 wrap_timer_call_t *wrapTC;
472
473 /*
474 * If the processor was offline when dtrace started, we did not allocate
475 * a cyclic timer for this CPU.
476 */
477 if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
478 oarg = timer_call_get_cyclic_arg(wrapTC);
479 timer_call_remove_cyclic(wrapTC);
480 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
481 }
482 }
483
484 void
485 cyclic_remove_omni(cyclic_id_list_t cyc_list)
486 {
487 ASSERT(cyc_list != NULL);
488
489 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
490 _FREE(cyc_list, M_TEMP);
491 }
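/*
 * The omni variants fan out over every CPU: cyclic_add_omni() broadcasts
 * _cyclic_add_omni() with dtrace_xcall(DTRACE_CPUALL, ...), and each CPU asks
 * the cyo_online callback for its own handler/interval before arming the
 * per-CPU wrap_timer_call from cyl_wrap_by_cpus[]. Removal mirrors this:
 * every CPU cancels its timer and then invokes the cyo_offline callback.
 */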
492
493 typedef struct wrap_thread_call {
494 thread_call_t TChdl;
495 cyc_handler_t hdlr;
496 cyc_time_t when;
497 uint64_t deadline;
498 } wrap_thread_call_t;
499
500 /*
501 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
502 * cleaner and the deadman, but too distant in time and place for the profile provider.
503 */
504 static void
505 _cyclic_apply( void *ignore, void *vTChdl )
506 {
507 #pragma unused(ignore)
508 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
509
510 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
511
512 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
513 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
514
515 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
516 if (wrapTC->when.cyt_interval == WAKEUP_REAPER) {
517 thread_wakeup((event_t)wrapTC);
518 }
519 }
520
521 cyclic_id_t
522 cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
523 {
524 uint64_t now;
525
526 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
527 if (NULL == wrapTC) {
528 return CYCLIC_NONE;
529 }
530
531 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
532 wrapTC->hdlr = *handler;
533 wrapTC->when = *when;
534
535 ASSERT(when->cyt_when == 0);
536 ASSERT(when->cyt_interval < WAKEUP_REAPER);
537
538 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
539
540 now = mach_absolute_time();
541 wrapTC->deadline = now;
542
543 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
544 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
545
546 return (cyclic_id_t)wrapTC;
547 }
548
549 static void
550 noop_cyh_func(void * ignore)
551 {
552 #pragma unused(ignore)
553 }
554
555 void
556 cyclic_remove(cyclic_id_t cyclic)
557 {
558 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
559
560 ASSERT(cyclic != CYCLIC_NONE);
561
562 while (!thread_call_cancel(wrapTC->TChdl)) {
563 int ret = assert_wait(wrapTC, THREAD_UNINT);
564 ASSERT(ret == THREAD_WAITING);
565
566 wrapTC->when.cyt_interval = WAKEUP_REAPER;
567
568 ret = thread_block(THREAD_CONTINUE_NULL);
569 ASSERT(ret == THREAD_AWAKENED);
570 }
571
572 if (thread_call_free(wrapTC->TChdl)) {
573 _FREE(wrapTC, M_TEMP);
574 } else {
575 /* Gut this cyclic and move on ... */
576 wrapTC->hdlr.cyh_func = noop_cyh_func;
577 wrapTC->when.cyt_interval = NEARLY_FOREVER;
578 }
579 }
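/*
 * Note on the WAKEUP_REAPER handshake above: if thread_call_cancel() fails
 * because the callout is currently executing, cyclic_remove() parks itself
 * with assert_wait() and sets cyt_interval to WAKEUP_REAPER. When
 * _cyclic_apply() re-arms the thread call it notices that sentinel and issues
 * the thread_wakeup() that lets the remover retry the cancellation.
 */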
580
581 int
582 ddi_driver_major(dev_info_t *devi)
583 {
584 return (int)major(CAST_DOWN_EXPLICIT(int, devi));
585 }
586
587 int
588 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
589 minor_t minor_num, const char *node_type, int flag)
590 {
591 #pragma unused(spec_type,node_type,flag)
592 dev_t dev = makedev( ddi_driver_major(dip), minor_num );
593
594 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) {
595 return DDI_FAILURE;
596 } else {
597 return DDI_SUCCESS;
598 }
599 }
600
601 void
602 ddi_remove_minor_node(dev_info_t *dip, char *name)
603 {
604 #pragma unused(dip,name)
605 /* XXX called from dtrace_detach, so NOTREACHED for now. */
606 }
607
608 major_t
609 getemajor( dev_t d )
610 {
611 return (major_t) major(d);
612 }
613
614 minor_t
615 getminor( dev_t d )
616 {
617 return (minor_t) minor(d);
618 }
619
620 extern void Debugger(const char*);
621
622 void
623 debug_enter(char *c)
624 {
625 Debugger(c);
626 }
627
628 /*
629 * kmem
630 */
631
632 void *
633 dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
634 {
635 #pragma unused(kmflag)
636
637 /*
638 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
639 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
640 */
641 vm_size_t vsize = size;
642 return kalloc_canblock(&vsize, TRUE, site);
643 }
644
645 void *
646 dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
647 {
648 #pragma unused(kmflag)
649
650 /*
651 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
652 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
653 */
654 vm_size_t vsize = size;
655 void* buf = kalloc_canblock(&vsize, TRUE, site);
656
657 if (!buf) {
658 return NULL;
659 }
660
661 bzero(buf, size);
662
663 return buf;
664 }
665
666 void
667 dt_kmem_free(void *buf, size_t size)
668 {
669 #pragma unused(size)
670 /*
671 * DTrace relies on this; it does a lot of NULL frees.
672 * A null free causes the debug builds to panic.
673 */
674 if (buf == NULL) {
675 return;
676 }
677
678 ASSERT(size > 0);
679
680 kfree(buf, size);
681 }
682
683
684
685 /*
686 * aligned dt_kmem allocator
687 * align should be a power of two
688 */
689
690 void*
691 dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *site)
692 {
693 void *mem, **addr_to_free;
694 intptr_t mem_aligned;
695 size_t *size_to_free, hdr_size;
696
697 /* Must be a power of two. */
698 assert(align != 0);
699 assert((align & (align - 1)) == 0);
700
701 /*
702 * We are going to add a header to the allocation. It contains
703 * the address to free and the total size of the buffer.
704 */
705 hdr_size = sizeof(size_t) + sizeof(void*);
706 mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site);
707 if (mem == NULL) {
708 return NULL;
709 }
710
711 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
712
713 /* Write the address to free in the header. */
714 addr_to_free = (void**) (mem_aligned - sizeof(void*));
715 *addr_to_free = mem;
716
717 /* Write the size to free in the header. */
718 size_to_free = (size_t*) (mem_aligned - hdr_size);
719 *size_to_free = size + align + hdr_size;
720
721 return (void*) mem_aligned;
722 }
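/*
 * Layout produced above (illustrative); mem_aligned is the pointer returned
 * to the caller:
 *
 *   mem                                      mem_aligned
 *    |                                           |
 *    v                                           v
 *    +-----------------+------------+-----------+----------------+
 *    |     padding     | total size | orig addr | caller's bytes |
 *    +-----------------+------------+-----------+----------------+
 *                        (size_t)     (void *)
 *
 * dt_kmem_free_aligned() reads the two header words just below the aligned
 * pointer to recover the original allocation address and its total size.
 */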
723
724 void*
725 dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *s)
726 {
727 void* buf;
728
729 buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s);
730
731 if (!buf) {
732 return NULL;
733 }
734
735 bzero(buf, size);
736
737 return buf;
738 }
739
740 void
741 dt_kmem_free_aligned(void* buf, size_t size)
742 {
743 #pragma unused(size)
744 intptr_t ptr = (intptr_t) buf;
745 void **addr_to_free = (void**) (ptr - sizeof(void*));
746 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
747
748 if (buf == NULL) {
749 return;
750 }
751
752 dt_kmem_free(*addr_to_free, *size_to_free);
753 }
754
755 /*
756 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
757 * doesn't specify constructor, destructor, or reclaim methods.
758 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
759 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
760 */
761 kmem_cache_t *
762 kmem_cache_create(
763 const char *name, /* descriptive name for this cache */
764 size_t bufsize, /* size of the objects it manages */
765 size_t align, /* required object alignment */
766 int (*constructor)(void *, void *, int), /* object constructor */
767 void (*destructor)(void *, void *), /* object destructor */
768 void (*reclaim)(void *), /* memory reclaim callback */
769 void *private, /* pass-thru arg for constr/destr/reclaim */
770 vmem_t *vmp, /* vmem source for slab allocation */
771 int cflags) /* cache creation flags */
772 {
773 #pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
774 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
775 }
776
777 void *
778 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
779 {
780 #pragma unused(kmflag)
781 size_t bufsize = (size_t)cp;
782 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
783 }
784
785 void
786 kmem_cache_free(kmem_cache_t *cp, void *buf)
787 {
788 #pragma unused(cp)
789 _FREE(buf, M_TEMP);
790 }
791
792 void
793 kmem_cache_destroy(kmem_cache_t *cp)
794 {
795 #pragma unused(cp)
796 }
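#if 0
/*
 * Illustrative sketch only: with this shim the "cache" handle is nothing but
 * the object size, so the lifecycle collapses to _MALLOC/_FREE of that size.
 * The cache name and the 128-byte object size are hypothetical.
 */
static void
example_cache_use(void)
{
    kmem_cache_t *kc = kmem_cache_create("example_cache", 128, 0,
        NULL, NULL, NULL, NULL, NULL, 0);
    void *obj = kmem_cache_alloc(kc, 0);    /* kmflag is ignored by the shim */

    kmem_cache_free(kc, obj);
    kmem_cache_destroy(kc);                 /* a no-op for this shim */
}
#endif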
797
798 /*
799 * vmem (the Solaris resource/arena allocator) used by DTrace solely to hand out resource ids
800 */
801 typedef unsigned int u_daddr_t;
802 #include "blist.h"
803
804 /* By passing around blist *handles*, the underlying blist can be resized as needed. */
805 struct blist_hdl {
806 blist_t blist;
807 };
808
809 vmem_t *
810 vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
811 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
812 {
813 #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
814 blist_t bl;
815 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
816
817 ASSERT(quantum == 1);
818 ASSERT(NULL == ignore5);
819 ASSERT(NULL == ignore6);
820 ASSERT(NULL == source);
821 ASSERT(0 == qcache_max);
822 ASSERT(vmflag & VMC_IDENTIFIER);
823
824 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
825
826 p->blist = bl = blist_create( size );
827 blist_free(bl, 0, size);
828 if (base) {
829 blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
830 }
831 return (vmem_t *)p;
832 }
833
834 void *
835 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
836 {
837 #pragma unused(vmflag)
838 struct blist_hdl *q = (struct blist_hdl *)vmp;
839 blist_t bl = q->blist;
840 daddr_t p;
841
842 p = blist_alloc(bl, (daddr_t)size);
843
844 if ((daddr_t)-1 == p) {
845 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
846 q->blist = bl;
847 p = blist_alloc(bl, (daddr_t)size);
848 if ((daddr_t)-1 == p) {
849 panic("vmem_alloc: failure after blist_resize!");
850 }
851 }
852
853 return (void *)(uintptr_t)p;
854 }
855
856 void
857 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
858 {
859 struct blist_hdl *p = (struct blist_hdl *)vmp;
860
861 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
862 }
863
864 void
865 vmem_destroy(vmem_t *vmp)
866 {
867 struct blist_hdl *p = (struct blist_hdl *)vmp;
868
869 blist_destroy( p->blist );
870 _FREE( p, M_TEMP );	/* allocated with M_TEMP in vmem_create() */
871 }
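#if 0
/*
 * Illustrative sketch only: DTrace uses this vmem shim purely as an ID
 * allocator. The arena name, base and size below are hypothetical; the flag
 * argument to vmem_alloc() is ignored by the shim, so 0 is passed.
 */
static void
example_id_arena(void)
{
    vmem_t *arena = vmem_create("example_ids", (void *)1, 1024, 1,
        NULL, NULL, NULL, 0, VMC_IDENTIFIER);
    void *id = vmem_alloc(arena, 1, 0);

    vmem_free(arena, id, 1);
    vmem_destroy(arena);
}
#endif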
872
873 /*
874 * Timing
875 */
876
877 /*
878 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
879 * January 1, 1970. Because it can be called from probe context, it must take no locks.
880 */
881
882 hrtime_t
883 dtrace_gethrestime(void)
884 {
885 clock_sec_t secs;
886 clock_nsec_t nanosecs;
887 uint64_t secs64, ns64;
888
889 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
890 secs64 = (uint64_t)secs;
891 ns64 = (uint64_t)nanosecs;
892
893 ns64 = ns64 + (secs64 * 1000000000LL);
894 return ns64;
895 }
896
897 /*
898 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
899 * Hence its primary use is to specify intervals.
900 */
901
902 hrtime_t
903 dtrace_abs_to_nano(uint64_t elapsed)
904 {
905 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
906
907 /*
908 * If this is the first time we've run, get the timebase.
909 * We can use denom == 0 to indicate that sTimebaseInfo is
910 * uninitialised because it makes no sense to have a zero
911 * denominator in a fraction.
912 */
913
914 if (sTimebaseInfo.denom == 0) {
915 (void) clock_timebase_info(&sTimebaseInfo);
916 }
917
918 /*
919 * Convert to nanoseconds.
920 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
921 *
922 * Provided the final result is representable in 64 bits the following maneuver will
923 * deliver that result without intermediate overflow.
924 */
925 if (sTimebaseInfo.denom == sTimebaseInfo.numer) {
926 return elapsed;
927 } else if (sTimebaseInfo.denom == 1) {
928 return elapsed * (uint64_t)sTimebaseInfo.numer;
929 } else {
930 /* Decompose elapsed = eta32 * 2^32 + eps32: */
931 uint64_t eta32 = elapsed >> 32;
932 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
933
934 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
935
936 /* Form product of elapsed64 (decomposed) and numer: */
937 uint64_t mu64 = numer * eta32;
938 uint64_t lambda64 = numer * eps32;
939
940 /* Divide the constituents by denom: */
941 uint64_t q32 = mu64 / denom;
942 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
943
944 return (q32 << 32) + ((r32 << 32) + lambda64) / denom;
945 }
946 }
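/*
 * Spelled out, the final branch above evaluates elapsed * numer / denom
 * without a 128-bit intermediate:
 *
 *   elapsed * numer / denom
 *     = (eta32 * 2^32 + eps32) * numer / denom
 *     = (mu64 * 2^32 + lambda64) / denom                 with mu64 = numer * eta32
 *     = q32 * 2^32 + (r32 * 2^32 + lambda64) / denom     with mu64 = q32 * denom + r32
 *
 * which is exactly (q32 << 32) + ((r32 << 32) + lambda64) / denom, valid as
 * long as the final result itself fits in 64 bits (as the comment above notes).
 */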
947
948 hrtime_t
949 dtrace_gethrtime(void)
950 {
951 static uint64_t start = 0;
952
953 if (start == 0) {
954 start = mach_absolute_time();
955 }
956
957 return dtrace_abs_to_nano(mach_absolute_time() - start);
958 }
959
960 /*
961 * Atomicity and synchronization
962 */
963 uint32_t
964 dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
965 {
966 if (OSCompareAndSwap((UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) {
967 return cmp;
968 } else {
969 return ~cmp; /* Must return something *other* than cmp */
970 }
971 }
972
973 void *
974 dtrace_casptr(void *target, void *cmp, void *new)
975 {
976 if (OSCompareAndSwapPtr( cmp, new, (void**)target )) {
977 return cmp;
978 } else {
979 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
980 }
981 }
982
983 /*
984 * Interrupt manipulation
985 */
986 dtrace_icookie_t
987 dtrace_interrupt_disable(void)
988 {
989 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
990 }
991
992 void
993 dtrace_interrupt_enable(dtrace_icookie_t reenable)
994 {
995 (void)ml_set_interrupts_enabled((boolean_t)reenable);
996 }
997
998 /*
999 * MP coordination
1000 */
1001 static void
1002 dtrace_sync_func(void)
1003 {
1004 }
1005
1006 /*
1007 * dtrace_sync() is not called from probe context.
1008 */
1009 void
1010 dtrace_sync(void)
1011 {
1012 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1013 }
1014
1015 /*
1016 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1017 */
1018
1019 extern kern_return_t dtrace_copyio_preflight(addr64_t);
1020 extern kern_return_t dtrace_copyio_postflight(addr64_t);
1021
1022 static int
1023 dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1024 {
1025 #pragma unused(kaddr)
1026
1027 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1028 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1029
1030 ASSERT(kaddr + size >= kaddr);
1031
1032 if (uaddr + size < uaddr || /* Avoid address wrap. */
1033 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) { /* Machine specific setup/constraints. */
1034 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1035 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1036 return 0;
1037 }
1038 return 1;
1039 }
1040
1041 void
1042 dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1043 {
1044 #pragma unused(flags)
1045
1046 if (dtrace_copycheck( src, dst, len )) {
1047 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1048 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1049 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1050 }
1051 dtrace_copyio_postflight(src);
1052 }
1053 }
1054
1055 void
1056 dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1057 {
1058 #pragma unused(flags)
1059
1060 size_t actual;
1061
1062 if (dtrace_copycheck( src, dst, len )) {
1063 /* copyin as many as 'len' bytes. */
1064 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1065
1066 /*
1067 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1068 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1069 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1070 * to the caller.
1071 */
1072 if (error && error != ENAMETOOLONG) {
1073 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1074 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1075 }
1076 dtrace_copyio_postflight(src);
1077 }
1078 }
1079
1080 void
1081 dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1082 {
1083 #pragma unused(flags)
1084
1085 if (dtrace_copycheck( dst, src, len )) {
1086 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1087 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1088 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1089 }
1090 dtrace_copyio_postflight(dst);
1091 }
1092 }
1093
1094 void
1095 dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1096 {
1097 #pragma unused(flags)
1098
1099 size_t actual;
1100
1101 if (dtrace_copycheck( dst, src, len )) {
1102 /*
1103 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1104 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1105 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1106 * to the caller.
1107 */
1108 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1109 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1110 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1111 }
1112 dtrace_copyio_postflight(dst);
1113 }
1114 }
1115
1116 extern const int copysize_limit_panic;
1117
1118 int
1119 dtrace_copy_maxsize(void)
1120 {
1121 return copysize_limit_panic;
1122 }
1123
1124
1125 int
1126 dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
1127 {
1128 int maxsize = dtrace_copy_maxsize();
1129 /*
1130 * Partition the copyout into copysize_limit_panic-sized chunks
1131 */
1132 while (nbytes >= (vm_size_t)maxsize) {
1133 if (copyout(kaddr, uaddr, maxsize) != 0) {
1134 return EFAULT;
1135 }
1136
1137 nbytes -= maxsize;
1138 uaddr += maxsize;
1139 kaddr += maxsize;
1140 }
1141 if (nbytes > 0) {
1142 if (copyout(kaddr, uaddr, nbytes) != 0) {
1143 return EFAULT;
1144 }
1145 }
1146
1147 return 0;
1148 }
1149
1150 uint8_t
1151 dtrace_fuword8(user_addr_t uaddr)
1152 {
1153 uint8_t ret = 0;
1154
1155 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1156 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1157 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1158 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1159 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1160 }
1161 dtrace_copyio_postflight(uaddr);
1162 }
1163 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1164
1165 return ret;
1166 }
1167
1168 uint16_t
1169 dtrace_fuword16(user_addr_t uaddr)
1170 {
1171 uint16_t ret = 0;
1172
1173 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1174 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1175 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1176 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1177 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1178 }
1179 dtrace_copyio_postflight(uaddr);
1180 }
1181 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1182
1183 return ret;
1184 }
1185
1186 uint32_t
1187 dtrace_fuword32(user_addr_t uaddr)
1188 {
1189 uint32_t ret = 0;
1190
1191 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1192 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1193 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1194 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1195 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1196 }
1197 dtrace_copyio_postflight(uaddr);
1198 }
1199 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1200
1201 return ret;
1202 }
1203
1204 uint64_t
1205 dtrace_fuword64(user_addr_t uaddr)
1206 {
1207 uint64_t ret = 0;
1208
1209 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1210 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1211 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1212 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1213 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1214 }
1215 dtrace_copyio_postflight(uaddr);
1216 }
1217 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1218
1219 return ret;
1220 }
1221
1222 /*
1223 * Emulation of Solaris fuword / suword
1224 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1225 */
1226
1227 int
1228 fuword8(user_addr_t uaddr, uint8_t *value)
1229 {
1230 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1231 return -1;
1232 }
1233
1234 return 0;
1235 }
1236
1237 int
1238 fuword16(user_addr_t uaddr, uint16_t *value)
1239 {
1240 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1241 return -1;
1242 }
1243
1244 return 0;
1245 }
1246
1247 int
1248 fuword32(user_addr_t uaddr, uint32_t *value)
1249 {
1250 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1251 return -1;
1252 }
1253
1254 return 0;
1255 }
1256
1257 int
1258 fuword64(user_addr_t uaddr, uint64_t *value)
1259 {
1260 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1261 return -1;
1262 }
1263
1264 return 0;
1265 }
1266
1267 void
1268 fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1269 {
1270 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1271 *value = 0;
1272 }
1273 }
1274
1275 void
1276 fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1277 {
1278 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1279 *value = 0;
1280 }
1281 }
1282
1283 int
1284 suword64(user_addr_t addr, uint64_t value)
1285 {
1286 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1287 return -1;
1288 }
1289
1290 return 0;
1291 }
1292
1293 int
1294 suword32(user_addr_t addr, uint32_t value)
1295 {
1296 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1297 return -1;
1298 }
1299
1300 return 0;
1301 }
1302
1303 /*
1304 * Miscellaneous
1305 */
1306 extern boolean_t dtrace_tally_fault(user_addr_t);
1307
1308 boolean_t
1309 dtrace_tally_fault(user_addr_t uaddr)
1310 {
1311 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1312 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1313 return DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE;
1314 }
1315
1316 #define TOTTY 0x02
1317 extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1318
1319 int
1320 vuprintf(const char *format, va_list ap)
1321 {
1322 return prf(format, ap, TOTTY, NULL);
1323 }
1324
1325 /* Not called from probe context */
1326 void
1327 cmn_err( int level, const char *format, ... )
1328 {
1329 #pragma unused(level)
1330 va_list alist;
1331
1332 va_start(alist, format);
1333 vuprintf(format, alist);
1334 va_end(alist);
1335 uprintf("\n");
1336 }
1337
1338 /*
1339 * History:
1340 * 2002-01-24 gvdl Initial implementation of strstr
1341 */
1342
1343 __private_extern__ const char *
1344 strstr(const char *in, const char *str)
1345 {
1346 char c;
1347 size_t len;
1348 if (!in || !str) {
1349 return in;
1350 }
1351
1352 c = *str++;
1353 if (!c) {
1354 return (const char *) in; // Trivial empty string case
1355 }
1356 len = strlen(str);
1357 do {
1358 char sc;
1359
1360 do {
1361 sc = *in++;
1362 if (!sc) {
1363 return NULL;
1364 }
1365 } while (sc != c);
1366 } while (strncmp(in, str, len) != 0);
1367
1368 return (const char *) (in - 1);
1369 }
1370
1371 const void*
1372 bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
1373 {
1374 const char *base = base0;
1375 size_t lim;
1376 int cmp;
1377 const void *p;
1378 for (lim = nmemb; lim != 0; lim >>= 1) {
1379 p = base + (lim >> 1) * size;
1380 cmp = (*compar)(key, p);
1381 if (cmp == 0) {
1382 return p;
1383 }
1384 if (cmp > 0) { /* key > p: move right */
1385 base = (const char *)p + size;
1386 lim--;
1387 } /* else move left */
1388 }
1389 return NULL;
1390 }
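#if 0
/*
 * Illustrative sketch only: a comparator and a lookup against a sorted array.
 * The array contents and names are hypothetical.
 */
static int
example_cmp_u32(const void *l, const void *r)
{
    uint32_t a = *(const uint32_t *)l;
    uint32_t b = *(const uint32_t *)r;

    return (a > b) - (a < b);
}

static void
example_bsearch(void)
{
    static const uint32_t sorted[] = { 2, 3, 5, 7, 11 };
    uint32_t key = 7;
    const uint32_t *hit = bsearch(&key, sorted,
        sizeof(sorted) / sizeof(sorted[0]), sizeof(sorted[0]), example_cmp_u32);

    /* hit now points at the 7 entry; bsearch returns NULL when the key is absent. */
    (void)hit;
}
#endif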
1391
1392 /*
1393 * Runtime and ABI
1394 */
1395 uintptr_t
1396 dtrace_caller(int ignore)
1397 {
1398 #pragma unused(ignore)
1399 return -1; /* Just as in Solaris dtrace_asm.s */
1400 }
1401
1402 int
1403 dtrace_getstackdepth(int aframes)
1404 {
1405 struct frame *fp = (struct frame *)__builtin_frame_address(0);
1406 struct frame *nextfp, *minfp, *stacktop;
1407 int depth = 0;
1408 int on_intr;
1409
1410 if ((on_intr = CPU_ON_INTR(CPU)) != 0) {
1411 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1412 } else {
1413 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1414 }
1415
1416 minfp = fp;
1417
1418 aframes++;
1419
1420 for (;;) {
1421 depth++;
1422
1423 nextfp = *(struct frame **)fp;
1424
1425 if (nextfp <= minfp || nextfp >= stacktop) {
1426 if (on_intr) {
1427 /*
1428 * Hop from interrupt stack to thread stack.
1429 */
1430 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1431
1432 minfp = (struct frame *)kstack_base;
1433 stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1434
1435 on_intr = 0;
1436 continue;
1437 }
1438 break;
1439 }
1440
1441 fp = nextfp;
1442 minfp = fp;
1443 }
1444
1445 if (depth <= aframes) {
1446 return 0;
1447 }
1448
1449 return depth - aframes;
1450 }
1451
1452 int
1453 dtrace_addr_in_module(void* addr, struct modctl *ctl)
1454 {
1455 return OSKextKextForAddress(addr) == (void*)ctl->mod_address;
1456 }
1457
1458 /*
1459 * Unconsidered
1460 */
1461 void
1462 dtrace_vtime_enable(void)
1463 {
1464 }
1465
1466 void
1467 dtrace_vtime_disable(void)
1468 {
1469 }