1 /*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 /*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36 #if CONFIG_DTRACE
37
38 #define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39 #include <kern/thread.h>
40 #include <mach/thread_status.h>
41
42 #include <stdarg.h>
43 #include <string.h>
44 #include <sys/malloc.h>
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <sys/proc_internal.h>
48 #include <sys/kauth.h>
49 #include <sys/user.h>
50 #include <sys/systm.h>
51 #include <sys/dtrace.h>
52 #include <sys/dtrace_impl.h>
53 #include <libkern/OSAtomic.h>
54 #include <kern/kern_types.h>
55 #include <kern/timer_call.h>
56 #include <kern/thread_call.h>
57 #include <kern/task.h>
58 #include <kern/sched_prim.h>
59 #include <kern/queue.h>
60 #include <miscfs/devfs/devfs.h>
61 #include <kern/kalloc.h>
62
63 #include <mach/vm_param.h>
64 #include <mach/mach_vm.h>
65 #include <mach/task.h>
66 #include <vm/pmap.h>
67 #include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
68
69 /*
70 * pid/proc
71 */
72 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
73 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
74
75 /* Not called from probe context */
76 proc_t *
77 sprlock(pid_t pid)
78 {
79 proc_t* p;
80
81 if ((p = proc_find(pid)) == PROC_NULL) {
82 return PROC_NULL;
83 }
84
85 task_suspend_internal(p->task);
86
87 proc_lock(p);
88
89 lck_mtx_lock(&p->p_dtrace_sprlock);
90
91 return p;
92 }
93
94 /* Not called from probe context */
95 void
96 sprunlock(proc_t *p)
97 {
98 if (p != PROC_NULL) {
99 lck_mtx_unlock(&p->p_dtrace_sprlock);
100
101 proc_unlock(p);
102
103 task_resume_internal(p->task);
104
105 proc_rele(p);
106 }
107 }
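/*
 * Illustrative calling pattern (the real consumers, e.g. the fasttrap
 * provider, live elsewhere in the DTrace sources; "held" and "target_pid"
 * are hypothetical names used only for this sketch):
 *
 *	proc_t *held = sprlock(target_pid);	// suspend task, take proc_lock + sprlock
 *	if (held != PROC_NULL) {
 *		// ... inspect or patch the now-quiescent process ...
 *		sprunlock(held);		// unlock, resume the task, drop the proc_find() ref
 *	}
 */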
108
109 /*
110 * uread/uwrite
111 */
112
113 // These are not exported from vm_map.h.
114 extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
115 extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
116
117 /* Not called from probe context */
118 int
119 uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
120 {
121 kern_return_t ret;
122
123 ASSERT(p != PROC_NULL);
124 ASSERT(p->task != NULL);
125
126 task_t task = p->task;
127
128 /*
129 * Grab a reference to the task vm_map_t to make sure
130 * the map isn't pulled out from under us.
131 *
132 * Because the proc_lock is not held at all times on all code
133 * paths leading here, it is possible for the proc to have
134 * exited. If the map is null, fail.
135 */
136 vm_map_t map = get_task_map_reference(task);
137 if (map) {
138 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
139 vm_map_deallocate(map);
140 } else
141 ret = KERN_TERMINATED;
142
143 return (int)ret;
144 }
145
146
147 /* Not called from probe context */
148 int
149 uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
150 {
151 kern_return_t ret;
152
153 ASSERT(p != NULL);
154 ASSERT(p->task != NULL);
155
156 task_t task = p->task;
157
158 /*
159 * Grab a reference to the task vm_map_t to make sure
160 * the map isn't pulled out from under us.
161 *
162 * Because the proc_lock is not held at all times on all code
163 * paths leading here, it is possible for the proc to have
164 * exited. If the map is null, fail.
165 */
166 vm_map_t map = get_task_map_reference(task);
167 if (map) {
168 /* Find the memory permissions. */
169 uint32_t nestingDepth=999999;
170 vm_region_submap_short_info_data_64_t info;
171 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
172 mach_vm_address_t address = (mach_vm_address_t)a;
173 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
174
175 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
176 if (ret != KERN_SUCCESS)
177 goto done;
178
179 vm_prot_t reprotect;
180
181 if (!(info.protection & VM_PROT_WRITE)) {
182 /* Save the original protection values for restoration later */
183 reprotect = info.protection;
184
185 if (info.max_protection & VM_PROT_WRITE) {
186 /* The memory is not currently writable, but can be made writable. */
187 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
188 } else {
189 /*
190 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
191 *
192 * Strangely, we can't just say "reprotect | VM_PROT_COPY"; that fails.
193 */
194 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
195 }
196
197 if (ret != KERN_SUCCESS)
198 goto done;
199
200 } else {
201 /* The memory was already writable. */
202 reprotect = VM_PROT_NONE;
203 }
204
205 ret = vm_map_write_user( map,
206 buf,
207 (vm_map_address_t)a,
208 (vm_size_t)len);
209
210 if (ret != KERN_SUCCESS)
211 goto done;
212
213 if (reprotect != VM_PROT_NONE) {
214 ASSERT(reprotect & VM_PROT_EXECUTE);
215 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
216 }
217
218 done:
219 vm_map_deallocate(map);
220 } else
221 ret = KERN_TERMINATED;
222
223 return (int)ret;
224 }
225
226 /*
227 * cpuvar
228 */
229 lck_mtx_t cpu_lock;
230 lck_mtx_t cyc_lock;
231 lck_mtx_t mod_lock;
232
233 dtrace_cpu_t *cpu_list;
234 cpu_core_t *cpu_core; /* XXX TLB lockdown? */
235
236 /*
237 * cred_t
238 */
239
240 /*
241 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
242 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
243 */
244 cred_t *
245 dtrace_CRED(void)
246 {
247 struct uthread *uthread = get_bsdthread_info(current_thread());
248
249 if (uthread == NULL)
250 return NULL;
251 else
252 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
253 }
254
255 #define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
256 #define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
257 HAS_ALLPRIVS(cr) : \
258 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
259
260 int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
261 {
262 #pragma unused(priv, all)
263 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
264 }
265
266 int
267 PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
268 {
269 #pragma unused(priv, boolean)
270 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
271 }
272
273 /* XXX Get around const poisoning using structure assigns */
274 gid_t
275 crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
276
277 uid_t
278 crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
279
280 /*
281 * "cyclic"
282 */
283
284 typedef struct wrap_timer_call {
285 /* node attributes */
286 cyc_handler_t hdlr;
287 cyc_time_t when;
288 uint64_t deadline;
289 int cpuid;
290 boolean_t suspended;
291 struct timer_call call;
292
293 /* next item in the linked list */
294 LIST_ENTRY(wrap_timer_call) entries;
295 } wrap_timer_call_t;
296
297 #define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
298 #define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
299
300
301 typedef struct cyc_list {
302 cyc_omni_handler_t cyl_omni;
303 wrap_timer_call_t cyl_wrap_by_cpus[];
304 } cyc_list_t;
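/*
 * The flexible array member above holds one wrap_timer_call_t per possible
 * CPU, indexed by cpu_number(): cyclic_add_omni() below allocates
 * sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t) as a single block,
 * and _cyclic_add_omni()/_cyclic_remove_omni() each operate on the slot for
 * the CPU they are cross-called on.
 */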
305
306 /* CPU going online/offline notifications */
307 void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
308 void dtrace_cpu_state_changed(int, boolean_t);
309
310 void
311 dtrace_install_cpu_hooks(void) {
312 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
313 }
314
315 void
316 dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
317 #pragma unused(cpuid)
318 wrap_timer_call_t *wrapTC = NULL;
319 boolean_t suspend = (is_running ? FALSE : TRUE);
320 dtrace_icookie_t s;
321
322 /* Ensure that we're not going to leave the CPU */
323 s = dtrace_interrupt_disable();
324 assert(cpuid == cpu_number());
325
326 LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
327 assert(wrapTC->cpuid == cpu_number());
328 if (suspend) {
329 assert(!wrapTC->suspended);
330 /* If this fails, we'll panic anyway, so let's do this now. */
331 if (!timer_call_cancel(&wrapTC->call))
332 panic("timer_call_set_suspend() failed to cancel a timer call");
333 wrapTC->suspended = TRUE;
334 } else {
335 /* Rearm the timer, but ensure it was suspended first. */
336 assert(wrapTC->suspended);
337 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
338 &wrapTC->deadline);
339 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
340 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
341 wrapTC->suspended = FALSE;
342 }
343
344 }
345
346 /* Restore the previous interrupt state. */
347 dtrace_interrupt_enable(s);
348 }
349
350 static void
351 _timer_call_apply_cyclic( void *ignore, void *vTChdl )
352 {
353 #pragma unused(ignore)
354 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
355
356 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
357
358 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
359 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
360 }
361
362 static cyclic_id_t
363 timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
364 {
365 uint64_t now;
366 dtrace_icookie_t s;
367
368 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
369 wrapTC->hdlr = *handler;
370 wrapTC->when = *when;
371
372 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
373
374 now = mach_absolute_time();
375 wrapTC->deadline = now;
376
377 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
378
379 /* Insert the timer into the list of running timers on this CPU, and start it. */
380 s = dtrace_interrupt_disable();
381 wrapTC->cpuid = cpu_number();
382 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
383 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
384 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
385 wrapTC->suspended = FALSE;
386 dtrace_interrupt_enable(s);
387
388 return (cyclic_id_t)wrapTC;
389 }
390
391 /*
392 * Executed on the CPU the timer is running on.
393 */
394 static void
395 timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
396 {
397 assert(wrapTC);
398 assert(cpu_number() == wrapTC->cpuid);
399
400 if (!timer_call_cancel(&wrapTC->call))
401 panic("timer_call_remove_cyclic() failed to cancel a timer call");
402
403 LIST_REMOVE(wrapTC, entries);
404 }
405
406 static void *
407 timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
408 {
409 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
410 }
411
412 cyclic_id_t
413 cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
414 {
415 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
416 if (NULL == wrapTC)
417 return CYCLIC_NONE;
418 else
419 return timer_call_add_cyclic( wrapTC, handler, when );
420 }
421
422 void
423 cyclic_timer_remove(cyclic_id_t cyclic)
424 {
425 ASSERT( cyclic != CYCLIC_NONE );
426
427 /* Removing a timer call must be done on the CPU the timer is running on. */
428 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
429 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
430
431 _FREE((void *)cyclic, M_TEMP);
432 }
433
434 static void
435 _cyclic_add_omni(cyc_list_t *cyc_list)
436 {
437 cyc_time_t cT;
438 cyc_handler_t cH;
439 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
440
441 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
442
443 wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
444 timer_call_add_cyclic(wrapTC, &cH, &cT);
445 }
446
447 cyclic_id_list_t
448 cyclic_add_omni(cyc_omni_handler_t *omni)
449 {
450 cyc_list_t *cyc_list =
451 _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
452
453 if (NULL == cyc_list)
454 return NULL;
455
456 cyc_list->cyl_omni = *omni;
457
458 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
459
460 return (cyclic_id_list_t)cyc_list;
461 }
462
463 static void
464 _cyclic_remove_omni(cyc_list_t *cyc_list)
465 {
466 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
467 void *oarg;
468 wrap_timer_call_t *wrapTC;
469
470 /*
471 * If the processor was offline when dtrace started, we did not allocate
472 * a cyclic timer for this CPU.
473 */
474 if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
475 oarg = timer_call_get_cyclic_arg(wrapTC);
476 timer_call_remove_cyclic(wrapTC);
477 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
478 }
479 }
480
481 void
482 cyclic_remove_omni(cyclic_id_list_t cyc_list)
483 {
484 ASSERT(cyc_list != NULL);
485
486 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
487 _FREE(cyc_list, M_TEMP);
488 }
489
490 typedef struct wrap_thread_call {
491 thread_call_t TChdl;
492 cyc_handler_t hdlr;
493 cyc_time_t when;
494 uint64_t deadline;
495 } wrap_thread_call_t;
496
497 /*
498 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
499 * cleaner and the deadman, but too distant in time and place for the profile provider.
500 */
501 static void
502 _cyclic_apply( void *ignore, void *vTChdl )
503 {
504 #pragma unused(ignore)
505 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
506
507 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
508
509 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
510 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
511
512 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
513 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
514 thread_wakeup((event_t)wrapTC);
515 }
516
517 cyclic_id_t
518 cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
519 {
520 uint64_t now;
521
522 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
523 if (NULL == wrapTC)
524 return CYCLIC_NONE;
525
526 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
527 wrapTC->hdlr = *handler;
528 wrapTC->when = *when;
529
530 ASSERT(when->cyt_when == 0);
531 ASSERT(when->cyt_interval < WAKEUP_REAPER);
532
533 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
534
535 now = mach_absolute_time();
536 wrapTC->deadline = now;
537
538 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
539 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
540
541 return (cyclic_id_t)wrapTC;
542 }
543
544 static void
545 noop_cyh_func(void * ignore)
546 {
547 #pragma unused(ignore)
548 }
549
550 void
551 cyclic_remove(cyclic_id_t cyclic)
552 {
553 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
554
555 ASSERT(cyclic != CYCLIC_NONE);
556
557 while (!thread_call_cancel(wrapTC->TChdl)) {
558 int ret = assert_wait(wrapTC, THREAD_UNINT);
559 ASSERT(ret == THREAD_WAITING);
560
561 wrapTC->when.cyt_interval = WAKEUP_REAPER;
562
563 ret = thread_block(THREAD_CONTINUE_NULL);
564 ASSERT(ret == THREAD_AWAKENED);
565 }
566
567 if (thread_call_free(wrapTC->TChdl))
568 _FREE(wrapTC, M_TEMP);
569 else {
570 /* Gut this cyclic and move on ... */
571 wrapTC->hdlr.cyh_func = noop_cyh_func;
572 wrapTC->when.cyt_interval = NEARLY_FOREVER;
573 }
574 }
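/*
 * Removal handshake used above: if thread_call_cancel() fails because
 * _cyclic_apply() is in flight, cyclic_remove() parks on the
 * wrap_thread_call_t with cyt_interval set to WAKEUP_REAPER; once
 * _cyclic_apply() has re-armed the call it notices that sentinel, wakes the
 * remover, and the remover retries the cancel.
 */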
575
576 /*
577 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
578 */
579
580 thread_call_t
581 dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
582 {
583 #pragma unused(arg)
584 thread_call_t call = thread_call_allocate(func, NULL);
585
586 nanoseconds_to_absolutetime(nanos, &nanos);
587
588 /*
589 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
590 * and clock drift on later invocations is not a worry.
591 */
592 uint64_t deadline = mach_absolute_time() + nanos;
593 /* DRK: consider using a lower priority callout here */
594 thread_call_enter_delayed(call, deadline);
595
596 return call;
597 }
598
599 /*
600 * ddi
601 */
602 void
603 ddi_report_dev(dev_info_t *devi)
604 {
605 #pragma unused(devi)
606 }
607
608
609 static unsigned int gRegisteredProps = 0;
610 static struct {
611 char name[32]; /* enough for "dof-data-" + digits */
612 int *data;
613 uint_t nelements;
614 } gPropTable[16];
615
616 kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
617
618 kern_return_t
619 _dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
620 {
621 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
622 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);
623
624 if (NULL == p)
625 return KERN_FAILURE;
626
627 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
628 gPropTable[gRegisteredProps].nelements = nelements;
629 gPropTable[gRegisteredProps].data = p;
630
631 while (nelements-- > 0) {
632 *p++ = (int)(*data++);
633 }
634
635 gRegisteredProps++;
636 return KERN_SUCCESS;
637 }
638 else
639 return KERN_FAILURE;
640 }
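/*
 * The registered DOF bytes are widened to an int array here because
 * ddi_prop_lookup_int_array() below hands its caller an int vector; each
 * stored int holds exactly one DOF byte.
 */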
641
642 int
643 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
644 const char *name, int **data, uint_t *nelements)
645 {
646 #pragma unused(match_dev,dip,flags)
647 unsigned int i;
648 for (i = 0; i < gRegisteredProps; ++i)
649 {
650 if (0 == strncmp(name, gPropTable[i].name,
651 sizeof(gPropTable[i].name))) {
652 *data = gPropTable[i].data;
653 *nelements = gPropTable[i].nelements;
654 return DDI_SUCCESS;
655 }
656 }
657 return DDI_FAILURE;
658 }
659
660 int
661 ddi_prop_free(void *buf)
662 {
663 _FREE(buf, M_TEMP);
664 return DDI_SUCCESS;
665 }
666
667 int
668 ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
669
670 int
671 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
672 minor_t minor_num, const char *node_type, int flag)
673 {
674 #pragma unused(spec_type,node_type,flag)
675 dev_t dev = makedev( ddi_driver_major(dip), minor_num );
676
677 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
678 return DDI_FAILURE;
679 else
680 return DDI_SUCCESS;
681 }
682
683 void
684 ddi_remove_minor_node(dev_info_t *dip, char *name)
685 {
686 #pragma unused(dip,name)
687 /* XXX called from dtrace_detach, so NOTREACHED for now. */
688 }
689
690 major_t
691 getemajor( dev_t d )
692 {
693 return (major_t) major(d);
694 }
695
696 minor_t
697 getminor ( dev_t d )
698 {
699 return (minor_t) minor(d);
700 }
701
702 dev_t
703 makedevice(major_t major, minor_t minor)
704 {
705 return makedev( major, minor );
706 }
707
708 int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
709 {
710 #pragma unused(dev, dip, flags, name)
711
712 return defvalue;
713 }
714
715 /*
716 * Kernel Debug Interface
717 */
718 int
719 kdi_dtrace_set(kdi_dtrace_set_t ignore)
720 {
721 #pragma unused(ignore)
722 return 0; /* Success */
723 }
724
725 extern void Debugger(const char*);
726
727 void
728 debug_enter(char *c) { Debugger(c); }
729
730 /*
731 * kmem
732 */
733
734 void *
735 dt_kmem_alloc(size_t size, int kmflag)
736 {
737 #pragma unused(kmflag)
738
739 /*
740 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
741 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
742 */
743 #if defined(DTRACE_MEMORY_ZONES)
744 return dtrace_alloc(size);
745 #else
746 return kalloc(size);
747 #endif
748 }
749
750 void *
751 dt_kmem_zalloc(size_t size, int kmflag)
752 {
753 #pragma unused(kmflag)
754
755 /*
756 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
757 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
758 */
759 #if defined(DTRACE_MEMORY_ZONES)
760 void* buf = dtrace_alloc(size);
761 #else
762 void* buf = kalloc(size);
763 #endif
764
765 if(!buf)
766 return NULL;
767
768 bzero(buf, size);
769
770 return buf;
771 }
772
773 void
774 dt_kmem_free(void *buf, size_t size)
775 {
776 #pragma unused(size)
777 /*
778 * DTrace relies on this; it does a lot of NULL frees.
779 * A null free causes the debug builds to panic.
780 */
781 if (buf == NULL) return;
782
783 ASSERT(size > 0);
784
785 #if defined(DTRACE_MEMORY_ZONES)
786 dtrace_free(buf, size);
787 #else
788 kfree(buf, size);
789 #endif
790 }
791
792
793
794 /*
795 * aligned kmem allocator
796 * align should be a power of two
797 */
798
799 void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
800 {
801 void *mem, **addr_to_free;
802 intptr_t mem_aligned;
803 size_t *size_to_free, hdr_size;
804
805 /* Must be a power of two. */
806 assert(align != 0);
807 assert((align & (align - 1)) == 0);
808
809 /*
810 * We are going to add a header to the allocation. It contains
811 * the address to free and the total size of the buffer.
812 */
813 hdr_size = sizeof(size_t) + sizeof(void*);
814 mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
815 if (mem == NULL)
816 return NULL;
817
818 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
819
820 /* Write the address to free in the header. */
821 addr_to_free = (void**) (mem_aligned - sizeof(void*));
822 *addr_to_free = mem;
823
824 /* Write the size to free in the header. */
825 size_to_free = (size_t*) (mem_aligned - hdr_size);
826 *size_to_free = size + align + hdr_size;
827
828 return (void*) mem_aligned;
829 }
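/*
 * Resulting layout (addresses increase to the right):
 *
 *	mem                                  mem_aligned
 *	 |---- padding ----|size_to_free|addr_to_free|---- size usable bytes ----|
 *
 * dt_kmem_free_aligned() below recovers the original allocation address and
 * total size from the two header words immediately preceding the aligned
 * pointer it is handed.
 */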
830
831 void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
832 {
833 void* buf;
834
835 buf = dt_kmem_alloc_aligned(size, align, kmflag);
836
837 if(!buf)
838 return NULL;
839
840 bzero(buf, size);
841
842 return buf;
843 }
844
845 void dt_kmem_free_aligned(void* buf, size_t size)
846 {
847 #pragma unused(size)
848 intptr_t ptr = (intptr_t) buf;
849 void **addr_to_free = (void**) (ptr - sizeof(void*));
850 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
851
852 if (buf == NULL)
853 return;
854
855 dt_kmem_free(*addr_to_free, *size_to_free);
856 }
857
858 /*
859 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
860 * doesn't specify constructor, destructor, or reclaim methods.
861 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
862 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
863 */
864 kmem_cache_t *
865 kmem_cache_create(
866 const char *name, /* descriptive name for this cache */
867 size_t bufsize, /* size of the objects it manages */
868 size_t align, /* required object alignment */
869 int (*constructor)(void *, void *, int), /* object constructor */
870 void (*destructor)(void *, void *), /* object destructor */
871 void (*reclaim)(void *), /* memory reclaim callback */
872 void *private, /* pass-thru arg for constr/destr/reclaim */
873 vmem_t *vmp, /* vmem source for slab allocation */
874 int cflags) /* cache creation flags */
875 {
876 #pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
877 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
878 }
879
880 void *
881 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
882 {
883 #pragma unused(kmflag)
884 size_t bufsize = (size_t)cp;
885 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
886 }
887
888 void
889 kmem_cache_free(kmem_cache_t *cp, void *buf)
890 {
891 #pragma unused(cp)
892 _FREE(buf, M_TEMP);
893 }
894
895 void
896 kmem_cache_destroy(kmem_cache_t *cp)
897 {
898 #pragma unused(cp)
899 }
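/*
 * Minimal usage sketch matching the constrained pattern described above
 * (the cache name and bufsize are hypothetical; the kmflag argument is
 * ignored by this shim):
 *
 *	kmem_cache_t *kc = kmem_cache_create("percpu_state", bufsize, 0,
 *	    NULL, NULL, NULL, NULL, NULL, 0);
 *	void *obj = kmem_cache_alloc(kc, KM_SLEEP);	// really just _MALLOC(bufsize)
 *	bzero(obj, bufsize);				// caller zeroes, as noted above
 *	...
 *	kmem_cache_free(kc, obj);
 *	kmem_cache_destroy(kc);
 */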
900
901 /*
902 * taskq
903 */
904 extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
905
906 static void
907 _taskq_apply( task_func_t func, thread_call_param_t arg )
908 {
909 func( (void *)arg );
910 }
911
912 taskq_t *
913 taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
914 int maxalloc, uint_t flags)
915 {
916 #pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
917
918 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
919 }
920
921 taskqid_t
922 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
923 {
924 #pragma unused(flags)
925 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
926 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
927 return (taskqid_t) tq /* for lack of anything better */;
928 }
929
930 void
931 taskq_destroy(taskq_t *tq)
932 {
933 thread_call_cancel( (thread_call_t) tq );
934 thread_call_free( (thread_call_t) tq );
935 }
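/*
 * In this shim a taskq_t is just a single thread_call_t: taskq_create()
 * allocates the call, taskq_dispatch() re-points it at the requested function
 * (passed as the thread call's first parameter) and fires it once with 'arg',
 * and taskq_destroy() cancels and frees it, so only one outstanding task per
 * queue is supported.
 */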
936
937 pri_t maxclsyspri;
938
939 /*
940 * vmem (the Solaris general-purpose resource allocator) used by DTrace solely to hand out resource ids
941 */
942 typedef unsigned int u_daddr_t;
943 #include "blist.h"
944
945 /* By passing around blist *handles*, the underlying blist can be resized as needed. */
946 struct blist_hdl {
947 blist_t blist;
948 };
949
950 vmem_t *
951 vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
952 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
953 {
954 #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
955 blist_t bl;
956 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
957
958 ASSERT(quantum == 1);
959 ASSERT(NULL == ignore5);
960 ASSERT(NULL == ignore6);
961 ASSERT(NULL == source);
962 ASSERT(0 == qcache_max);
963 ASSERT(vmflag & VMC_IDENTIFIER);
964
965 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
966
967 p->blist = bl = blist_create( size );
968 blist_free(bl, 0, size);
969 if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
970
971 return (vmem_t *)p;
972 }
973
974 void *
975 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
976 {
977 #pragma unused(vmflag)
978 struct blist_hdl *q = (struct blist_hdl *)vmp;
979 blist_t bl = q->blist;
980 daddr_t p;
981
982 p = blist_alloc(bl, (daddr_t)size);
983
984 if ((daddr_t)-1 == p) {
985 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
986 q->blist = bl;
987 p = blist_alloc(bl, (daddr_t)size);
988 if ((daddr_t)-1 == p)
989 panic("vmem_alloc: failure after blist_resize!");
990 }
991
992 return (void *)(uintptr_t)p;
993 }
994
995 void
996 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
997 {
998 struct blist_hdl *p = (struct blist_hdl *)vmp;
999
1000 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
1001 }
1002
1003 void
1004 vmem_destroy(vmem_t *vmp)
1005 {
1006 struct blist_hdl *p = (struct blist_hdl *)vmp;
1007
1008 blist_destroy( p->blist );
1009 _FREE( p, sizeof(struct blist_hdl) );
1010 }
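/*
 * Illustrative use as a pure ID allocator (the argument values are a sketch
 * of how the DTrace core typically creates its arena, not taken from this
 * file):
 *
 *	vmem_t *arena = vmem_create("dtrace_ids", (void *)1, UINT32_MAX, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	uint32_t id = (uint32_t)(uintptr_t)vmem_alloc(arena, 1, VM_BESTFIT | VM_SLEEP);
 *	...
 *	vmem_free(arena, (void *)(uintptr_t)id, 1);
 *
 * The returned "addresses" are simply small integers handed out by the blist.
 */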
1011
1012 /*
1013 * Timing
1014 */
1015
1016 /*
1017 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
1018 * January 1, 1970. Because it can be called from probe context, it must take no locks.
1019 */
1020
1021 hrtime_t
1022 dtrace_gethrestime(void)
1023 {
1024 clock_sec_t secs;
1025 clock_nsec_t nanosecs;
1026 uint64_t secs64, ns64;
1027
1028 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
1029 secs64 = (uint64_t)secs;
1030 ns64 = (uint64_t)nanosecs;
1031
1032 ns64 = ns64 + (secs64 * 1000000000LL);
1033 return ns64;
1034 }
1035
1036 /*
1037 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
1038 * Hence its primary use is to specify intervals.
1039 */
1040
1041 hrtime_t
1042 dtrace_abs_to_nano(uint64_t elapsed)
1043 {
1044 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
1045
1046 /*
1047 * If this is the first time we've run, get the timebase.
1048 * We can use denom == 0 to indicate that sTimebaseInfo is
1049 * uninitialised because it makes no sense to have a zero
1050 * denominator in a fraction.
1051 */
1052
1053 if ( sTimebaseInfo.denom == 0 ) {
1054 (void) clock_timebase_info(&sTimebaseInfo);
1055 }
1056
1057 /*
1058 * Convert to nanoseconds.
1059 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
1060 *
1061 * Provided the final result is representable in 64 bits the following maneuver will
1062 * deliver that result without intermediate overflow.
1063 */
1064 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
1065 return elapsed;
1066 else if (sTimebaseInfo.denom == 1)
1067 return elapsed * (uint64_t)sTimebaseInfo.numer;
1068 else {
1069 /* Decompose elapsed = eta32 * 2^32 + eps32: */
1070 uint64_t eta32 = elapsed >> 32;
1071 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
1072
1073 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
1074
1075 /* Form product of elapsed64 (decomposed) and numer: */
1076 uint64_t mu64 = numer * eta32;
1077 uint64_t lambda64 = numer * eps32;
1078
1079 /* Divide the constituents by denom: */
1080 uint64_t q32 = mu64/denom;
1081 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
1082
1083 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
1084 }
1085 }
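/*
 * Worked example of the decomposition above, with a hypothetical timebase of
 * numer = 125, denom = 3 and elapsed = 2 * 2^32 + 3: eta32 = 2, eps32 = 3,
 * mu64 = 250, lambda64 = 375, q32 = 83, r32 = 1, so the result is
 * (83 << 32) + ((1 << 32) + 375) / 3 = 357913941458, i.e. the same value as
 * elapsed * 125 / 3 computed with full precision, but without ever forming
 * the 96-bit intermediate product.
 */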
1086
1087 hrtime_t
1088 dtrace_gethrtime(void)
1089 {
1090 static uint64_t start = 0;
1091
1092 if (start == 0)
1093 start = mach_absolute_time();
1094
1095 return dtrace_abs_to_nano(mach_absolute_time() - start);
1096 }
1097
1098 /*
1099 * Atomicity and synchronization
1100 */
1101 uint32_t
1102 dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
1103 {
1104 if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
1105 return cmp;
1106 else
1107 return ~cmp; /* Must return something *other* than cmp */
1108 }
1109
1110 void *
1111 dtrace_casptr(void *target, void *cmp, void *new)
1112 {
1113 if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
1114 return cmp;
1115 else
1116 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
1117 }
1118
1119 /*
1120 * Interrupt manipulation
1121 */
1122 dtrace_icookie_t
1123 dtrace_interrupt_disable(void)
1124 {
1125 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
1126 }
1127
1128 void
1129 dtrace_interrupt_enable(dtrace_icookie_t reenable)
1130 {
1131 (void)ml_set_interrupts_enabled((boolean_t)reenable);
1132 }
1133
1134 /*
1135 * MP coordination
1136 */
1137 static void
1138 dtrace_sync_func(void) {}
1139
1140 /*
1141 * dtrace_sync() is not called from probe context.
1142 */
1143 void
1144 dtrace_sync(void)
1145 {
1146 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1147 }
1148
1149 /*
1150 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1151 */
1152
1153 extern kern_return_t dtrace_copyio_preflight(addr64_t);
1154 extern kern_return_t dtrace_copyio_postflight(addr64_t);
1155
1156 static int
1157 dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1158 {
1159 #pragma unused(kaddr)
1160
1161 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1162 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1163
1164 ASSERT(kaddr + size >= kaddr);
1165
1166 if ( uaddr + size < uaddr || /* Avoid address wrap. */
1167 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1168 {
1169 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1170 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1171 return (0);
1172 }
1173 return (1);
1174 }
1175
1176 void
1177 dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1178 {
1179 #pragma unused(flags)
1180
1181 if (dtrace_copycheck( src, dst, len )) {
1182 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1183 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1184 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1185 }
1186 dtrace_copyio_postflight(src);
1187 }
1188 }
1189
1190 void
1191 dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1192 {
1193 #pragma unused(flags)
1194
1195 size_t actual;
1196
1197 if (dtrace_copycheck( src, dst, len )) {
1198 /* copyin as many as 'len' bytes. */
1199 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1200
1201 /*
1202 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1203 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1204 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1205 * to the caller.
1206 */
1207 if (error && error != ENAMETOOLONG) {
1208 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1209 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1210 }
1211 dtrace_copyio_postflight(src);
1212 }
1213 }
1214
1215 void
1216 dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1217 {
1218 #pragma unused(flags)
1219
1220 if (dtrace_copycheck( dst, src, len )) {
1221 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1222 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1223 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1224 }
1225 dtrace_copyio_postflight(dst);
1226 }
1227 }
1228
1229 void
1230 dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1231 {
1232 #pragma unused(flags)
1233
1234 size_t actual;
1235
1236 if (dtrace_copycheck( dst, src, len )) {
1237
1238 /*
1239 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1240 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1241 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1242 * to the caller.
1243 */
1244 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1245 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1246 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1247 }
1248 dtrace_copyio_postflight(dst);
1249 }
1250 }
1251
1252 uint8_t
1253 dtrace_fuword8(user_addr_t uaddr)
1254 {
1255 uint8_t ret = 0;
1256
1257 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1258 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1259 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1260 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1261 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1262 }
1263 dtrace_copyio_postflight(uaddr);
1264 }
1265 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1266
1267 return(ret);
1268 }
1269
1270 uint16_t
1271 dtrace_fuword16(user_addr_t uaddr)
1272 {
1273 uint16_t ret = 0;
1274
1275 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1276 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1277 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1278 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1279 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1280 }
1281 dtrace_copyio_postflight(uaddr);
1282 }
1283 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1284
1285 return(ret);
1286 }
1287
1288 uint32_t
1289 dtrace_fuword32(user_addr_t uaddr)
1290 {
1291 uint32_t ret = 0;
1292
1293 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1294 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1295 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1296 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1297 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1298 }
1299 dtrace_copyio_postflight(uaddr);
1300 }
1301 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1302
1303 return(ret);
1304 }
1305
1306 uint64_t
1307 dtrace_fuword64(user_addr_t uaddr)
1308 {
1309 uint64_t ret = 0;
1310
1311 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1312 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1313 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1314 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1315 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1316 }
1317 dtrace_copyio_postflight(uaddr);
1318 }
1319 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1320
1321 return(ret);
1322 }
1323
1324 /*
1325 * Emulation of Solaris fuword / suword
1326 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1327 */
1328
1329 int
1330 fuword8(user_addr_t uaddr, uint8_t *value)
1331 {
1332 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1333 return -1;
1334 }
1335
1336 return 0;
1337 }
1338
1339 int
1340 fuword16(user_addr_t uaddr, uint16_t *value)
1341 {
1342 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1343 return -1;
1344 }
1345
1346 return 0;
1347 }
1348
1349 int
1350 fuword32(user_addr_t uaddr, uint32_t *value)
1351 {
1352 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1353 return -1;
1354 }
1355
1356 return 0;
1357 }
1358
1359 int
1360 fuword64(user_addr_t uaddr, uint64_t *value)
1361 {
1362 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1363 return -1;
1364 }
1365
1366 return 0;
1367 }
1368
1369 void
1370 fuword8_noerr(user_addr_t uaddr, uint8_t *value)
1371 {
1372 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
1373 *value = 0;
1374 }
1375 }
1376
1377 void
1378 fuword16_noerr(user_addr_t uaddr, uint16_t *value)
1379 {
1380 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
1381 *value = 0;
1382 }
1383 }
1384
1385 void
1386 fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1387 {
1388 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1389 *value = 0;
1390 }
1391 }
1392
1393 void
1394 fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1395 {
1396 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1397 *value = 0;
1398 }
1399 }
1400
1401 int
1402 suword64(user_addr_t addr, uint64_t value)
1403 {
1404 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1405 return -1;
1406 }
1407
1408 return 0;
1409 }
1410
1411 int
1412 suword32(user_addr_t addr, uint32_t value)
1413 {
1414 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1415 return -1;
1416 }
1417
1418 return 0;
1419 }
1420
1421 int
1422 suword16(user_addr_t addr, uint16_t value)
1423 {
1424 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1425 return -1;
1426 }
1427
1428 return 0;
1429 }
1430
1431 int
1432 suword8(user_addr_t addr, uint8_t value)
1433 {
1434 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1435 return -1;
1436 }
1437
1438 return 0;
1439 }
1440
1441
1442 /*
1443 * Miscellaneous
1444 */
1445 extern boolean_t dtrace_tally_fault(user_addr_t);
1446
1447 boolean_t
1448 dtrace_tally_fault(user_addr_t uaddr)
1449 {
1450 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1451 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1452 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1453 }
1454
1455 #define TOTTY 0x02
1456 extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1457
1458 int
1459 vuprintf(const char *format, va_list ap)
1460 {
1461 return prf(format, ap, TOTTY, NULL);
1462 }
1463
1464 /* Not called from probe context */
1465 void cmn_err( int level, const char *format, ... )
1466 {
1467 #pragma unused(level)
1468 va_list alist;
1469
1470 va_start(alist, format);
1471 vuprintf(format, alist);
1472 va_end(alist);
1473 uprintf("\n");
1474 }
1475
1476 /*
1477 * History:
1478 * 2002-01-24 gvdl Initial implementation of strstr
1479 */
1480
1481 __private_extern__ const char *
1482 strstr(const char *in, const char *str)
1483 {
1484 char c;
1485 size_t len;
1486
1487 c = *str++;
1488 if (!c)
1489 return (const char *) in; // Trivial empty string case
1490
1491 len = strlen(str);
1492 do {
1493 char sc;
1494
1495 do {
1496 sc = *in++;
1497 if (!sc)
1498 return (char *) 0;
1499 } while (sc != c);
1500 } while (strncmp(in, str, len) != 0);
1501
1502 return (const char *) (in - 1);
1503 }
1504
1505 /*
1506 * Runtime and ABI
1507 */
1508 uintptr_t
1509 dtrace_caller(int ignore)
1510 {
1511 #pragma unused(ignore)
1512 return -1; /* Just as in Solaris dtrace_asm.s */
1513 }
1514
1515 int
1516 dtrace_getstackdepth(int aframes)
1517 {
1518 struct frame *fp = (struct frame *)__builtin_frame_address(0);
1519 struct frame *nextfp, *minfp, *stacktop;
1520 int depth = 0;
1521 int on_intr;
1522
1523 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1524 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1525 else
1526 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1527
1528 minfp = fp;
1529
1530 aframes++;
1531
1532 for (;;) {
1533 depth++;
1534
1535 nextfp = *(struct frame **)fp;
1536
1537 if (nextfp <= minfp || nextfp >= stacktop) {
1538 if (on_intr) {
1539 /*
1540 * Hop from interrupt stack to thread stack.
1541 */
1542 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1543
1544 minfp = (struct frame *)kstack_base;
1545 stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1546
1547 on_intr = 0;
1548 continue;
1549 }
1550 break;
1551 }
1552
1553 fp = nextfp;
1554 minfp = fp;
1555 }
1556
1557 if (depth <= aframes)
1558 return (0);
1559
1560 return (depth - aframes);
1561 }
1562
1563 /*
1564 * Unconsidered
1565 */
1566 void
1567 dtrace_vtime_enable(void) {}
1568
1569 void
1570 dtrace_vtime_disable(void) {}
1571
1572 #else /* else ! CONFIG_DTRACE */
1573
1574 #include <sys/types.h>
1575 #include <mach/vm_types.h>
1576 #include <mach/kmod.h>
1577
1578 /*
1579 * This exists to prevent build errors when dtrace is unconfigured.
1580 */
1581
1582 kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1583
1584 kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1585 #pragma unused(arg1, arg2, arg3)
1586
1587 return KERN_FAILURE;
1588 }
1589
1590 #endif /* CONFIG_DTRACE */