bsd/dev/dtrace/dtrace_glue.c (apple/xnu, tag xnu-4903.221.2)
1/*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30/*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36#if CONFIG_DTRACE
37
38#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39#include <kern/thread.h>
40#include <mach/thread_status.h>
41
42#include <stdarg.h>
43#include <string.h>
44#include <sys/malloc.h>
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <sys/proc_internal.h>
48#include <sys/kauth.h>
49#include <sys/user.h>
50#include <sys/systm.h>
51#include <sys/dtrace.h>
52#include <sys/dtrace_impl.h>
53#include <libkern/OSAtomic.h>
54#include <libkern/OSKextLibPrivate.h>
55#include <kern/kern_types.h>
56#include <kern/timer_call.h>
57#include <kern/thread_call.h>
58#include <kern/task.h>
59#include <kern/sched_prim.h>
60#include <kern/queue.h>
61#include <miscfs/devfs/devfs.h>
62#include <kern/kalloc.h>
63
64#include <mach/vm_param.h>
65#include <mach/mach_vm.h>
66#include <mach/task.h>
67#include <vm/pmap.h>
68#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
69
70/*
71 * pid/proc
72 */
73/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
74#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
75
76void
77dtrace_sprlock(proc_t *p)
78{
79 lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
80 lck_mtx_lock(&p->p_dtrace_sprlock);
81}
82
83void
84dtrace_sprunlock(proc_t *p)
85{
86 lck_mtx_unlock(&p->p_dtrace_sprlock);
87
88}
89
90/* Not called from probe context */
91proc_t *
92sprlock(pid_t pid)
93{
94 proc_t* p;
95
96 if ((p = proc_find(pid)) == PROC_NULL) {
97 return PROC_NULL;
98 }
99
100 task_suspend_internal(p->task);
101
102 dtrace_sprlock(p);
103
104 proc_lock(p);
105
106 return p;
107}
108
109/* Not called from probe context */
110void
111sprunlock(proc_t *p)
112{
113 if (p != PROC_NULL) {
114 proc_unlock(p);
115
116 dtrace_sprunlock(p);
117
118 task_resume_internal(p->task);
119
120 proc_rele(p);
121 }
122}
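/*
 * sprlock()/sprunlock() bracket inspection of another process (e.g. by the
 * fasttrap provider). Note the ordering above: suspend the task, take the
 * per-proc dtrace sprlock, then the proc lock; sprunlock() undoes the three
 * in reverse order before dropping the reference taken by proc_find().
 */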
123
124/*
125 * uread/uwrite
126 */
127
128// These are not exported from vm_map.h.
129extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
130extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
131
132/* Not called from probe context */
133int
134uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
135{
136 kern_return_t ret;
137
138 ASSERT(p != PROC_NULL);
139 ASSERT(p->task != NULL);
140
141 task_t task = p->task;
142
143 /*
144 * Grab a reference to the task vm_map_t to make sure
145 * the map isn't pulled out from under us.
146 *
147 * Because the proc_lock is not held at all times on all code
148 * paths leading here, it is possible for the proc to have
149 * exited. If the map is null, fail.
150 */
151 vm_map_t map = get_task_map_reference(task);
152 if (map) {
153 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
154 vm_map_deallocate(map);
155 } else
156 ret = KERN_TERMINATED;
157
158 return (int)ret;
159}
160
161
162/* Not called from probe context */
163int
164uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
165{
166 kern_return_t ret;
167
168 ASSERT(p != NULL);
169 ASSERT(p->task != NULL);
170
171 task_t task = p->task;
172
173 /*
174 * Grab a reference to the task vm_map_t to make sure
175 * the map isn't pulled out from under us.
176 *
177 * Because the proc_lock is not held at all times on all code
178 * paths leading here, it is possible for the proc to have
179 * exited. If the map is null, fail.
180 */
181 vm_map_t map = get_task_map_reference(task);
182 if (map) {
183 /* Find the memory permissions. */
184 uint32_t nestingDepth=999999;
185 vm_region_submap_short_info_data_64_t info;
186 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
187 mach_vm_address_t address = (mach_vm_address_t)a;
188 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
189
190 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
191 if (ret != KERN_SUCCESS)
192 goto done;
193
194 vm_prot_t reprotect;
195
196 if (!(info.protection & VM_PROT_WRITE)) {
197 /* Save the original protection values for restoration later */
198 reprotect = info.protection;
199
200 if (info.max_protection & VM_PROT_WRITE) {
201 /* The memory is not currently writable, but can be made writable. */
202 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
203 } else {
204 /*
205 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
206 *
207 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
208 */
209 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
210 }
211
212 if (ret != KERN_SUCCESS)
213 goto done;
214
215 } else {
216 /* The memory was already writable. */
217 reprotect = VM_PROT_NONE;
218 }
219
220 ret = vm_map_write_user( map,
221 buf,
222 (vm_map_address_t)a,
223 (vm_size_t)len);
224
225 dtrace_flush_caches();
226
227 if (ret != KERN_SUCCESS)
228 goto done;
229
230 if (reprotect != VM_PROT_NONE) {
231 ASSERT(reprotect & VM_PROT_EXECUTE);
232 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
233 }
234
235done:
236 vm_map_deallocate(map);
237 } else
238 ret = KERN_TERMINATED;
239
240 return (int)ret;
241}
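/*
 * Summary of the protection dance above: if the target range is not writable,
 * either add VM_PROT_WRITE (dropping execute) when max_protection allows it,
 * or force a copy-on-write mapping via VM_PROT_COPY. The caches are flushed
 * after the write and the original protections are then restored.
 */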
242
243/*
244 * cpuvar
245 */
246lck_mtx_t cpu_lock;
247lck_mtx_t cyc_lock;
248lck_mtx_t mod_lock;
249
250dtrace_cpu_t *cpu_list;
251cpu_core_t *cpu_core; /* XXX TLB lockdown? */
252
253/*
254 * cred_t
255 */
256
257/*
258 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
259 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
260 */
261cred_t *
262dtrace_CRED(void)
263{
264 struct uthread *uthread = get_bsdthread_info(current_thread());
265
266 if (uthread == NULL)
267 return NULL;
268 else
269 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
270}
271
272#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
273#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
274 HAS_ALLPRIVS(cr) : \
275 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
276
277int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
278{
279#pragma unused(priv, all)
280 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
281}
282
283int
284PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
285{
286#pragma unused(priv, boolean)
287 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
288}
289
290uid_t
291crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
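/*
 * crgetuid() copies the credential locally so that kauth_cred_getuid(), which
 * takes a non-const credential pointer, can be called on the Solaris-style
 * const cred_t argument without casting the qualifier away.
 */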
292
293/*
294 * "cyclic"
295 */
296
297typedef struct wrap_timer_call {
298 /* node attributes */
299 cyc_handler_t hdlr;
300 cyc_time_t when;
301 uint64_t deadline;
302 int cpuid;
303 boolean_t suspended;
304 struct timer_call call;
305
306 /* next item in the linked list */
307 LIST_ENTRY(wrap_timer_call) entries;
308} wrap_timer_call_t;
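/*
 * Solaris cyclics are emulated with per-CPU timer_calls: each
 * wrap_timer_call_t re-arms itself from its handler and lives on the owning
 * CPU's cpu_cyc_list so it can be suspended and resumed when that CPU goes
 * offline or comes back online (see dtrace_cpu_state_changed below).
 */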
309
310#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
311#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
312
313
314typedef struct cyc_list {
315 cyc_omni_handler_t cyl_omni;
316 wrap_timer_call_t cyl_wrap_by_cpus[];
317#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
318} __attribute__ ((aligned (8))) cyc_list_t;
319#else
320} cyc_list_t;
321#endif
322
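/*
 * cyl_wrap_by_cpus[] is a flexible array member: cyclic_add_omni() allocates
 * one wrap_timer_call_t per CPU (NCPU entries) immediately after the header,
 * and each CPU initializes its own slot from the cross-call.
 */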
323/* CPU going online/offline notifications */
324void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
325void dtrace_cpu_state_changed(int, boolean_t);
326
327void
328dtrace_install_cpu_hooks(void) {
329 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
330}
331
332void
333dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
334#pragma unused(cpuid)
335 wrap_timer_call_t *wrapTC = NULL;
336 boolean_t suspend = (is_running ? FALSE : TRUE);
337 dtrace_icookie_t s;
338
339 /* Ensure that we're not going to leave the CPU */
340 s = dtrace_interrupt_disable();
341 assert(cpuid == cpu_number());
342
343 LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
344 assert(wrapTC->cpuid == cpu_number());
345 if (suspend) {
346 assert(!wrapTC->suspended);
347 /* If this fails, we'll panic anyway, so let's do this now. */
348 if (!timer_call_cancel(&wrapTC->call))
349 panic("timer_call_set_suspend() failed to cancel a timer call");
350 wrapTC->suspended = TRUE;
351 } else {
352 /* Rearm the timer, but ensure it was suspended first. */
353 assert(wrapTC->suspended);
354 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
355 &wrapTC->deadline);
356 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
357 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
358 wrapTC->suspended = FALSE;
359 }
360
361 }
362
363 /* Restore the previous interrupt state. */
364 dtrace_interrupt_enable(s);
365}
366
367static void
368_timer_call_apply_cyclic( void *ignore, void *vTChdl )
369{
370#pragma unused(ignore)
371 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
372
373 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
374
375 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
376 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
377}
378
379static cyclic_id_t
380timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
381{
382 uint64_t now;
383 dtrace_icookie_t s;
384
385 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
386 wrapTC->hdlr = *handler;
387 wrapTC->when = *when;
388
389 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
390
391 now = mach_absolute_time();
392 wrapTC->deadline = now;
393
394 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
395
396 /* Insert the timer to the list of the running timers on this CPU, and start it. */
397 s = dtrace_interrupt_disable();
398 wrapTC->cpuid = cpu_number();
399 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
400 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
401 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
402 wrapTC->suspended = FALSE;
403 dtrace_interrupt_enable(s);
404
405 return (cyclic_id_t)wrapTC;
406}
407
408/*
409 * Executed on the CPU the timer is running on.
410 */
411static void
412timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
413{
414 assert(wrapTC);
415 assert(cpu_number() == wrapTC->cpuid);
416
417 if (!timer_call_cancel(&wrapTC->call))
418 panic("timer_call_remove_cyclic() failed to cancel a timer call");
419
420 LIST_REMOVE(wrapTC, entries);
421}
422
423static void *
424timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
425{
426 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
427}
428
429cyclic_id_t
430cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
431{
432 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
433 if (NULL == wrapTC)
434 return CYCLIC_NONE;
435 else
436 return timer_call_add_cyclic( wrapTC, handler, when );
437}
438
439void
440cyclic_timer_remove(cyclic_id_t cyclic)
441{
442 ASSERT( cyclic != CYCLIC_NONE );
443
444 /* Removing a timer call must be done on the CPU the timer is running on. */
445 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
446 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
447
448 _FREE((void *)cyclic, M_TEMP);
449}
450
451static void
452_cyclic_add_omni(cyc_list_t *cyc_list)
453{
454 cyc_time_t cT;
455 cyc_handler_t cH;
456 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
457
458 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
459
460 wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
461 timer_call_add_cyclic(wrapTC, &cH, &cT);
462}
463
464cyclic_id_list_t
465cyclic_add_omni(cyc_omni_handler_t *omni)
466{
467 cyc_list_t *cyc_list =
468 _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
469
470 if (NULL == cyc_list)
471 return NULL;
472
473 cyc_list->cyl_omni = *omni;
474
475 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
476
477 return (cyclic_id_list_t)cyc_list;
478}
479
480static void
481_cyclic_remove_omni(cyc_list_t *cyc_list)
482{
483 cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
484 void *oarg;
485 wrap_timer_call_t *wrapTC;
486
487 /*
488 * If the processor was offline when dtrace started, we did not allocate
489 * a cyclic timer for this CPU.
490 */
491 if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
492 oarg = timer_call_get_cyclic_arg(wrapTC);
493 timer_call_remove_cyclic(wrapTC);
494 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
495 }
496}
497
498void
499cyclic_remove_omni(cyclic_id_list_t cyc_list)
500{
501 ASSERT(cyc_list != NULL);
502
503 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
504 _FREE(cyc_list, M_TEMP);
505}
506
507typedef struct wrap_thread_call {
508 thread_call_t TChdl;
509 cyc_handler_t hdlr;
510 cyc_time_t when;
511 uint64_t deadline;
512} wrap_thread_call_t;
513
514/*
515 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
516 * cleaner and the deadman, but too distant in time and place for the profile provider.
517 */
518static void
519_cyclic_apply( void *ignore, void *vTChdl )
520{
521#pragma unused(ignore)
522 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
523
524 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
525
526 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
527 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
528
529 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
530 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
531 thread_wakeup((event_t)wrapTC);
532}
533
534cyclic_id_t
535cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
536{
537 uint64_t now;
538
539 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
540 if (NULL == wrapTC)
541 return CYCLIC_NONE;
542
543 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
544 wrapTC->hdlr = *handler;
545 wrapTC->when = *when;
546
547 ASSERT(when->cyt_when == 0);
548 ASSERT(when->cyt_interval < WAKEUP_REAPER);
549
550 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
551
552 now = mach_absolute_time();
553 wrapTC->deadline = now;
554
555 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
556 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
557
558 return (cyclic_id_t)wrapTC;
559}
560
561static void
562noop_cyh_func(void * ignore)
563{
564#pragma unused(ignore)
565}
566
567void
568cyclic_remove(cyclic_id_t cyclic)
569{
570 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
571
572 ASSERT(cyclic != CYCLIC_NONE);
573
574 while (!thread_call_cancel(wrapTC->TChdl)) {
575 int ret = assert_wait(wrapTC, THREAD_UNINT);
576 ASSERT(ret == THREAD_WAITING);
577
578 wrapTC->when.cyt_interval = WAKEUP_REAPER;
579
580 ret = thread_block(THREAD_CONTINUE_NULL);
581 ASSERT(ret == THREAD_AWAKENED);
582 }
583
584 if (thread_call_free(wrapTC->TChdl))
585 _FREE(wrapTC, M_TEMP);
586 else {
587 /* Gut this cyclic and move on ... */
588 wrapTC->hdlr.cyh_func = noop_cyh_func;
589 wrapTC->when.cyt_interval = NEARLY_FOREVER;
590 }
591}
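/*
 * Removal handshake: if thread_call_cancel() fails, the handler is running or
 * queued, so cyclic_remove() asks it (via WAKEUP_REAPER) to wake us once it
 * has re-armed, then retries the cancel. If the thread call still cannot be
 * freed, the cyclic is gutted with a no-op handler and a far-future interval
 * rather than freed unsafely.
 */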
592
593kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
594
595kern_return_t
596_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
597{
598#pragma unused(name, data, nelements)
599 return KERN_FAILURE;
600}
601
602int
603ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
604
605int
606ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
607 minor_t minor_num, const char *node_type, int flag)
608{
609#pragma unused(spec_type,node_type,flag)
610 dev_t dev = makedev( ddi_driver_major(dip), minor_num );
611
612 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
613 return DDI_FAILURE;
614 else
615 return DDI_SUCCESS;
616}
617
618void
619ddi_remove_minor_node(dev_info_t *dip, char *name)
620{
621#pragma unused(dip,name)
622/* XXX called from dtrace_detach, so NOTREACHED for now. */
623}
624
625major_t
626getemajor( dev_t d )
627{
628 return (major_t) major(d);
629}
630
631minor_t
632getminor ( dev_t d )
633{
634 return (minor_t) minor(d);
635}
636
637extern void Debugger(const char*);
638
639void
640debug_enter(char *c) { Debugger(c); }
641
642/*
643 * kmem
644 */
645
646void *
647dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
648{
649#pragma unused(kmflag)
650
651/*
652 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
653 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
654 */
655 vm_size_t vsize = size;
656 return kalloc_canblock(&vsize, TRUE, site);
657}
658
659void *
660dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
661{
662#pragma unused(kmflag)
663
664/*
665 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
666 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
667 */
668 vm_size_t vsize = size;
669 void* buf = kalloc_canblock(&vsize, TRUE, site);
670
671 if(!buf)
672 return NULL;
673
674 bzero(buf, size);
675
676 return buf;
677}
678
679void
680dt_kmem_free(void *buf, size_t size)
681{
682#pragma unused(size)
683 /*
684 * DTrace relies on this, it's doing a lot of NULL frees.
685 * A null free causes the debug builds to panic.
686 */
687 if (buf == NULL) return;
688
689 ASSERT(size > 0);
690
691 kfree(buf, size);
692}
693
694
695
696/*
697 * aligned dt_kmem allocator
698 * align should be a power of two
699 */
700
701void*
702dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *site)
703{
704 void *mem, **addr_to_free;
705 intptr_t mem_aligned;
706 size_t *size_to_free, hdr_size;
707
708 /* Must be a power of two. */
709 assert(align != 0);
710 assert((align & (align - 1)) == 0);
711
712 /*
713 * We are going to add a header to the allocation. It contains
714 * the address to free and the total size of the buffer.
715 */
716 hdr_size = sizeof(size_t) + sizeof(void*);
717 mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site);
718 if (mem == NULL)
719 return NULL;
720
721 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
722
723 /* Write the address to free in the header. */
724 addr_to_free = (void**) (mem_aligned - sizeof(void*));
725 *addr_to_free = mem;
726
727 /* Write the size to free in the header. */
728 size_to_free = (size_t*) (mem_aligned - hdr_size);
729 *size_to_free = size + align + hdr_size;
730
731 return (void*) mem_aligned;
732}
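/*
 * Resulting layout (illustrative):
 *
 *   mem                                        mem_aligned
 *    |<-- padding -->| size_to_free | addr_to_free |<-- size bytes for caller -->|
 *
 * dt_kmem_free_aligned() reads the two header words immediately below the
 * aligned pointer to recover the original allocation address and its size.
 */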
733
734void*
735dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *s)
736{
737 void* buf;
738
739 buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s);
740
741 if(!buf)
742 return NULL;
743
744 bzero(buf, size);
745
746 return buf;
747}
748
749void
750dt_kmem_free_aligned(void* buf, size_t size)
751{
752#pragma unused(size)
753 intptr_t ptr = (intptr_t) buf;
754 void **addr_to_free = (void**) (ptr - sizeof(void*));
755 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
756
757 if (buf == NULL)
758 return;
759
760 dt_kmem_free(*addr_to_free, *size_to_free);
761}
762
763/*
764 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
765 * doesn't specify constructor, destructor, or reclaim methods.
766 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
767 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
768 */
769kmem_cache_t *
770kmem_cache_create(
771 const char *name, /* descriptive name for this cache */
772 size_t bufsize, /* size of the objects it manages */
773 size_t align, /* required object alignment */
774 int (*constructor)(void *, void *, int), /* object constructor */
775 void (*destructor)(void *, void *), /* object destructor */
776 void (*reclaim)(void *), /* memory reclaim callback */
777 void *private, /* pass-thru arg for constr/destr/reclaim */
778 vmem_t *vmp, /* vmem source for slab allocation */
779 int cflags) /* cache creation flags */
780{
781#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
782 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
783}
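/*
 * The "cache" returned here is just the object size smuggled through an
 * opaque pointer; kmem_cache_alloc() casts it back to a size_t to size the
 * _MALLOC, which suffices for DTrace's single fixed-size use of this API.
 */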
784
785void *
786kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
787{
788#pragma unused(kmflag)
789 size_t bufsize = (size_t)cp;
790 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
791}
792
793void
794kmem_cache_free(kmem_cache_t *cp, void *buf)
795{
796#pragma unused(cp)
797 _FREE(buf, M_TEMP);
798}
799
800void
801kmem_cache_destroy(kmem_cache_t *cp)
802{
803#pragma unused(cp)
804}
805
806/*
807 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
808 */
809typedef unsigned int u_daddr_t;
810#include "blist.h"
811
812/* By passing around blist *handles*, the underlying blist can be resized as needed. */
813struct blist_hdl {
814 blist_t blist;
815};
816
817vmem_t *
818vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
819 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
820{
821#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
822 blist_t bl;
823 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
824
825 ASSERT(quantum == 1);
826 ASSERT(NULL == ignore5);
827 ASSERT(NULL == ignore6);
828 ASSERT(NULL == source);
829 ASSERT(0 == qcache_max);
830 ASSERT(vmflag & VMC_IDENTIFIER);
831
832 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
833
834 p->blist = bl = blist_create( size );
835 blist_free(bl, 0, size);
836 if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
837
838 return (vmem_t *)p;
839}
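/*
 * Illustrative use (a sketch, not code from this file): dtrace.c creates its
 * probe-ID arena roughly along these lines and draws small integer IDs from it:
 *
 *   vmem_t *arena = vmem_create("dtrace", (void *)(uintptr_t)1, UINT32_MAX,
 *       1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *   dtrace_id_t id = (dtrace_id_t)(uintptr_t)vmem_alloc(arena, 1,
 *       VM_BESTFIT | VM_SLEEP);
 *   ...
 *   vmem_free(arena, (void *)(uintptr_t)id, 1);
 *
 * Only this identifier-arena subset of the Solaris vmem API is implemented here.
 */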
840
841void *
842vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
843{
844#pragma unused(vmflag)
845 struct blist_hdl *q = (struct blist_hdl *)vmp;
846 blist_t bl = q->blist;
847 daddr_t p;
848
849 p = blist_alloc(bl, (daddr_t)size);
850
851 if ((daddr_t)-1 == p) {
852 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
853 q->blist = bl;
854 p = blist_alloc(bl, (daddr_t)size);
855 if ((daddr_t)-1 == p)
856 panic("vmem_alloc: failure after blist_resize!");
857 }
858
859 return (void *)(uintptr_t)p;
860}
861
862void
863vmem_free(vmem_t *vmp, void *vaddr, size_t size)
864{
865 struct blist_hdl *p = (struct blist_hdl *)vmp;
866
867 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
868}
869
870void
871vmem_destroy(vmem_t *vmp)
872{
873 struct blist_hdl *p = (struct blist_hdl *)vmp;
874
875 blist_destroy( p->blist );
876 _FREE( p, sizeof(struct blist_hdl) );
877}
878
879/*
880 * Timing
881 */
882
883/*
884 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
885 * January 1, 1970. Because it can be called from probe context, it must take no locks.
886 */
887
888hrtime_t
889dtrace_gethrestime(void)
890{
891 clock_sec_t secs;
892 clock_nsec_t nanosecs;
893 uint64_t secs64, ns64;
894
895 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
896 secs64 = (uint64_t)secs;
897 ns64 = (uint64_t)nanosecs;
898
899 ns64 = ns64 + (secs64 * 1000000000LL);
900 return ns64;
901}
902
903/*
904 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
905 * Hence its primary use is to specify intervals.
906 */
907
908hrtime_t
909dtrace_abs_to_nano(uint64_t elapsed)
910{
911 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
912
913 /*
914 * If this is the first time we've run, get the timebase.
915 * We can use denom == 0 to indicate that sTimebaseInfo is
916 * uninitialised because it makes no sense to have a zero
917 * denominator in a fraction.
918 */
919
920 if ( sTimebaseInfo.denom == 0 ) {
921 (void) clock_timebase_info(&sTimebaseInfo);
922 }
923
924 /*
925 * Convert to nanoseconds.
926 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
927 *
928 * Provided the final result is representable in 64 bits the following maneuver will
929 * deliver that result without intermediate overflow.
930 */
931 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
932 return elapsed;
933 else if (sTimebaseInfo.denom == 1)
934 return elapsed * (uint64_t)sTimebaseInfo.numer;
935 else {
936 /* Decompose elapsed = eta32 * 2^32 + eps32: */
937 uint64_t eta32 = elapsed >> 32;
938 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
939
940 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
941
942 /* Form product of elapsed64 (decomposed) and numer: */
943 uint64_t mu64 = numer * eta32;
944 uint64_t lambda64 = numer * eps32;
945
946 /* Divide the constituents by denom: */
947 uint64_t q32 = mu64/denom;
948 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
949
950 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
951 }
952}
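/*
 * The decomposition above computes elapsed * numer / denom without a 128-bit
 * intermediate:
 *
 *   elapsed = eta32 * 2^32 + eps32
 *   elapsed * numer = (numer * eta32) * 2^32 + numer * eps32
 *                   = (q32 * denom + r32) * 2^32 + lambda64
 *   elapsed * numer / denom = q32 * 2^32 + (r32 * 2^32 + lambda64) / denom
 *
 * which is the value returned, provided the final result fits in 64 bits.
 */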
953
954hrtime_t
955dtrace_gethrtime(void)
956{
957 static uint64_t start = 0;
958
959 if (start == 0)
960 start = mach_absolute_time();
961
962 return dtrace_abs_to_nano(mach_absolute_time() - start);
963}
964
965/*
966 * Atomicity and synchronization
967 */
968uint32_t
969dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
970{
971 if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
972 return cmp;
973 else
974 return ~cmp; /* Must return something *other* than cmp */
975}
976
977void *
978dtrace_casptr(void *target, void *cmp, void *new)
979{
980 if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
981 return cmp;
982 else
983 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
984}
985
986/*
987 * Interrupt manipulation
988 */
989dtrace_icookie_t
990dtrace_interrupt_disable(void)
991{
992 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
993}
994
995void
996dtrace_interrupt_enable(dtrace_icookie_t reenable)
997{
998 (void)ml_set_interrupts_enabled((boolean_t)reenable);
999}
1000
1001/*
1002 * MP coordination
1003 */
1004static void
1005dtrace_sync_func(void) {}
1006
1007/*
1008 * dtrace_sync() is not called from probe context.
1009 */
1010void
1011dtrace_sync(void)
1012{
1013 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1014}
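/*
 * Cross-calling a no-op to every CPU gives the usual DTrace synchronization
 * guarantee: probe context runs with interrupts disabled, so by the time each
 * CPU has serviced the cross-call, any probe that was in flight there when
 * dtrace_sync() was called has completed.
 */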
1015
1016/*
1017 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1018 */
1019
1020extern kern_return_t dtrace_copyio_preflight(addr64_t);
1021extern kern_return_t dtrace_copyio_postflight(addr64_t);
1022
1023static int
1024dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1025{
1026#pragma unused(kaddr)
1027
1028 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1029 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1030
1031 ASSERT(kaddr + size >= kaddr);
1032
1033 if ( uaddr + size < uaddr || /* Avoid address wrap. */
1034 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1035 {
1036 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1037 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1038 return (0);
1039 }
1040 return (1);
1041}
1042
1043void
1044dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1045{
1046#pragma unused(flags)
1047
1048 if (dtrace_copycheck( src, dst, len )) {
1049 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1050 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1051 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1052 }
1053 dtrace_copyio_postflight(src);
1054 }
1055}
1056
1057void
1058dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1059{
1060#pragma unused(flags)
1061
1062 size_t actual;
1063
1064 if (dtrace_copycheck( src, dst, len )) {
1065 /* copyin as many as 'len' bytes. */
1066 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1067
1068 /*
1069 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1070 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1071 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1072 * to the caller.
1073 */
1074 if (error && error != ENAMETOOLONG) {
1075 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1076 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1077 }
1078 dtrace_copyio_postflight(src);
1079 }
1080}
1081
1082void
1083dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1084{
1085#pragma unused(flags)
1086
1087 if (dtrace_copycheck( dst, src, len )) {
1088 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1089 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1090 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1091 }
1092 dtrace_copyio_postflight(dst);
1093 }
1094}
1095
1096void
1097dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1098{
1099#pragma unused(flags)
1100
1101 size_t actual;
1102
1103 if (dtrace_copycheck( dst, src, len )) {
1104
1105 /*
1106 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1107 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1108 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1109 * to the caller.
1110 */
1111 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1112 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1113 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1114 }
1115 dtrace_copyio_postflight(dst);
1116 }
1117}
1118
1119extern const int copysize_limit_panic;
1120
1121int dtrace_copy_maxsize(void)
1122{
1123 return copysize_limit_panic;
1124}
1125
1126
1127int
1128dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
1129{
1130 int maxsize = dtrace_copy_maxsize();
1131 /*
1132 * Partition the copyout in copysize_limit_panic-sized chunks
1133 */
1134 while (nbytes >= (vm_size_t)maxsize) {
1135 if (copyout(kaddr, uaddr, maxsize) != 0)
1136 return (EFAULT);
1137
1138 nbytes -= maxsize;
1139 uaddr += maxsize;
1140 kaddr += maxsize;
1141 }
1142 if (nbytes > 0) {
1143 if (copyout(kaddr, uaddr, nbytes) != 0)
1144 return (EFAULT);
1145 }
1146
1147 return (0);
1148}
1149
1150uint8_t
1151dtrace_fuword8(user_addr_t uaddr)
1152{
1153 uint8_t ret = 0;
1154
1155 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1156 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1157 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1158 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1159 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1160 }
1161 dtrace_copyio_postflight(uaddr);
1162 }
1163 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1164
1165 return(ret);
1166}
1167
1168uint16_t
1169dtrace_fuword16(user_addr_t uaddr)
1170{
1171 uint16_t ret = 0;
1172
1173 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1174 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1175 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1176 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1177 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1178 }
1179 dtrace_copyio_postflight(uaddr);
1180 }
1181 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1182
1183 return(ret);
1184}
1185
1186uint32_t
1187dtrace_fuword32(user_addr_t uaddr)
1188{
1189 uint32_t ret = 0;
1190
1191 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1192 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1193 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1194 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1195 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1196 }
1197 dtrace_copyio_postflight(uaddr);
1198 }
1199 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1200
1201 return(ret);
1202}
1203
1204uint64_t
1205dtrace_fuword64(user_addr_t uaddr)
1206{
1207 uint64_t ret = 0;
1208
1209 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1210 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1211 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1212 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1213 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1214 }
1215 dtrace_copyio_postflight(uaddr);
1216 }
1217 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1218
1219 return(ret);
1220}
1221
1222/*
1223 * Emulation of Solaris fuword / suword
1224 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1225 */
1226
1227int
1228fuword8(user_addr_t uaddr, uint8_t *value)
1229{
1230 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1231 return -1;
1232 }
1233
1234 return 0;
1235}
1236
1237int
1238fuword16(user_addr_t uaddr, uint16_t *value)
1239{
1240 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1241 return -1;
1242 }
1243
1244 return 0;
1245}
1246
1247int
1248fuword32(user_addr_t uaddr, uint32_t *value)
1249{
1250 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1251 return -1;
1252 }
1253
1254 return 0;
1255}
1256
1257int
1258fuword64(user_addr_t uaddr, uint64_t *value)
1259{
1260 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1261 return -1;
1262 }
1263
1264 return 0;
1265}
1266
1267void
1268fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1269{
1270 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1271 *value = 0;
1272 }
1273}
1274
1275void
1276fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1277{
1278 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1279 *value = 0;
1280 }
1281}
1282
1283int
1284suword64(user_addr_t addr, uint64_t value)
1285{
1286 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1287 return -1;
1288 }
1289
1290 return 0;
1291}
1292
1293int
1294suword32(user_addr_t addr, uint32_t value)
1295{
1296 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1297 return -1;
1298 }
1299
1300 return 0;
1301}
1302
1303/*
1304 * Miscellaneous
1305 */
1306extern boolean_t dtrace_tally_fault(user_addr_t);
1307
1308boolean_t
1309dtrace_tally_fault(user_addr_t uaddr)
1310{
1311 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1312 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1313 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1314}
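/*
 * dtrace_tally_fault() is consulted from fault paths outside this file
 * (presumably the platform trap handlers): it records the faulting address in
 * the per-CPU illval and reports whether CPU_DTRACE_NOFAULT is set, i.e.
 * whether DTrace expects to recover from the fault itself.
 */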
1315
1316#define TOTTY 0x02
1317extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1318
1319int
1320vuprintf(const char *format, va_list ap)
1321{
1322 return prf(format, ap, TOTTY, NULL);
1323}
1324
1325/* Not called from probe context */
1326void cmn_err( int level, const char *format, ... )
1327{
1328#pragma unused(level)
1329 va_list alist;
1330
1331 va_start(alist, format);
1332 vuprintf(format, alist);
1333 va_end(alist);
1334 uprintf("\n");
1335}
1336
1337/*
1338 * History:
1339 * 2002-01-24 gvdl Initial implementation of strstr
1340 */
1341
1342__private_extern__ const char *
1343strstr(const char *in, const char *str)
1344{
1345 char c;
1346 size_t len;
1347 if (!in || !str)
1348 return in;
1349
1350 c = *str++;
1351 if (!c)
1352 return (const char *) in; // Trivial empty string case
1353
1354 len = strlen(str);
1355 do {
1356 char sc;
1357
1358 do {
1359 sc = *in++;
1360 if (!sc)
1361 return (char *) 0;
1362 } while (sc != c);
1363 } while (strncmp(in, str, len) != 0);
1364
1365 return (const char *) (in - 1);
1366}
1367
1368const void*
1369bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
1370{
1371 const char *base = base0;
1372 size_t lim;
1373 int cmp;
1374 const void *p;
1375 for (lim = nmemb; lim != 0; lim >>= 1) {
1376 p = base + (lim >> 1) * size;
1377 cmp = (*compar)(key, p);
1378 if (cmp == 0)
1379 return p;
1380 if (cmp > 0) { /* key > p: move right */
1381 base = (const char *)p + size;
1382 lim--;
1383 } /* else move left */
1384 }
1385 return (NULL);
1386}
1387
1388/*
1389 * Runtime and ABI
1390 */
1391uintptr_t
1392dtrace_caller(int ignore)
1393{
1394#pragma unused(ignore)
1395 return -1; /* Just as in Solaris dtrace_asm.s */
1396}
1397
1398int
1399dtrace_getstackdepth(int aframes)
1400{
1401 struct frame *fp = (struct frame *)__builtin_frame_address(0);
1402 struct frame *nextfp, *minfp, *stacktop;
1403 int depth = 0;
1404 int on_intr;
1405
1406 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1407 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1408 else
1409 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1410
1411 minfp = fp;
1412
1413 aframes++;
1414
1415 for (;;) {
1416 depth++;
1417
1418 nextfp = *(struct frame **)fp;
1419
1420 if (nextfp <= minfp || nextfp >= stacktop) {
1421 if (on_intr) {
1422 /*
1423 * Hop from interrupt stack to thread stack.
1424 */
1425 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1426
1427 minfp = (struct frame *)kstack_base;
1428 stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1429
1430 on_intr = 0;
1431 continue;
1432 }
1433 break;
1434 }
1435
1436 fp = nextfp;
1437 minfp = fp;
1438 }
1439
1440 if (depth <= aframes)
1441 return (0);
1442
1443 return (depth - aframes);
1444}
1445
1446int
1447dtrace_addr_in_module(void* addr, struct modctl *ctl)
1448{
1449 return OSKextKextForAddress(addr) == (void*)ctl->mod_address;
1450}
1451
1452/*
1453 * Unconsidered
1454 */
1455void
1456dtrace_vtime_enable(void) {}
1457
1458void
1459dtrace_vtime_disable(void) {}
1460
1461#else /* else ! CONFIG_DTRACE */
1462
1463#include <sys/types.h>
1464#include <mach/vm_types.h>
1465#include <mach/kmod.h>
1466
1467/*
1468 * This exists to prevent build errors when dtrace is unconfigured.
1469 */
1470
1471kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1472
1473kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1474#pragma unused(arg1, arg2, arg3)
1475
1476 return KERN_FAILURE;
1477}
1478
1479#endif /* CONFIG_DTRACE */