/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
 * an external kext to link against.
 */

#if CONFIG_DTRACE

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>

#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/kern_types.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/queue.h>
#include <miscfs/devfs/devfs.h>
#include <kern/kalloc.h>

#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */

/*
 * pid/proc
 */
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

/* Not called from probe context */
proc_t *
sprlock(pid_t pid)
{
    proc_t* p;

    if ((p = proc_find(pid)) == PROC_NULL) {
        return PROC_NULL;
    }

    task_suspend_internal(p->task);

    proc_lock(p);

    lck_mtx_lock(&p->p_dtrace_sprlock);

    return p;
}

/* Not called from probe context */
void
sprunlock(proc_t *p)
{
    if (p != PROC_NULL) {
        lck_mtx_unlock(&p->p_dtrace_sprlock);

        proc_unlock(p);

        task_resume_internal(p->task);

        proc_rele(p);
    }
}
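
/*
 * Illustrative pairing (a sketch; "pid" and the body are hypothetical,
 * not a call site in this file):
 *
 *    proc_t *p = sprlock(pid);
 *    if (p != PROC_NULL) {
 *        // ... inspect or patch the suspended process ...
 *        sprunlock(p);
 *    }
 *
 * sprlock() suspends the task before taking the locks, so the target
 * cannot run while it is held; sprunlock() releases in reverse order.
 */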

/*
 * uread/uwrite
 */

// These are not exported from vm_map.h.
extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);

/* Not called from probe context */
int
uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
    kern_return_t ret;

    ASSERT(p != PROC_NULL);
    ASSERT(p->task != NULL);

    task_t task = p->task;

    /*
     * Grab a reference to the task vm_map_t to make sure
     * the map isn't pulled out from under us.
     *
     * Because the proc_lock is not held at all times on all code
     * paths leading here, it is possible for the proc to have
     * exited. If the map is null, fail.
     */
    vm_map_t map = get_task_map_reference(task);
    if (map) {
        ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
        vm_map_deallocate(map);
    } else
        ret = KERN_TERMINATED;

    return (int)ret;
}

/* Not called from probe context */
int
uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
    kern_return_t ret;

    ASSERT(p != NULL);
    ASSERT(p->task != NULL);

    task_t task = p->task;

    /*
     * Grab a reference to the task vm_map_t to make sure
     * the map isn't pulled out from under us.
     *
     * Because the proc_lock is not held at all times on all code
     * paths leading here, it is possible for the proc to have
     * exited. If the map is null, fail.
     */
    vm_map_t map = get_task_map_reference(task);
    if (map) {
        /* Find the memory permissions. */
        uint32_t nestingDepth=999999;
        vm_region_submap_short_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        mach_vm_address_t address = (mach_vm_address_t)a;
        mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;

        ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
        if (ret != KERN_SUCCESS)
            goto done;

        vm_prot_t reprotect;

        if (!(info.protection & VM_PROT_WRITE)) {
            /* Save the original protection values for restoration later */
            reprotect = info.protection;

            if (info.max_protection & VM_PROT_WRITE) {
                /* The memory is not currently writable, but can be made writable. */
                ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
            } else {
                /*
                 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
                 *
                 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
                 */
                ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
            }

            if (ret != KERN_SUCCESS)
                goto done;

        } else {
            /* The memory was already writable. */
            reprotect = VM_PROT_NONE;
        }

        ret = vm_map_write_user( map,
                                 buf,
                                 (vm_map_address_t)a,
                                 (vm_size_t)len);

        if (ret != KERN_SUCCESS)
            goto done;

        if (reprotect != VM_PROT_NONE) {
            ASSERT(reprotect & VM_PROT_EXECUTE);
            ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
        }

done:
        vm_map_deallocate(map);
    } else
        ret = KERN_TERMINATED;

    return (int)ret;
}
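
/*
 * Illustrative (hypothetical) caller in the style of the fasttrap
 * provider; the 0xCC opcode is an x86 assumption, not taken from this
 * file:
 *
 *    uint8_t trap = 0xCC;    // INT3
 *    if (uwrite(p, &trap, sizeof(trap), instr_addr) != KERN_SUCCESS)
 *        // handle the failed patch
 *
 * uwrite() transparently handles pages that are read-only (made writable
 * and then restored) or whose max protection forbids writing (copied via
 * VM_PROT_COPY).
 */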

/*
 * cpuvar
 */
lck_mtx_t cpu_lock;
lck_mtx_t cyc_lock;
lck_mtx_t mod_lock;

dtrace_cpu_t *cpu_list;
cpu_core_t *cpu_core; /* XXX TLB lockdown? */

/*
 * cred_t
 */

/*
 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
 */
cred_t *
dtrace_CRED(void)
{
    struct uthread *uthread = get_bsdthread_info(current_thread());

    if (uthread == NULL)
        return NULL;
    else
        return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
}

#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
                               HAS_ALLPRIVS(cr) : \
                               PRIV_ISASSERT(&CR_OEPRIV(cr), pr))

int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
{
#pragma unused(priv, all)
    return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
}

int
PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
{
#pragma unused(priv, boolean)
    return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}

/* XXX Get around const poisoning using structure assigns */
gid_t
crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }

uid_t
crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }

/*
 * "cyclic"
 */

typedef struct wrap_timer_call {
    /* node attributes */
    cyc_handler_t hdlr;
    cyc_time_t when;
    uint64_t deadline;
    int cpuid;
    boolean_t suspended;
    struct timer_call call;

    /* next item in the linked list */
    LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;

#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL


typedef struct cyc_list {
    cyc_omni_handler_t cyl_omni;
    wrap_timer_call_t cyl_wrap_by_cpus[];
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
} __attribute__ ((aligned (8))) cyc_list_t;
#else
} cyc_list_t;
#endif

/* CPU going online/offline notifications */
void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
void dtrace_cpu_state_changed(int, boolean_t);

void
dtrace_install_cpu_hooks(void) {
    dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
}

void
dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
#pragma unused(cpuid)
    wrap_timer_call_t *wrapTC = NULL;
    boolean_t suspend = (is_running ? FALSE : TRUE);
    dtrace_icookie_t s;

    /* Ensure that we're not going to leave the CPU */
    s = dtrace_interrupt_disable();
    assert(cpuid == cpu_number());

    LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
        assert(wrapTC->cpuid == cpu_number());
        if (suspend) {
            assert(!wrapTC->suspended);
            /* If this fails, we'll panic anyway, so let's do this now. */
            if (!timer_call_cancel(&wrapTC->call))
                panic("dtrace_cpu_state_changed() failed to cancel a timer call");
            wrapTC->suspended = TRUE;
        } else {
            /* Rearm the timer, but ensure it was suspended first. */
            assert(wrapTC->suspended);
            clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
                                              &wrapTC->deadline);
            timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
                              TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
            wrapTC->suspended = FALSE;
        }

    }

    /* Restore the previous interrupt state. */
    dtrace_interrupt_enable(s);
}

static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
    wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;

    (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

    clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
    timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
}

static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
    uint64_t now;
    dtrace_icookie_t s;

    timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
    wrapTC->hdlr = *handler;
    wrapTC->when = *when;

    nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );

    now = mach_absolute_time();
    wrapTC->deadline = now;

    clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );

    /* Insert the timer into the list of running timers on this CPU, and start it. */
    s = dtrace_interrupt_disable();
    wrapTC->cpuid = cpu_number();
    LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
    timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
                      TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
    wrapTC->suspended = FALSE;
    dtrace_interrupt_enable(s);

    return (cyclic_id_t)wrapTC;
}

/*
 * Executed on the CPU the timer is running on.
 */
static void
timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
{
    assert(wrapTC);
    assert(cpu_number() == wrapTC->cpuid);

    if (!timer_call_cancel(&wrapTC->call))
        panic("timer_call_remove_cyclic() failed to cancel a timer call");

    LIST_REMOVE(wrapTC, entries);
}

static void *
timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
{
    return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
}

cyclic_id_t
cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
{
    wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
    if (NULL == wrapTC)
        return CYCLIC_NONE;
    else
        return timer_call_add_cyclic( wrapTC, handler, when );
}

void
cyclic_timer_remove(cyclic_id_t cyclic)
{
    ASSERT( cyclic != CYCLIC_NONE );

    /* Removing a timer call must be done on the CPU the timer is running on. */
    wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
    dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);

    _FREE((void *)cyclic, M_TEMP);
}

static void
_cyclic_add_omni(cyc_list_t *cyc_list)
{
    cyc_time_t cT;
    cyc_handler_t cH;
    cyc_omni_handler_t *omni = &cyc_list->cyl_omni;

    (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);

    wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
    timer_call_add_cyclic(wrapTC, &cH, &cT);
}

cyclic_id_list_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
    cyc_list_t *cyc_list =
        _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);

    if (NULL == cyc_list)
        return NULL;

    cyc_list->cyl_omni = *omni;

    dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);

    return (cyclic_id_list_t)cyc_list;
}

static void
_cyclic_remove_omni(cyc_list_t *cyc_list)
{
    cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
    void *oarg;
    wrap_timer_call_t *wrapTC;

    /*
     * If the processor was offline when dtrace started, we did not allocate
     * a cyclic timer for this CPU.
     */
    if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
        oarg = timer_call_get_cyclic_arg(wrapTC);
        timer_call_remove_cyclic(wrapTC);
        (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
    }
}

void
cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
    ASSERT(cyc_list != NULL);

    dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
    _FREE(cyc_list, M_TEMP);
}
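
/*
 * Illustrative omni handler (a sketch; the names and the 100Hz period are
 * assumptions). cyo_online runs on each CPU via the cross-call in
 * cyclic_add_omni() and fills in that CPU's handler and period;
 * cyo_offline receives the same per-CPU argument back on removal:
 *
 *    static void
 *    my_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
 *    {
 *        hdlr->cyh_func = my_tick;
 *        hdlr->cyh_arg = arg;
 *        when->cyt_when = 0;
 *        when->cyt_interval = NANOSEC / 100;
 *    }
 */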

typedef struct wrap_thread_call {
    thread_call_t TChdl;
    cyc_handler_t hdlr;
    cyc_time_t when;
    uint64_t deadline;
} wrap_thread_call_t;

/*
 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
 * cleaner and the deadman, but too distant in time and place for the profile provider.
 */
static void
_cyclic_apply( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
    wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;

    (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

    clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
    (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

    /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
    if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
        thread_wakeup((event_t)wrapTC);
}

cyclic_id_t
cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
{
    uint64_t now;

    wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
    if (NULL == wrapTC)
        return CYCLIC_NONE;

    wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
    wrapTC->hdlr = *handler;
    wrapTC->when = *when;

    ASSERT(when->cyt_when == 0);
    ASSERT(when->cyt_interval < WAKEUP_REAPER);

    nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);

    now = mach_absolute_time();
    wrapTC->deadline = now;

    clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
    (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

    return (cyclic_id_t)wrapTC;
}

static void
noop_cyh_func(void * ignore)
{
#pragma unused(ignore)
}

void
cyclic_remove(cyclic_id_t cyclic)
{
    wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;

    ASSERT(cyclic != CYCLIC_NONE);

    while (!thread_call_cancel(wrapTC->TChdl)) {
        int ret = assert_wait(wrapTC, THREAD_UNINT);
        ASSERT(ret == THREAD_WAITING);

        wrapTC->when.cyt_interval = WAKEUP_REAPER;

        ret = thread_block(THREAD_CONTINUE_NULL);
        ASSERT(ret == THREAD_AWAKENED);
    }

    if (thread_call_free(wrapTC->TChdl))
        _FREE(wrapTC, M_TEMP);
    else {
        /* Gut this cyclic and move on ... */
        wrapTC->hdlr.cyh_func = noop_cyh_func;
        wrapTC->when.cyt_interval = NEARLY_FOREVER;
    }
}
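
/*
 * Note on the cancel/wait loop above: if thread_call_cancel() loses the
 * race because _cyclic_apply() is mid-flight, cyclic_remove() parks on
 * assert_wait(), marks the interval WAKEUP_REAPER, and _cyclic_apply(),
 * having just re-armed the call, sees that sentinel and issues
 * thread_wakeup() so the cancel can be retried against the new arming.
 */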

/*
 * ddi
 */
void
ddi_report_dev(dev_info_t *devi)
{
#pragma unused(devi)
}


static unsigned int gRegisteredProps = 0;
static struct {
    char name[32];      /* enough for "dof-data-" + digits */
    int *data;
    uint_t nelements;
} gPropTable[16];

kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);

kern_return_t
_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
{
    if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
        int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);

        if (NULL == p)
            return KERN_FAILURE;

        strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
        gPropTable[gRegisteredProps].nelements = nelements;
        gPropTable[gRegisteredProps].data = p;

        while (nelements-- > 0) {
            *p++ = (int)(*data++);
        }

        gRegisteredProps++;
        return KERN_SUCCESS;
    }
    else
        return KERN_FAILURE;
}

int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    const char *name, int **data, uint_t *nelements)
{
#pragma unused(match_dev,dip,flags)
    unsigned int i;
    for (i = 0; i < gRegisteredProps; ++i)
    {
        if (0 == strncmp(name, gPropTable[i].name,
                         sizeof(gPropTable[i].name))) {
            *data = gPropTable[i].data;
            *nelements = gPropTable[i].nelements;
            return DDI_SUCCESS;
        }
    }
    return DDI_FAILURE;
}

int
ddi_prop_free(void *buf)
{
    _FREE(buf, M_TEMP);
    return DDI_SUCCESS;
}
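
/*
 * Illustrative consumer of the anonymous DOF table (a sketch; the
 * property name "dof-data-0" follows the gPropTable comment but is an
 * assumption):
 *
 *    int *data;
 *    uint_t nelements;
 *    if (DDI_SUCCESS == ddi_prop_lookup_int_array(0, NULL, 0,
 *        "dof-data-0", &data, &nelements)) {
 *        // ... consume nelements ints ...
 *        ddi_prop_free(data);
 *    }
 */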

int
ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }

int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
#pragma unused(spec_type,node_type,flag)
    dev_t dev = makedev( ddi_driver_major(dip), minor_num );

    if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
        return DDI_FAILURE;
    else
        return DDI_SUCCESS;
}

void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
#pragma unused(dip,name)
/* XXX called from dtrace_detach, so NOTREACHED for now. */
}

major_t
getemajor( dev_t d )
{
    return (major_t) major(d);
}

minor_t
getminor ( dev_t d )
{
    return (minor_t) minor(d);
}

dev_t
makedevice(major_t major, minor_t minor)
{
    return makedev( major, minor );
}

int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
{
#pragma unused(dev, dip, flags, name)

    return defvalue;
}

/*
 * Kernel Debug Interface
 */
int
kdi_dtrace_set(kdi_dtrace_set_t ignore)
{
#pragma unused(ignore)
    return 0;  /* Success */
}

extern void Debugger(const char*);

void
debug_enter(char *c) { Debugger(c); }

/*
 * kmem
 */

void *
dt_kmem_alloc(size_t size, int kmflag)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
 */
#if defined(DTRACE_MEMORY_ZONES)
    return dtrace_alloc(size);
#else
    return kalloc(size);
#endif
}

void *
dt_kmem_zalloc(size_t size, int kmflag)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
 */
#if defined(DTRACE_MEMORY_ZONES)
    void* buf = dtrace_alloc(size);
#else
    void* buf = kalloc(size);
#endif

    if(!buf)
        return NULL;

    bzero(buf, size);

    return buf;
}

void
dt_kmem_free(void *buf, size_t size)
{
#pragma unused(size)
    /*
     * DTrace relies on this; it does a lot of NULL frees.
     * A NULL free causes the debug builds to panic.
     */
    if (buf == NULL) return;

    ASSERT(size > 0);

#if defined(DTRACE_MEMORY_ZONES)
    dtrace_free(buf, size);
#else
    kfree(buf, size);
#endif
}


/*
 * aligned kmem allocator
 * align should be a power of two
 */

void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
{
    void *mem, **addr_to_free;
    intptr_t mem_aligned;
    size_t *size_to_free, hdr_size;

    /* Must be a power of two. */
    assert(align != 0);
    assert((align & (align - 1)) == 0);

    /*
     * We are going to add a header to the allocation. It contains
     * the address to free and the total size of the buffer.
     */
    hdr_size = sizeof(size_t) + sizeof(void*);
    mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
    if (mem == NULL)
        return NULL;

    mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));

    /* Write the address to free in the header. */
    addr_to_free = (void**) (mem_aligned - sizeof(void*));
    *addr_to_free = mem;

    /* Write the size to free in the header. */
    size_to_free = (size_t*) (mem_aligned - hdr_size);
    *size_to_free = size + align + hdr_size;

    return (void*) mem_aligned;
}
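
/*
 * Layout sketch of the buffer built above (illustrative; widths not to
 * scale, and on LP64 each header word is 8 bytes):
 *
 *    mem                                     mem_aligned (returned)
 *     |                                        |
 *     v                                        v
 *     +-------------+------------+------------+----------------+
 *     |   padding   | total size |  raw addr  |   size bytes   |
 *     +-------------+------------+------------+----------------+
 *                    ^size_to_free ^addr_to_free
 *
 * dt_kmem_free_aligned() reads the two words just below the aligned
 * pointer to recover the original allocation and its total size.
 */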

void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
{
    void* buf;

    buf = dt_kmem_alloc_aligned(size, align, kmflag);

    if(!buf)
        return NULL;

    bzero(buf, size);

    return buf;
}

void dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
    intptr_t ptr = (intptr_t) buf;
    void **addr_to_free = (void**) (ptr - sizeof(void*));
    size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));

    if (buf == NULL)
        return;

    dt_kmem_free(*addr_to_free, *size_to_free);
}

/*
 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
 * doesn't specify constructor, destructor, or reclaim methods.
 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
 */
kmem_cache_t *
kmem_cache_create(
    const char *name,       /* descriptive name for this cache */
    size_t bufsize,         /* size of the objects it manages */
    size_t align,           /* required object alignment */
    int (*constructor)(void *, void *, int), /* object constructor */
    void (*destructor)(void *, void *),      /* object destructor */
    void (*reclaim)(void *), /* memory reclaim callback */
    void *private,          /* pass-thru arg for constr/destr/reclaim */
    vmem_t *vmp,            /* vmem source for slab allocation */
    int cflags)             /* cache creation flags */
{
#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
    return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
}

void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
#pragma unused(kmflag)
    size_t bufsize = (size_t)cp;
    return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
}

void
kmem_cache_free(kmem_cache_t *cp, void *buf)
{
#pragma unused(cp)
    _FREE(buf, M_TEMP);
}

void
kmem_cache_destroy(kmem_cache_t *cp)
{
#pragma unused(cp)
}
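
/*
 * Usage sketch of the cookie trick (illustrative; the cache name and the
 * KM_SLEEP flag are assumptions, and the alignment/callback arguments are
 * ignored by this shim). The "cache" is just the object size smuggled
 * through a pointer, so alloc/free degenerate to _MALLOC/_FREE:
 *
 *    kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *        sizeof (dtrace_state_percpu_t) * NCPU, 0,
 *        NULL, NULL, NULL, NULL, NULL, 0);
 *    void *buf = kmem_cache_alloc(cp, KM_SLEEP);
 *    kmem_cache_free(cp, buf);
 *    kmem_cache_destroy(cp);
 */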

/*
 * taskq
 */
extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */

static void
_taskq_apply( task_func_t func, thread_call_param_t arg )
{
    func( (void *)arg );
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)

    return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
}

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
#pragma unused(flags)
    thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
    thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
    return (taskqid_t) tq /* for lack of anything better */;
}

void
taskq_destroy(taskq_t *tq)
{
    thread_call_cancel( (thread_call_t) tq );
    thread_call_free( (thread_call_t) tq );
}
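
/*
 * Usage sketch (illustrative; the names are hypothetical). This shim maps
 * a whole taskq onto a single thread_call, so only one dispatch is in
 * flight at a time and the creation parameters are ignored:
 *
 *    taskq_t *tq = taskq_create("dtrace_taskq", 1, maxclsyspri, 0, 0, 0);
 *    (void)taskq_dispatch(tq, my_task_func, my_arg, 0);
 *    ...
 *    taskq_destroy(tq);
 */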

pri_t maxclsyspri;

/*
 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
 */
typedef unsigned int u_daddr_t;
#include "blist.h"

/* By passing around blist *handles*, the underlying blist can be resized as needed. */
struct blist_hdl {
    blist_t blist;
};

vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
    void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
{
#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
    blist_t bl;
    struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);

    ASSERT(quantum == 1);
    ASSERT(NULL == ignore5);
    ASSERT(NULL == ignore6);
    ASSERT(NULL == source);
    ASSERT(0 == qcache_max);
    ASSERT(vmflag & VMC_IDENTIFIER);

    size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */

    p->blist = bl = blist_create( size );
    blist_free(bl, 0, size);
    if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */

    return (vmem_t *)p;
}

void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
#pragma unused(vmflag)
    struct blist_hdl *q = (struct blist_hdl *)vmp;
    blist_t bl = q->blist;
    daddr_t p;

    p = blist_alloc(bl, (daddr_t)size);

    if ((daddr_t)-1 == p) {
        blist_resize(&bl, (bl->bl_blocks) << 1, 1);
        q->blist = bl;
        p = blist_alloc(bl, (daddr_t)size);
        if ((daddr_t)-1 == p)
            panic("vmem_alloc: failure after blist_resize!");
    }

    return (void *)(uintptr_t)p;
}

void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
    struct blist_hdl *p = (struct blist_hdl *)vmp;

    blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
}

void
vmem_destroy(vmem_t *vmp)
{
    struct blist_hdl *p = (struct blist_hdl *)vmp;

    blist_destroy( p->blist );
    _FREE( p, sizeof(struct blist_hdl) );
}
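
/*
 * Usage sketch as an ID arena (illustrative; the names are hypothetical).
 * The "addresses" handed out are just integers from the blist, so ids can
 * be minted and recycled cheaply, and the arena grows on demand:
 *
 *    vmem_t *arena = vmem_create("ids", (void *)1, UINT32_MAX, 1,
 *        NULL, NULL, NULL, 0, VMC_IDENTIFIER);
 *    uint32_t id = (uint32_t)(uintptr_t)vmem_alloc(arena, 1, VM_BESTFIT);
 *    ...
 *    vmem_free(arena, (void *)(uintptr_t)id, 1);
 *    vmem_destroy(arena);
 */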

/*
 * Timing
 */

/*
 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
 * January 1, 1970. Because it can be called from probe context, it must take no locks.
 */

hrtime_t
dtrace_gethrestime(void)
{
    clock_sec_t secs;
    clock_nsec_t nanosecs;
    uint64_t secs64, ns64;

    clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
    secs64 = (uint64_t)secs;
    ns64 = (uint64_t)nanosecs;

    ns64 = ns64 + (secs64 * 1000000000LL);
    return ns64;
}

/*
 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
 * Hence its primary use is to specify intervals.
 */

hrtime_t
dtrace_abs_to_nano(uint64_t elapsed)
{
    static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };

    /*
     * If this is the first time we've run, get the timebase.
     * We can use denom == 0 to indicate that sTimebaseInfo is
     * uninitialised because it makes no sense to have a zero
     * denominator in a fraction.
     */

    if ( sTimebaseInfo.denom == 0 ) {
        (void) clock_timebase_info(&sTimebaseInfo);
    }

    /*
     * Convert to nanoseconds.
     * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
     *
     * Provided the final result is representable in 64 bits the following maneuver will
     * deliver that result without intermediate overflow.
     */
    if (sTimebaseInfo.denom == sTimebaseInfo.numer)
        return elapsed;
    else if (sTimebaseInfo.denom == 1)
        return elapsed * (uint64_t)sTimebaseInfo.numer;
    else {
        /* Decompose elapsed = eta32 * 2^32 + eps32: */
        uint64_t eta32 = elapsed >> 32;
        uint64_t eps32 = elapsed & 0x00000000ffffffffLL;

        uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;

        /* Form product of elapsed64 (decomposed) and numer: */
        uint64_t mu64 = numer * eta32;
        uint64_t lambda64 = numer * eps32;

        /* Divide the constituents by denom: */
        uint64_t q32 = mu64/denom;
        uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */

        return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
    }
}
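
/*
 * Worked justification of the split above: with
 * elapsed = eta32 * 2^32 + eps32, and writing mu64 = numer * eta32 as
 * q32 * denom + r32,
 *
 *    elapsed * numer / denom
 *      = (mu64 * 2^32 + lambda64) / denom
 *      = q32 * 2^32 + (r32 * 2^32 + lambda64) / denom
 *
 * which is exactly (q32 << 32) + ((r32 << 32) + lambda64)/denom. The
 * intermediates mu64 and lambda64 are 32x32-bit products, so they always
 * fit in 64 bits, and r32 < denom bounds the final numerator whenever the
 * result itself is representable.
 */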

hrtime_t
dtrace_gethrtime(void)
{
    static uint64_t start = 0;

    if (start == 0)
        start = mach_absolute_time();

    return dtrace_abs_to_nano(mach_absolute_time() - start);
}

/*
 * Atomicity and synchronization
 */
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
{
    if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
        return cmp;
    else
        return ~cmp; /* Must return something *other* than cmp */
}

void *
dtrace_casptr(void *target, void *cmp, void *new)
{
    if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
        return cmp;
    else
        return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
}

/*
 * Interrupt manipulation
 */
dtrace_icookie_t
dtrace_interrupt_disable(void)
{
    return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
}

void
dtrace_interrupt_enable(dtrace_icookie_t reenable)
{
    (void)ml_set_interrupts_enabled((boolean_t)reenable);
}

/*
 * MP coordination
 */
static void
dtrace_sync_func(void) {}

/*
 * dtrace_sync() is not called from probe context.
 */
void
dtrace_sync(void)
{
    dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

/*
 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
 */

extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);

static int
dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
{
#pragma unused(kaddr)

    vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
    dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */

    ASSERT(kaddr + size >= kaddr);

    if ( uaddr + size < uaddr ||                         /* Avoid address wrap. */
         KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
    {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
        return (0);
    }
    return (1);
}

void
dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

    if (dtrace_copycheck( src, dst, len )) {
        if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
        }
        dtrace_copyio_postflight(src);
    }
}

void
dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

    size_t actual;

    if (dtrace_copycheck( src, dst, len )) {
        /* copyin as many as 'len' bytes. */
        int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);

        /*
         * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
         * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
         * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
         * to the caller.
         */
        if (error && error != ENAMETOOLONG) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
        }
        dtrace_copyio_postflight(src);
    }
}

void
dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

    if (dtrace_copycheck( dst, src, len )) {
        if (copyout((const void *)src, dst, (vm_size_t)len)) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
        }
        dtrace_copyio_postflight(dst);
    }
}

void
dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

    size_t actual;

    if (dtrace_copycheck( dst, src, len )) {

        /*
         * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
         * not encountered. We raise CPU_DTRACE_BADADDR in that case.
         * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
         * to the caller.
         */
        if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
        }
        dtrace_copyio_postflight(dst);
    }
}

extern const int copysize_limit_panic;

int
dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
{
    /*
     * Partition the copyout in copysize_limit_panic-sized chunks
     */
    while (nbytes >= (vm_size_t)copysize_limit_panic) {
        if (copyout(kaddr, uaddr, copysize_limit_panic) != 0)
            return (EFAULT);

        nbytes -= copysize_limit_panic;
        uaddr += copysize_limit_panic;
        kaddr += copysize_limit_panic;
    }
    if (nbytes > 0) {
        if (copyout(kaddr, uaddr, nbytes) != 0)
            return (EFAULT);
    }

    return (0);
}

uint8_t
dtrace_fuword8(user_addr_t uaddr)
{
    uint8_t ret = 0;

    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
        if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
        }
        dtrace_copyio_postflight(uaddr);
    }
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

    return(ret);
}

uint16_t
dtrace_fuword16(user_addr_t uaddr)
{
    uint16_t ret = 0;

    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
        if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
        }
        dtrace_copyio_postflight(uaddr);
    }
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

    return(ret);
}

uint32_t
dtrace_fuword32(user_addr_t uaddr)
{
    uint32_t ret = 0;

    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
        if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
        }
        dtrace_copyio_postflight(uaddr);
    }
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

    return(ret);
}

uint64_t
dtrace_fuword64(user_addr_t uaddr)
{
    uint64_t ret = 0;

    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
        if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
        }
        dtrace_copyio_postflight(uaddr);
    }
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

    return(ret);
}

/*
 * Emulation of Solaris fuword / suword
 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
 */

int
fuword8(user_addr_t uaddr, uint8_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
        return -1;
    }

    return 0;
}

int
fuword16(user_addr_t uaddr, uint16_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
        return -1;
    }

    return 0;
}

int
fuword32(user_addr_t uaddr, uint32_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
        return -1;
    }

    return 0;
}

int
fuword64(user_addr_t uaddr, uint64_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
        return -1;
    }

    return 0;
}

void
fuword8_noerr(user_addr_t uaddr, uint8_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
        *value = 0;
    }
}

void
fuword16_noerr(user_addr_t uaddr, uint16_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
        *value = 0;
    }
}

void
fuword32_noerr(user_addr_t uaddr, uint32_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
        *value = 0;
    }
}

void
fuword64_noerr(user_addr_t uaddr, uint64_t *value)
{
    if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
        *value = 0;
    }
}

int
suword64(user_addr_t addr, uint64_t value)
{
    if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
        return -1;
    }

    return 0;
}

int
suword32(user_addr_t addr, uint32_t value)
{
    if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
        return -1;
    }

    return 0;
}

int
suword16(user_addr_t addr, uint16_t value)
{
    if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
        return -1;
    }

    return 0;
}

int
suword8(user_addr_t addr, uint8_t value)
{
    if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
        return -1;
    }

    return 0;
}


/*
 * Miscellaneous
 */
extern boolean_t dtrace_tally_fault(user_addr_t);

boolean_t
dtrace_tally_fault(user_addr_t uaddr)
{
    DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
    cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
    return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
}

#define TOTTY 0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */

int
vuprintf(const char *format, va_list ap)
{
    return prf(format, ap, TOTTY, NULL);
}

/* Not called from probe context */
void cmn_err( int level, const char *format, ... )
{
#pragma unused(level)
    va_list alist;

    va_start(alist, format);
    vuprintf(format, alist);
    va_end(alist);
    uprintf("\n");
}

/*
 * History:
 *  2002-01-24  gvdl    Initial implementation of strstr
 */

__private_extern__ const char *
strstr(const char *in, const char *str)
{
    char c;
    size_t len;
    if (!in || !str)
        return in;

    c = *str++;
    if (!c)
        return (const char *) in; // Trivial empty string case

    len = strlen(str);
    do {
        char sc;

        do {
            sc = *in++;
            if (!sc)
                return (char *) 0;
        } while (sc != c);
    } while (strncmp(in, str, len) != 0);

    return (const char *) (in - 1);
}

const void*
bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
{
    const char *base = base0;
    size_t lim;
    int cmp;
    const void *p;
    for (lim = nmemb; lim != 0; lim >>= 1) {
        p = base + (lim >> 1) * size;
        cmp = (*compar)(key, p);
        if (cmp == 0)
            return p;
        if (cmp > 0) { /* key > p: move right */
            base = (const char *)p + size;
            lim--;
        }              /* else move left */
    }
    return (NULL);
}

/*
 * Runtime and ABI
 */
uintptr_t
dtrace_caller(int ignore)
{
#pragma unused(ignore)
    return -1; /* Just as in Solaris dtrace_asm.s */
}

int
dtrace_getstackdepth(int aframes)
{
    struct frame *fp = (struct frame *)__builtin_frame_address(0);
    struct frame *nextfp, *minfp, *stacktop;
    int depth = 0;
    int on_intr;

    if ((on_intr = CPU_ON_INTR(CPU)) != 0)
        stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
    else
        stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

    minfp = fp;

    aframes++;

    for (;;) {
        depth++;

        nextfp = *(struct frame **)fp;

        if (nextfp <= minfp || nextfp >= stacktop) {
            if (on_intr) {
                /*
                 * Hop from interrupt stack to thread stack.
                 */
                vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

                minfp = (struct frame *)kstack_base;
                stacktop = (struct frame *)(kstack_base + kernel_stack_size);

                on_intr = 0;
                continue;
            }
            break;
        }

        fp = nextfp;
        minfp = fp;
    }

    if (depth <= aframes)
        return (0);

    return (depth - aframes);
}

/*
 * Unconsidered
 */
void
dtrace_vtime_enable(void) {}

void
dtrace_vtime_disable(void) {}

#else /* else ! CONFIG_DTRACE */

#include <sys/types.h>
#include <mach/vm_types.h>
#include <mach/kmod.h>

/*
 * This exists to prevent build errors when dtrace is unconfigured.
 */

kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);

kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
#pragma unused(arg1, arg2, arg3)

    return KERN_FAILURE;
}

#endif /* CONFIG_DTRACE */