1/*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30/*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36#if CONFIG_DTRACE
37
38#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39#include <kern/thread.h>
40#include <mach/thread_status.h>
41
42#include <stdarg.h>
43#include <string.h>
44#include <sys/malloc.h>
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <sys/proc_internal.h>
48#include <sys/kauth.h>
49#include <sys/user.h>
50#include <sys/systm.h>
51#include <sys/dtrace.h>
52#include <sys/dtrace_impl.h>
53#include <libkern/OSAtomic.h>
54#include <kern/kern_types.h>
55#include <kern/timer_call.h>
56#include <kern/thread_call.h>
57#include <kern/task.h>
58#include <kern/sched_prim.h>
59#include <kern/queue.h>
60#include <miscfs/devfs/devfs.h>
61#include <kern/kalloc.h>
62
63#include <mach/vm_param.h>
64#include <mach/mach_vm.h>
65#include <mach/task.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
68
69/* missing prototypes, not exported by Mach */
70extern kern_return_t task_suspend_internal(task_t);
71extern kern_return_t task_resume_internal(task_t);
72
73/*
74 * pid/proc
75 */
76/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
77#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
78
79/* Not called from probe context */
80proc_t *
81sprlock(pid_t pid)
82{
83 proc_t* p;
84
85 if ((p = proc_find(pid)) == PROC_NULL) {
86 return PROC_NULL;
87 }
88
89	task_suspend_internal(p->task);
90
91 proc_lock(p);
92
93 lck_mtx_lock(&p->p_dtrace_sprlock);
94
95 return p;
96}
97
98/* Not called from probe context */
99void
100sprunlock(proc_t *p)
101{
102 if (p != PROC_NULL) {
103 lck_mtx_unlock(&p->p_dtrace_sprlock);
104
105 proc_unlock(p);
106
107	task_resume_internal(p->task);
108
109 proc_rele(p);
110 }
111}
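/*
 * Added illustration (not original source): sprlock()/sprunlock() are meant
 * to be used as a pair, e.g.
 *
 *	proc_t *p = sprlock(pid);	// find the proc, suspend its task,
 *					// take proc_lock and p_dtrace_sprlock
 *	if (p != PROC_NULL) {
 *		// ... inspect or modify the stopped process ...
 *		sprunlock(p);		// drop both locks, resume the task,
 *					// release the proc_find() reference
 *	}
 */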
112
113/*
114 * uread/uwrite
115 */
116
117// These are not exported from vm_map.h.
118extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
119extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
120
121/* Not called from probe context */
122int
123uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
124{
125 kern_return_t ret;
126
127 ASSERT(p != PROC_NULL);
128 ASSERT(p->task != NULL);
129
130 task_t task = p->task;
131
132 /*
133 * Grab a reference to the task vm_map_t to make sure
134 * the map isn't pulled out from under us.
135 *
136 * Because the proc_lock is not held at all times on all code
137 * paths leading here, it is possible for the proc to have
138 * exited. If the map is null, fail.
139 */
140 vm_map_t map = get_task_map_reference(task);
141 if (map) {
142 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
143 vm_map_deallocate(map);
144 } else
145 ret = KERN_TERMINATED;
146
147 return (int)ret;
148}
149
150
151/* Not called from probe context */
152int
153uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
154{
155 kern_return_t ret;
156
157 ASSERT(p != NULL);
158 ASSERT(p->task != NULL);
159
160 task_t task = p->task;
161
162 /*
163 * Grab a reference to the task vm_map_t to make sure
164 * the map isn't pulled out from under us.
165 *
166 * Because the proc_lock is not held at all times on all code
167 * paths leading here, it is possible for the proc to have
168 * exited. If the map is null, fail.
169 */
170 vm_map_t map = get_task_map_reference(task);
171 if (map) {
172 /* Find the memory permissions. */
173 uint32_t nestingDepth=999999;
174 vm_region_submap_short_info_data_64_t info;
175 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
176 mach_vm_address_t address = (mach_vm_address_t)a;
177 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
178
179 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
180 if (ret != KERN_SUCCESS)
181 goto done;
182
183 vm_prot_t reprotect;
184
185 if (!(info.protection & VM_PROT_WRITE)) {
186 /* Save the original protection values for restoration later */
187 reprotect = info.protection;
188
189 if (info.max_protection & VM_PROT_WRITE) {
190 /* The memory is not currently writable, but can be made writable. */
191 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
192 } else {
193 /*
194 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
195 *
196 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
197 */
198 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
199 }
200
201 if (ret != KERN_SUCCESS)
202 goto done;
203
204 } else {
205 /* The memory was already writable. */
206 reprotect = VM_PROT_NONE;
207 }
208
209 ret = vm_map_write_user( map,
210 buf,
211 (vm_map_address_t)a,
212 (vm_size_t)len);
213
214 if (ret != KERN_SUCCESS)
215 goto done;
216
217 if (reprotect != VM_PROT_NONE) {
218 ASSERT(reprotect & VM_PROT_EXECUTE);
219 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
220 }
221
222done:
223 vm_map_deallocate(map);
224 } else
225 ret = KERN_TERMINATED;
226
227 return (int)ret;
228}
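/*
 * Added summary of the protection handling in uwrite() above:
 * mach_vm_region_recurse() supplies the current and maximum protections for
 * [a, a+len). If the range is not writable but max_protection permits it,
 * VM_PROT_WRITE is added temporarily; otherwise VM_PROT_COPY (with
 * read/write) forces a copy-on-write mapping. Once vm_map_write_user()
 * succeeds, the protection saved in 'reprotect' is restored so that, e.g.,
 * text pages become non-writable again.
 */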
229
230/*
231 * cpuvar
232 */
233lck_mtx_t cpu_lock;
234lck_mtx_t cyc_lock;
235lck_mtx_t mod_lock;
236
237dtrace_cpu_t *cpu_list;
238cpu_core_t *cpu_core; /* XXX TLB lockdown? */
239
240/*
241 * cred_t
242 */
243
244/*
245 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
246 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
247 */
248cred_t *
249dtrace_CRED(void)
250{
251 struct uthread *uthread = get_bsdthread_info(current_thread());
252
253 if (uthread == NULL)
254 return NULL;
255 else
256 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
257}
258
259#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
260#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
261 HAS_ALLPRIVS(cr) : \
262 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
263
264int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
265{
266#pragma unused(priv, all)
267 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
268}
269
270int
271PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
272{
273#pragma unused(priv, boolean)
274 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
275}
276
277/* XXX Get around const poisoning using structure assigns */
278gid_t
279crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
280
281uid_t
282crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
283
284/*
285 * "cyclic"
286 */
287
288typedef struct wrap_timer_call {
289 /* node attributes */
290 cyc_handler_t hdlr;
291 cyc_time_t when;
292 uint64_t deadline;
293 int cpuid;
294 boolean_t suspended;
295 struct timer_call call;
296
297 /* next item in the linked list */
298 LIST_ENTRY(wrap_timer_call) entries;
299} wrap_timer_call_t;
300
301#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
302#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
303
304/* CPU going online/offline notifications */
305void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
306void dtrace_cpu_state_changed(int, boolean_t);
307
308void
309dtrace_install_cpu_hooks(void) {
310 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
311}
312
313void
314dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
315#pragma unused(cpuid)
316 wrap_timer_call_t *wrapTC = NULL;
317 boolean_t suspend = (is_running ? FALSE : TRUE);
318 dtrace_icookie_t s;
319
320 /* Ensure that we're not going to leave the CPU */
321 s = dtrace_interrupt_disable();
322 assert(cpuid == cpu_number());
323
324 LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
325 assert(wrapTC->cpuid == cpu_number());
326 if (suspend) {
327 assert(!wrapTC->suspended);
328 /* If this fails, we'll panic anyway, so let's do this now. */
329 if (!timer_call_cancel(&wrapTC->call))
330 panic("timer_call_set_suspend() failed to cancel a timer call");
331 wrapTC->suspended = TRUE;
332 } else {
333 /* Rearm the timer, but ensure it was suspended first. */
334 assert(wrapTC->suspended);
335 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
336 &wrapTC->deadline);
337 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
338 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
339 wrapTC->suspended = FALSE;
340 }
341
342 }
343
344 /* Restore the previous interrupt state. */
345 dtrace_interrupt_enable(s);
346}
347
348static void
349_timer_call_apply_cyclic( void *ignore, void *vTChdl )
350{
351#pragma unused(ignore)
352 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
353
354 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
355
356 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
357	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
358}
359
360static cyclic_id_t
361timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
362{
363 uint64_t now;
364	dtrace_icookie_t s;
365
366 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
367 wrapTC->hdlr = *handler;
368 wrapTC->when = *when;
369
370 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
371
372 now = mach_absolute_time();
373 wrapTC->deadline = now;
374
375 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
376
377 /* Insert the timer to the list of the running timers on this CPU, and start it. */
378 s = dtrace_interrupt_disable();
379 wrapTC->cpuid = cpu_number();
380 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
381 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
382 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
383 wrapTC->suspended = FALSE;
384 dtrace_interrupt_enable(s);
385
386 return (cyclic_id_t)wrapTC;
387}
388
389/*
390 * Executed on the CPU the timer is running on.
391 */
392static void
393timer_call_remove_cyclic(cyclic_id_t cyclic)
394{
395 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
396
397 assert(wrapTC);
398 assert(cpu_number() == wrapTC->cpuid);
399
400 if (!timer_call_cancel(&wrapTC->call))
401 panic("timer_call_remove_cyclic() failed to cancel a timer call");
402
403	LIST_REMOVE(wrapTC, entries);
404}
405
406static void *
407timer_call_get_cyclic_arg(cyclic_id_t cyclic)
408{
409 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
410
411 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
412}
413
414cyclic_id_t
415cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
416{
417 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
418 if (NULL == wrapTC)
419 return CYCLIC_NONE;
420 else
421 return timer_call_add_cyclic( wrapTC, handler, when );
422}
423
424void
425cyclic_timer_remove(cyclic_id_t cyclic)
426{
427 ASSERT( cyclic != CYCLIC_NONE );
428
429 /* Removing a timer call must be done on the CPU the timer is running on. */
430 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
431 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
432
433 _FREE((void *)cyclic, M_TEMP);
434}
435
436static void
437_cyclic_add_omni(cyclic_id_list_t cyc_list)
438{
439 cyc_time_t cT;
440 cyc_handler_t cH;
441 wrap_timer_call_t *wrapTC;
442 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
443 char *t;
444
445 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
446
447 t = (char *)cyc_list;
448 t += sizeof(cyc_omni_handler_t);
449	cyc_list = (cyclic_id_list_t)(uintptr_t)t;
450
451 t += sizeof(cyclic_id_t)*NCPU;
452 t += (sizeof(wrap_timer_call_t))*cpu_number();
453	wrapTC = (wrap_timer_call_t *)(uintptr_t)t;
454
455 cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
456}
457
458cyclic_id_list_t
459cyclic_add_omni(cyc_omni_handler_t *omni)
460{
461 cyclic_id_list_t cyc_list =
462 _MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
463 sizeof(cyclic_id_t)*NCPU +
464 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
465 if (NULL == cyc_list)
466 return (cyclic_id_list_t)CYCLIC_NONE;
467
468 *(cyc_omni_handler_t *)cyc_list = *omni;
469 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
470
471 return cyc_list;
472}
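/*
 * Added note on the buffer returned by cyclic_add_omni(); the sizes follow
 * the _MALLOC above and the pointer arithmetic in _cyclic_add_omni():
 *
 *	+--------------------------+  <- cyc_list
 *	| cyc_omni_handler_t       |  copy of *omni
 *	+--------------------------+
 *	| cyclic_id_t [NCPU]       |  per-CPU cyclic ids, filled in by
 *	+--------------------------+  _cyclic_add_omni() on each CPU
 *	| wrap_timer_call_t [NCPU] |  per-CPU timer state
 *	+--------------------------+
 */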
473
474static void
475_cyclic_remove_omni(cyclic_id_list_t cyc_list)
476{
477 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
478 void *oarg;
479 cyclic_id_t cid;
480 char *t;
481
482 t = (char *)cyc_list;
483 t += sizeof(cyc_omni_handler_t);
484	cyc_list = (cyclic_id_list_t)(uintptr_t)t;
485
486 /*
487 * If the processor was offline when dtrace started, we did not allocate
488 * a cyclic timer for this CPU.
489 */
490 if ((cid = cyc_list[cpu_number()]) != CYCLIC_NONE) {
491 oarg = timer_call_get_cyclic_arg(cid);
492 timer_call_remove_cyclic(cid);
493 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
494 }
495}
496
497void
498cyclic_remove_omni(cyclic_id_list_t cyc_list)
499{
500 ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );
501
502 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
503 _FREE(cyc_list, M_TEMP);
504}
505
506typedef struct wrap_thread_call {
507 thread_call_t TChdl;
508 cyc_handler_t hdlr;
509 cyc_time_t when;
510 uint64_t deadline;
511} wrap_thread_call_t;
512
513/*
514 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
515 * cleaner and the deadman, but too distant in time and place for the profile provider.
516 */
517static void
518_cyclic_apply( void *ignore, void *vTChdl )
519{
520#pragma unused(ignore)
521 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
522
523 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
524
525 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
526 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
527
528 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
529 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
530 thread_wakeup((event_t)wrapTC);
531}
532
533cyclic_id_t
534cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
535{
536 uint64_t now;
537
538 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
539 if (NULL == wrapTC)
540 return CYCLIC_NONE;
541
542 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
543 wrapTC->hdlr = *handler;
544 wrapTC->when = *when;
545
546 ASSERT(when->cyt_when == 0);
547 ASSERT(when->cyt_interval < WAKEUP_REAPER);
548
549 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
550
551 now = mach_absolute_time();
552 wrapTC->deadline = now;
553
554 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
555 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
556
557 return (cyclic_id_t)wrapTC;
558}
559
560static void
561noop_cyh_func(void * ignore)
562{
563#pragma unused(ignore)
564}
565
566void
567cyclic_remove(cyclic_id_t cyclic)
568{
569 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
570
571 ASSERT(cyclic != CYCLIC_NONE);
572
573 while (!thread_call_cancel(wrapTC->TChdl)) {
574 int ret = assert_wait(wrapTC, THREAD_UNINT);
575 ASSERT(ret == THREAD_WAITING);
576
577 wrapTC->when.cyt_interval = WAKEUP_REAPER;
578
579 ret = thread_block(THREAD_CONTINUE_NULL);
580 ASSERT(ret == THREAD_AWAKENED);
581 }
582
583 if (thread_call_free(wrapTC->TChdl))
584 _FREE(wrapTC, M_TEMP);
585 else {
586 /* Gut this cyclic and move on ... */
587 wrapTC->hdlr.cyh_func = noop_cyh_func;
588 wrapTC->when.cyt_interval = NEARLY_FOREVER;
589 }
590}
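/*
 * Added note: cyclic_remove() and _cyclic_apply() synchronize through the
 * WAKEUP_REAPER sentinel. If thread_call_cancel() fails because the handler
 * is mid-flight, the remover records WAKEUP_REAPER in cyt_interval and
 * blocks; when _cyclic_apply() re-arms the thread call it sees the sentinel
 * and issues thread_wakeup(), letting the remover retry the cancel.
 */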
591
592/*
593 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
594 */
595
596thread_call_t
597dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
598{
599#pragma unused(arg)
600 thread_call_t call = thread_call_allocate(func, NULL);
601
602 nanoseconds_to_absolutetime(nanos, &nanos);
603
604 /*
605 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
606 * and clock drift on later invocations is not a worry.
607 */
608 uint64_t deadline = mach_absolute_time() + nanos;
609	/* DRK: consider using a lower priority callout here */
610 thread_call_enter_delayed(call, deadline);
611
612 return call;
613}
614
615/*
616 * ddi
617 */
618void
619ddi_report_dev(dev_info_t *devi)
620{
621#pragma unused(devi)
622}
623
624#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
625static void *soft[NSOFT_STATES];
626
627int
628ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
629{
630#pragma unused(n_items)
631 int i;
632
633 for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
634 *(size_t *)state_p = size;
635 return 0;
636}
637
638int
639ddi_soft_state_zalloc(void *state, int item)
640{
641#pragma unused(state)
642 if (item < NSOFT_STATES)
643 return DDI_SUCCESS;
644 else
645 return DDI_FAILURE;
646}
647
648void *
649ddi_get_soft_state(void *state, int item)
650{
651#pragma unused(state)
652 ASSERT(item < NSOFT_STATES);
653 return soft[item];
654}
655
656int
657ddi_soft_state_free(void *state, int item)
658{
659 ASSERT(item < NSOFT_STATES);
660 bzero( soft[item], (size_t)state );
661 return DDI_SUCCESS;
662}
663
664void
665ddi_soft_state_fini(void **state_p)
666{
667#pragma unused(state_p)
668 int i;
669
670 for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
671}
672
673static unsigned int gRegisteredProps = 0;
674static struct {
675 char name[32]; /* enough for "dof-data-" + digits */
676 int *data;
677 uint_t nelements;
678} gPropTable[16];
679
680kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
681
682kern_return_t
683_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
684{
685 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
686 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);
687
688 if (NULL == p)
689 return KERN_FAILURE;
690
691 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
692 gPropTable[gRegisteredProps].nelements = nelements;
693 gPropTable[gRegisteredProps].data = p;
694
695 while (nelements-- > 0) {
696 *p++ = (int)(*data++);
697 }
698
699 gRegisteredProps++;
700 return KERN_SUCCESS;
701 }
702 else
703 return KERN_FAILURE;
704}
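/*
 * Added note: each anonymous DOF byte registered above is widened to an int
 * in gPropTable, mimicking a Solaris int-array property. A hypothetical
 * lookup (the property name is only an example) would be:
 *
 *	int *dof; uint_t n;
 *	if (ddi_prop_lookup_int_array(dev, NULL, 0, "dof-data-0", &dof, &n)
 *	    == DDI_SUCCESS) {
 *		// n ints, one per original DOF byte
 *	}
 */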
705
706int
707ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
708    const char *name, int **data, uint_t *nelements)
709{
710#pragma unused(match_dev,dip,flags)
711 unsigned int i;
712 for (i = 0; i < gRegisteredProps; ++i)
713 {
714 if (0 == strncmp(name, gPropTable[i].name,
715 sizeof(gPropTable[i].name))) {
716 *data = gPropTable[i].data;
717 *nelements = gPropTable[i].nelements;
718 return DDI_SUCCESS;
719 }
720 }
721 return DDI_FAILURE;
722}
723
724int
725ddi_prop_free(void *buf)
726{
727 _FREE(buf, M_TEMP);
728 return DDI_SUCCESS;
729}
730
731int
732ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
733
734int
735ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
736 minor_t minor_num, const char *node_type, int flag)
737{
738#pragma unused(spec_type,node_type,flag)
739	dev_t dev = makedev( ddi_driver_major(dip), minor_num );
740
741 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
742 return DDI_FAILURE;
743 else
744 return DDI_SUCCESS;
745}
746
747void
748ddi_remove_minor_node(dev_info_t *dip, char *name)
749{
750#pragma unused(dip,name)
751/* XXX called from dtrace_detach, so NOTREACHED for now. */
752}
753
754major_t
755getemajor( dev_t d )
756{
757 return (major_t) major(d);
758}
759
760minor_t
761getminor ( dev_t d )
762{
763 return (minor_t) minor(d);
764}
765
766dev_t
767makedevice(major_t major, minor_t minor)
768{
769 return makedev( major, minor );
770}
771
772int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
773{
774#pragma unused(dev, dip, flags, name)
775
776 return defvalue;
777}
778
779/*
780 * Kernel Debug Interface
781 */
782int
783kdi_dtrace_set(kdi_dtrace_set_t ignore)
784{
785#pragma unused(ignore)
786 return 0; /* Success */
787}
788
789extern void Debugger(const char*);
790
791void
792debug_enter(char *c) { Debugger(c); }
793
794/*
795 * kmem
796 */
797
798void *
799dt_kmem_alloc(size_t size, int kmflag)
800{
801#pragma unused(kmflag)
802
803/*
804 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
805 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
806 */
807#if defined(DTRACE_MEMORY_ZONES)
808 return dtrace_alloc(size);
809#else
810 return kalloc(size);
811#endif
812}
813
814void *
815dt_kmem_zalloc(size_t size, int kmflag)
816{
817#pragma unused(kmflag)
818
819/*
820 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
821 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
822 */
823#if defined(DTRACE_MEMORY_ZONES)
824 void* buf = dtrace_alloc(size);
825#else
826 void* buf = kalloc(size);
827#endif
828
829 if(!buf)
830 return NULL;
831
832 bzero(buf, size);
833
834 return buf;
835}
836
837void
838dt_kmem_free(void *buf, size_t size)
839{
840#pragma unused(size)
841 /*
842	 * DTrace relies on this; it's doing a lot of NULL frees.
843 * A null free causes the debug builds to panic.
844 */
845 if (buf == NULL) return;
846
847 ASSERT(size > 0);
848
849#if defined(DTRACE_MEMORY_ZONES)
850 dtrace_free(buf, size);
851#else
852 kfree(buf, size);
853#endif
854}
855
856
857
858/*
859 * aligned kmem allocator
860 * align should be a power of two
861 */
862
863void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
864{
865 void *mem, **addr_to_free;
866 intptr_t mem_aligned;
867 size_t *size_to_free, hdr_size;
868
869 /* Must be a power of two. */
870 assert(align != 0);
871 assert((align & (align - 1)) == 0);
872
873 /*
874 * We are going to add a header to the allocation. It contains
875 * the address to free and the total size of the buffer.
876 */
877 hdr_size = sizeof(size_t) + sizeof(void*);
878 mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
879 if (mem == NULL)
880 return NULL;
881
882 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
883
884 /* Write the address to free in the header. */
885 addr_to_free = (void**) (mem_aligned - sizeof(void*));
886 *addr_to_free = mem;
887
888 /* Write the size to free in the header. */
889 size_to_free = (size_t*) (mem_aligned - hdr_size);
890 *size_to_free = size + align + hdr_size;
891
892	return (void*) mem_aligned;
893}
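/*
 * Added sketch of the header laid down by dt_kmem_alloc_aligned(); the
 * aligned pointer returned to the caller is preceded by two hidden fields:
 *
 *	mem -> [ padding ][ size_to_free ][ addr_to_free ][ aligned buffer ... ]
 *	                                                   ^ returned pointer
 *
 * dt_kmem_free_aligned() (below) walks backwards from the buffer to recover
 * the original allocation address and its total size.
 */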
894
895void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
896{
897 void* buf;
898
899 buf = dt_kmem_alloc_aligned(size, align, kmflag);
900
901 if(!buf)
902 return NULL;
903
904 bzero(buf, size);
905
906 return buf;
907}
908
909void dt_kmem_free_aligned(void* buf, size_t size)
910{
911#pragma unused(size)
912 intptr_t ptr = (intptr_t) buf;
913 void **addr_to_free = (void**) (ptr - sizeof(void*));
914 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
915
916 if (buf == NULL)
917 return;
918
919	dt_kmem_free(*addr_to_free, *size_to_free);
920}
921
922/*
923 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
924 * doesn't specify constructor, destructor, or reclaim methods.
925 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
926 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
927 */
928kmem_cache_t *
929kmem_cache_create(
930	const char *name,		/* descriptive name for this cache */
931 size_t bufsize, /* size of the objects it manages */
932 size_t align, /* required object alignment */
933 int (*constructor)(void *, void *, int), /* object constructor */
934 void (*destructor)(void *, void *), /* object destructor */
935 void (*reclaim)(void *), /* memory reclaim callback */
936 void *private, /* pass-thru arg for constr/destr/reclaim */
937 vmem_t *vmp, /* vmem source for slab allocation */
938 int cflags) /* cache creation flags */
939{
940#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
941 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
942}
943
944void *
945kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
946{
947#pragma unused(kmflag)
948 size_t bufsize = (size_t)cp;
949 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
950}
951
952void
953kmem_cache_free(kmem_cache_t *cp, void *buf)
954{
955#pragma unused(cp)
956 _FREE(buf, M_TEMP);
957}
958
959void
960kmem_cache_destroy(kmem_cache_t *cp)
961{
962#pragma unused(cp)
963}
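/*
 * Added illustration of the kmem_cache shim above: the "cache" is merely the
 * object size kept as a cookie, so a sketch of its use (names hypothetical)
 * looks like:
 *
 *	kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *	    sizeof(dtrace_state_percpu_t) * NCPU, 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	void *buf = kmem_cache_alloc(cp, 0);	// kmflag is ignored; plain _MALLOC of that size
 *	kmem_cache_free(cp, buf);		// plain _FREE
 */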
964
965/*
966 * taskq
967 */
968extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
969
970static void
971_taskq_apply( task_func_t func, thread_call_param_t arg )
972{
973 func( (void *)arg );
974}
975
976taskq_t *
977taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
978 int maxalloc, uint_t flags)
979{
980#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
981
982 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
983}
984
985taskqid_t
986taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
987{
988#pragma unused(flags)
989 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
990 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
991 return (taskqid_t) tq /* for lack of anything better */;
992}
993
994void
995taskq_destroy(taskq_t *tq)
996{
997 thread_call_cancel( (thread_call_t) tq );
998 thread_call_free( (thread_call_t) tq );
999}
1000
1001pri_t maxclsyspri;
1002
1003/*
1004 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
1005 */
1006typedef unsigned int u_daddr_t;
1007#include "blist.h"
1008
1009/* By passing around blist *handles*, the underlying blist can be resized as needed. */
1010struct blist_hdl {
1011 blist_t blist;
1012};
1013
1014vmem_t *
1015vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
1016 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
1017{
1018#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
1019 blist_t bl;
1020 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
1021
1022 ASSERT(quantum == 1);
1023 ASSERT(NULL == ignore5);
1024 ASSERT(NULL == ignore6);
1025 ASSERT(NULL == source);
1026 ASSERT(0 == qcache_max);
1027 ASSERT(vmflag & VMC_IDENTIFIER);
1028
1029 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
1030
1031 p->blist = bl = blist_create( size );
1032 blist_free(bl, 0, size);
1033	if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
1034
1035 return (vmem_t *)p;
1036}
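/*
 * Added note: DTrace uses this vmem shim purely as an ID allocator
 * (VMC_IDENTIFIER). vmem_alloc() (below) hands out small integers from the
 * blist, doubling the blist when it runs dry, and vmem_free() simply
 * returns an ID to the blist.
 */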
1037
1038void *
1039vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1040{
1041#pragma unused(vmflag)
1042 struct blist_hdl *q = (struct blist_hdl *)vmp;
1043 blist_t bl = q->blist;
1044 daddr_t p;
1045
1046 p = blist_alloc(bl, (daddr_t)size);
1047
1048 if ((daddr_t)-1 == p) {
1049 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
1050 q->blist = bl;
1051 p = blist_alloc(bl, (daddr_t)size);
1052 if ((daddr_t)-1 == p)
1053 panic("vmem_alloc: failure after blist_resize!");
1054 }
1055
1056	return (void *)(uintptr_t)p;
1057}
1058
1059void
1060vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1061{
1062 struct blist_hdl *p = (struct blist_hdl *)vmp;
1063
1064	blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
1065}
1066
1067void
1068vmem_destroy(vmem_t *vmp)
1069{
1070 struct blist_hdl *p = (struct blist_hdl *)vmp;
1071
1072 blist_destroy( p->blist );
1073 _FREE( p, sizeof(struct blist_hdl) );
1074}
1075
1076/*
1077 * Timing
1078 */
1079
1080/*
1081 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
1082 * January 1, 1970. Because it can be called from probe context, it must take no locks.
1083 */
1084
1085hrtime_t
1086dtrace_gethrestime(void)
1087{
1088 clock_sec_t secs;
1089 clock_nsec_t nanosecs;
1090 uint64_t secs64, ns64;
1091
1092 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
1093 secs64 = (uint64_t)secs;
1094 ns64 = (uint64_t)nanosecs;
1095
1096 ns64 = ns64 + (secs64 * 1000000000LL);
1097 return ns64;
1098}
1099
1100/*
1101 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
1102 * Hence its primary use is to specify intervals.
1103 */
1104
1105hrtime_t
1106dtrace_abs_to_nano(uint64_t elapsed)
1107{
1108 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
1109
1110 /*
1111 * If this is the first time we've run, get the timebase.
1112 * We can use denom == 0 to indicate that sTimebaseInfo is
1113 * uninitialised because it makes no sense to have a zero
1114 * denominator in a fraction.
1115 */
1116
1117 if ( sTimebaseInfo.denom == 0 ) {
1118 (void) clock_timebase_info(&sTimebaseInfo);
1119 }
1120
1121 /*
1122 * Convert to nanoseconds.
1123 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
1124 *
1125 * Provided the final result is representable in 64 bits the following maneuver will
1126 * deliver that result without intermediate overflow.
1127 */
1128 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
1129 return elapsed;
1130 else if (sTimebaseInfo.denom == 1)
1131 return elapsed * (uint64_t)sTimebaseInfo.numer;
1132 else {
1133 /* Decompose elapsed = eta32 * 2^32 + eps32: */
1134 uint64_t eta32 = elapsed >> 32;
1135 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
1136
1137 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
1138
1139 /* Form product of elapsed64 (decomposed) and numer: */
1140 uint64_t mu64 = numer * eta32;
1141 uint64_t lambda64 = numer * eps32;
1142
1143 /* Divide the constituents by denom: */
1144 uint64_t q32 = mu64/denom;
1145 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
1146
1147 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
1148 }
1149}
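/*
 * Added derivation for the general branch above: writing
 * elapsed = eta32*2^32 + eps32 gives
 *
 *	elapsed*numer = (numer*eta32)*2^32 + numer*eps32 = mu64*2^32 + lambda64
 *
 * and with mu64 = q32*denom + r32,
 *
 *	elapsed*numer/denom = q32*2^32 + (r32*2^32 + lambda64)/denom
 *
 * i.e. (q32 << 32) + ((r32 << 32) + lambda64)/denom, which avoids a 128-bit
 * intermediate provided the final result fits in 64 bits.
 */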
1150
1151hrtime_t
1152dtrace_gethrtime(void)
1153{
1154 static uint64_t start = 0;
1155
1156 if (start == 0)
1157 start = mach_absolute_time();
1158
1159 return dtrace_abs_to_nano(mach_absolute_time() - start);
1160}
1161
1162/*
1163 * Atomicity and synchronization
1164 */
1165uint32_t
1166dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
1167{
1168	if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
1169 return cmp;
1170 else
1171 return ~cmp; /* Must return something *other* than cmp */
1172}
1173
1174void *
1175dtrace_casptr(void *target, void *cmp, void *new)
1176{
1177	if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
1178 return cmp;
1179 else
1180 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
1181}
1182
1183/*
1184 * Interrupt manipulation
1185 */
1186dtrace_icookie_t
1187dtrace_interrupt_disable(void)
1188{
1189 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
1190}
1191
1192void
1193dtrace_interrupt_enable(dtrace_icookie_t reenable)
1194{
1195 (void)ml_set_interrupts_enabled((boolean_t)reenable);
1196}
1197
1198/*
1199 * MP coordination
1200 */
1201static void
1202dtrace_sync_func(void) {}
1203
1204/*
1205 * dtrace_sync() is not called from probe context.
1206 */
1207void
1208dtrace_sync(void)
1209{
1210 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1211}
1212
1213/*
1214 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1215 */
1216
1217extern kern_return_t dtrace_copyio_preflight(addr64_t);
1218extern kern_return_t dtrace_copyio_postflight(addr64_t);
1219
1220static int
1221dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1222{
1223#pragma unused(kaddr)
1224
1225 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1226 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1227
1228 ASSERT(kaddr + size >= kaddr);
1229
1230	if ( uaddr + size < uaddr ||	/* Avoid address wrap. */
1231 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1232 {
1233 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1234 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1235 return (0);
1236 }
1237 return (1);
1238}
1239
1240void
1241dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1242{
1243#pragma unused(flags)
1244
2d21ac55
A
1245 if (dtrace_copycheck( src, dst, len )) {
1246 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1247 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1248 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1249 }
1250 dtrace_copyio_postflight(src);
1251 }
1252}
1253
1254void
1255dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1256{
1257#pragma unused(flags)
1258
2d21ac55
A
1259 size_t actual;
1260
1261 if (dtrace_copycheck( src, dst, len )) {
1262 /* copyin as many as 'len' bytes. */
1263 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1264
1265 /*
1266 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1267 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1268 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1269 * to the caller.
1270 */
1271 if (error && error != ENAMETOOLONG) {
1272 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1273 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1274 }
1275 dtrace_copyio_postflight(src);
1276 }
1277}
1278
1279void
1280dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1281{
1282#pragma unused(flags)
1283
2d21ac55
A
1284 if (dtrace_copycheck( dst, src, len )) {
1285 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1286 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1287 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1288 }
1289 dtrace_copyio_postflight(dst);
1290 }
1291}
1292
1293void
1294dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1295{
1296#pragma unused(flags)
1297
2d21ac55
A
1298 size_t actual;
1299
1300 if (dtrace_copycheck( dst, src, len )) {
1301
1302 /*
1303 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1304 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1305 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1306 * to the caller.
1307 */
1308 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1309 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1310 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1311 }
1312 dtrace_copyio_postflight(dst);
1313 }
1314}
1315
1316uint8_t
1317dtrace_fuword8(user_addr_t uaddr)
1318{
1319 uint8_t ret = 0;
1320
1321 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1322 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1323 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1324 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1325 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1326 }
1327 dtrace_copyio_postflight(uaddr);
1328 }
1329 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1330
1331 return(ret);
1332}
1333
1334uint16_t
1335dtrace_fuword16(user_addr_t uaddr)
1336{
1337 uint16_t ret = 0;
1338
1339 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1340 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1341 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1342 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1343 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1344 }
1345 dtrace_copyio_postflight(uaddr);
1346 }
1347 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1348
1349 return(ret);
1350}
1351
1352uint32_t
1353dtrace_fuword32(user_addr_t uaddr)
1354{
1355 uint32_t ret = 0;
1356
1357 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1358 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1359 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1360 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1361 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1362 }
1363 dtrace_copyio_postflight(uaddr);
1364 }
1365 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1366
1367 return(ret);
1368}
1369
1370uint64_t
1371dtrace_fuword64(user_addr_t uaddr)
1372{
1373 uint64_t ret = 0;
1374
1375 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1376 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1377 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1378 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1379 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1380 }
1381 dtrace_copyio_postflight(uaddr);
1382 }
1383 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1384
1385 return(ret);
1386}
1387
1388/*
1389 * Emulation of Solaris fuword / suword
1390 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1391 */
1392
1393int
1394fuword8(user_addr_t uaddr, uint8_t *value)
1395{
1396 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1397 return -1;
1398 }
1399
1400 return 0;
1401}
1402
1403int
1404fuword16(user_addr_t uaddr, uint16_t *value)
1405{
1406 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1407 return -1;
1408 }
1409
1410 return 0;
1411}
1412
1413int
1414fuword32(user_addr_t uaddr, uint32_t *value)
1415{
1416 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1417 return -1;
1418 }
1419
1420 return 0;
1421}
1422
1423int
1424fuword64(user_addr_t uaddr, uint64_t *value)
1425{
1426 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1427 return -1;
1428 }
1429
1430 return 0;
1431}
1432
1433void
1434fuword8_noerr(user_addr_t uaddr, uint8_t *value)
1435{
1436 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
1437 *value = 0;
1438 }
1439}
1440
1441void
1442fuword16_noerr(user_addr_t uaddr, uint16_t *value)
1443{
1444 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
1445 *value = 0;
1446 }
1447}
1448
1449void
1450fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1451{
1452 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1453 *value = 0;
1454 }
1455}
1456
1457void
1458fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1459{
1460 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1461 *value = 0;
1462 }
1463}
1464
1465int
1466suword64(user_addr_t addr, uint64_t value)
1467{
1468 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1469 return -1;
1470 }
1471
1472 return 0;
1473}
1474
1475int
1476suword32(user_addr_t addr, uint32_t value)
1477{
1478 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1479 return -1;
1480 }
1481
1482 return 0;
1483}
1484
1485int
1486suword16(user_addr_t addr, uint16_t value)
1487{
1488 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1489 return -1;
1490 }
1491
1492 return 0;
1493}
1494
1495int
1496suword8(user_addr_t addr, uint8_t value)
1497{
1498 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1499 return -1;
1500 }
1501
1502 return 0;
1503}
1504
1505
1506/*
1507 * Miscellaneous
1508 */
1509extern boolean_t dtrace_tally_fault(user_addr_t);
1510
1511boolean_t
1512dtrace_tally_fault(user_addr_t uaddr)
1513{
1514 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1515 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1516 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1517}
1518
1519#define TOTTY 0x02
1520extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1521
1522int
1523vuprintf(const char *format, va_list ap)
1524{
1525 return prf(format, ap, TOTTY, NULL);
1526}
1527
1528/* Not called from probe context */
1529void cmn_err( int level, const char *format, ... )
1530{
1531#pragma unused(level)
1532 va_list alist;
1533
1534 va_start(alist, format);
1535 vuprintf(format, alist);
1536 va_end(alist);
1537 uprintf("\n");
1538}
1539
1540/*
1541 * History:
1542 * 2002-01-24 gvdl Initial implementation of strstr
1543 */
1544
1545__private_extern__ const char *
1546strstr(const char *in, const char *str)
1547{
1548 char c;
1549 size_t len;
1550
1551 c = *str++;
1552 if (!c)
1553		return (const char *) in;	// Trivial empty string case
1554
1555 len = strlen(str);
1556 do {
1557 char sc;
1558
1559 do {
1560 sc = *in++;
1561 if (!sc)
1562 return (char *) 0;
1563 } while (sc != c);
1564 } while (strncmp(in, str, len) != 0);
1565
1566	return (const char *) (in - 1);
1567}
1568
1569/*
1570 * Runtime and ABI
1571 */
1572uintptr_t
1573dtrace_caller(int ignore)
1574{
1575#pragma unused(ignore)
1576 return -1; /* Just as in Solaris dtrace_asm.s */
1577}
1578
1579int
1580dtrace_getstackdepth(int aframes)
1581{
1582	struct frame *fp = (struct frame *)__builtin_frame_address(0);
1583 struct frame *nextfp, *minfp, *stacktop;
1584 int depth = 0;
1585 int on_intr;
1586
1587 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1588 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1589 else
1590		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1591
1592 minfp = fp;
1593
1594 aframes++;
1595
1596 for (;;) {
1597 depth++;
1598
1599 nextfp = *(struct frame **)fp;
1600
1601 if (nextfp <= minfp || nextfp >= stacktop) {
1602 if (on_intr) {
1603 /*
1604 * Hop from interrupt stack to thread stack.
1605 */
1606 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1607
1608 minfp = (struct frame *)kstack_base;
1609				stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1610
1611 on_intr = 0;
1612 continue;
1613 }
1614 break;
1615 }
1616
1617 fp = nextfp;
1618 minfp = fp;
1619 }
1620
1621 if (depth <= aframes)
1622 return (0);
1623
1624 return (depth - aframes);
1625}
1626
1627/*
1628 * Unconsidered
1629 */
1630void
1631dtrace_vtime_enable(void) {}
1632
1633void
1634dtrace_vtime_disable(void) {}
1635
1636#else /* else ! CONFIG_DTRACE */
1637
1638#include <sys/types.h>
1639#include <mach/vm_types.h>
1640#include <mach/kmod.h>
1641
1642/*
1643 * This exists to prevent build errors when dtrace is unconfigured.
1644 */
1645
1646kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1647
1648kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1649#pragma unused(arg1, arg2, arg3)
1650
1651 return KERN_FAILURE;
1652}
1653
1654#endif /* CONFIG_DTRACE */