1/*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30/*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36#if CONFIG_DTRACE
37
38#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39#include <kern/thread.h>
40#include <mach/thread_status.h>
41
42#include <stdarg.h>
43#include <string.h>
44#include <sys/malloc.h>
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <sys/proc_internal.h>
48#include <sys/kauth.h>
49#include <sys/user.h>
50#include <sys/systm.h>
51#include <sys/dtrace.h>
52#include <sys/dtrace_impl.h>
53#include <libkern/OSAtomic.h>
54#include <kern/kern_types.h>
55#include <kern/timer_call.h>
56#include <kern/thread_call.h>
57#include <kern/task.h>
58#include <kern/sched_prim.h>
59#include <kern/queue.h>
60#include <miscfs/devfs/devfs.h>
61#include <kern/kalloc.h>
62
63#include <mach/vm_param.h>
64#include <mach/mach_vm.h>
65#include <mach/task.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
68
69/*
70 * pid/proc
71 */
72/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
73#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
74
75/* Not called from probe context */
76proc_t *
77sprlock(pid_t pid)
78{
79 proc_t* p;
80
81 if ((p = proc_find(pid)) == PROC_NULL) {
82 return PROC_NULL;
83 }
84
85	task_suspend_internal(p->task);
86
87 proc_lock(p);
88
89 lck_mtx_lock(&p->p_dtrace_sprlock);
90
91 return p;
92}
93
94/* Not called from probe context */
95void
96sprunlock(proc_t *p)
97{
98 if (p != PROC_NULL) {
99 lck_mtx_unlock(&p->p_dtrace_sprlock);
100
101 proc_unlock(p);
102
103		task_resume_internal(p->task);
104
105 proc_rele(p);
106 }
107}
108
109/*
110 * uread/uwrite
111 */
112
113// These are not exported from vm_map.h.
114extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
115extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
116
117/* Not called from probe context */
118int
119uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
120{
121 kern_return_t ret;
122
123 ASSERT(p != PROC_NULL);
124 ASSERT(p->task != NULL);
125
126 task_t task = p->task;
127
128 /*
129 * Grab a reference to the task vm_map_t to make sure
130 * the map isn't pulled out from under us.
131 *
132 * Because the proc_lock is not held at all times on all code
133 * paths leading here, it is possible for the proc to have
134 * exited. If the map is null, fail.
135 */
136 vm_map_t map = get_task_map_reference(task);
137 if (map) {
138 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
139 vm_map_deallocate(map);
140 } else
141 ret = KERN_TERMINATED;
142
143 return (int)ret;
144}
145
146
147/* Not called from probe context */
148int
149uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
150{
151 kern_return_t ret;
152
153 ASSERT(p != NULL);
154 ASSERT(p->task != NULL);
155
156 task_t task = p->task;
157
158 /*
159 * Grab a reference to the task vm_map_t to make sure
160 * the map isn't pulled out from under us.
161 *
162 * Because the proc_lock is not held at all times on all code
163 * paths leading here, it is possible for the proc to have
164 * exited. If the map is null, fail.
165 */
166 vm_map_t map = get_task_map_reference(task);
167 if (map) {
168 /* Find the memory permissions. */
169 uint32_t nestingDepth=999999;
170 vm_region_submap_short_info_data_64_t info;
171 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
172 mach_vm_address_t address = (mach_vm_address_t)a;
173 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
174
175 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
176 if (ret != KERN_SUCCESS)
177 goto done;
178
179 vm_prot_t reprotect;
180
181 if (!(info.protection & VM_PROT_WRITE)) {
182 /* Save the original protection values for restoration later */
183 reprotect = info.protection;
184
185 if (info.max_protection & VM_PROT_WRITE) {
186 /* The memory is not currently writable, but can be made writable. */
187 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
188 } else {
189 /*
190 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
191 *
192 * Oddly, requesting just "reprotect | VM_PROT_COPY" fails; the request must combine COPY with READ and WRITE, as below.
193 */
194 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
195 }
196
197 if (ret != KERN_SUCCESS)
198 goto done;
199
200 } else {
201 /* The memory was already writable. */
202 reprotect = VM_PROT_NONE;
203 }
204
205 ret = vm_map_write_user( map,
206 buf,
207 (vm_map_address_t)a,
208 (vm_size_t)len);
209
210 if (ret != KERN_SUCCESS)
211 goto done;
212
213 if (reprotect != VM_PROT_NONE) {
214 ASSERT(reprotect & VM_PROT_EXECUTE);
215 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
216 }
217
218done:
219 vm_map_deallocate(map);
220 } else
221 ret = KERN_TERMINATED;
222
223 return (int)ret;
224}
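/*
 * In short: uwrite() temporarily adds VM_PROT_WRITE (or forces a copy-on-write
 * mapping with VM_PROT_COPY when max_protection does not allow writes), performs
 * the vm_map_write_user(), and then restores the saved protections. A reprotect
 * value of VM_PROT_NONE marks the "already writable, nothing to restore" case.
 */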
225
226/*
227 * cpuvar
228 */
229lck_mtx_t cpu_lock;
230lck_mtx_t cyc_lock;
231lck_mtx_t mod_lock;
232
233dtrace_cpu_t *cpu_list;
234cpu_core_t *cpu_core; /* XXX TLB lockdown? */
235
236/*
237 * cred_t
238 */
239
240/*
241 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
242 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
243 */
244cred_t *
245dtrace_CRED(void)
246{
247 struct uthread *uthread = get_bsdthread_info(current_thread());
248
249 if (uthread == NULL)
250 return NULL;
251 else
252 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
253}
254
255#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
256#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
257 HAS_ALLPRIVS(cr) : \
258 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
259
260int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
261{
262#pragma unused(priv, all)
263 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
264}
265
266int
267PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
268{
269#pragma unused(priv, boolean)
270 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
271}
272
273/* XXX Get around const poisoning using structure assigns */
274gid_t
275crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }
276
277uid_t
278crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }
279
280/*
281 * "cyclic"
282 */
283
284typedef struct wrap_timer_call {
285 /* node attributes */
286 cyc_handler_t hdlr;
287 cyc_time_t when;
288 uint64_t deadline;
289 int cpuid;
290 boolean_t suspended;
291 struct timer_call call;
292
293 /* next item in the linked list */
294 LIST_ENTRY(wrap_timer_call) entries;
295} wrap_timer_call_t;
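/*
 * Lifecycle of a wrap_timer_call_t: cyclic_timer_add() allocates one, and
 * timer_call_add_cyclic() arms its timer_call and links it onto the owning CPU's
 * cpu_cyc_list with interrupts disabled. _timer_call_apply_cyclic() re-arms it
 * every period, dtrace_cpu_state_changed() suspends/resumes the entries when a
 * CPU goes offline/online, and cyclic_timer_remove() cancels it by cross-calling
 * the CPU that owns it.
 */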
296
297#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
298#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
299
300/* CPU going online/offline notifications */
301void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
302void dtrace_cpu_state_changed(int, boolean_t);
303
304void
305dtrace_install_cpu_hooks(void) {
306 dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
307}
308
309void
310dtrace_cpu_state_changed(int cpuid, boolean_t is_running) {
311#pragma unused(cpuid)
312 wrap_timer_call_t *wrapTC = NULL;
313 boolean_t suspend = (is_running ? FALSE : TRUE);
314 dtrace_icookie_t s;
315
316 /* Ensure that we're not going to leave the CPU */
317 s = dtrace_interrupt_disable();
318 assert(cpuid == cpu_number());
319
320 LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
321 assert(wrapTC->cpuid == cpu_number());
322 if (suspend) {
323 assert(!wrapTC->suspended);
324 /* If this fails, we'll panic anyway, so let's do this now. */
325 if (!timer_call_cancel(&wrapTC->call))
326 panic("timer_call_set_suspend() failed to cancel a timer call");
327 wrapTC->suspended = TRUE;
328 } else {
329 /* Rearm the timer, but ensure it was suspended first. */
330 assert(wrapTC->suspended);
331 clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
332 &wrapTC->deadline);
333 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
334 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
335 wrapTC->suspended = FALSE;
336 }
337
338 }
339
340 /* Restore the previous interrupt state. */
341 dtrace_interrupt_enable(s);
342}
343
344static void
345_timer_call_apply_cyclic( void *ignore, void *vTChdl )
346{
347#pragma unused(ignore)
348 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
349
350 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
351
352 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
353	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
354}
355
356static cyclic_id_t
357timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
358{
359 uint64_t now;
360	dtrace_icookie_t s;
361
362 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
363 wrapTC->hdlr = *handler;
364 wrapTC->when = *when;
365
366 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
367
368 now = mach_absolute_time();
369 wrapTC->deadline = now;
370
371 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
372
373 /* Insert the timer to the list of the running timers on this CPU, and start it. */
374 s = dtrace_interrupt_disable();
375 wrapTC->cpuid = cpu_number();
376 LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
377 timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
378 TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
379 wrapTC->suspended = FALSE;
380 dtrace_interrupt_enable(s);
381
382 return (cyclic_id_t)wrapTC;
383}
384
385/*
386 * Executed on the CPU the timer is running on.
387 */
388static void
389timer_call_remove_cyclic(cyclic_id_t cyclic)
390{
391 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
392
393 assert(wrapTC);
394 assert(cpu_number() == wrapTC->cpuid);
395
396 if (!timer_call_cancel(&wrapTC->call))
397 panic("timer_call_remove_cyclic() failed to cancel a timer call");
398
399	LIST_REMOVE(wrapTC, entries);
400}
401
402static void *
403timer_call_get_cyclic_arg(cyclic_id_t cyclic)
404{
405 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
406
407 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
408}
409
410cyclic_id_t
411cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
412{
413 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
414 if (NULL == wrapTC)
415 return CYCLIC_NONE;
416 else
417 return timer_call_add_cyclic( wrapTC, handler, when );
418}
419
420void
421cyclic_timer_remove(cyclic_id_t cyclic)
422{
423 ASSERT( cyclic != CYCLIC_NONE );
424
425 /* Removing a timer call must be done on the CPU the timer is running on. */
426 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
427 dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);
428
429 _FREE((void *)cyclic, M_TEMP);
430}
431
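/*
 * The cyc_list buffer passed to _cyclic_add_omni()/_cyclic_remove_omni() is the
 * single allocation made in cyclic_add_omni(), laid out as:
 *
 *	[ cyc_omni_handler_t ][ cyclic_id_t x NCPU ][ wrap_timer_call_t x NCPU ]
 *
 * Each CPU, running under dtrace_xcall(DTRACE_CPUALL, ...), fills in its own
 * cyclic_id_t slot and uses its own wrap_timer_call_t, both indexed by cpu_number().
 */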
432static void
433_cyclic_add_omni(cyclic_id_list_t cyc_list)
434{
435 cyc_time_t cT;
436 cyc_handler_t cH;
437 wrap_timer_call_t *wrapTC;
438 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
439 char *t;
440
441 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
442
443 t = (char *)cyc_list;
444 t += sizeof(cyc_omni_handler_t);
445	cyc_list = (cyclic_id_list_t)(uintptr_t)t;
446
447 t += sizeof(cyclic_id_t)*NCPU;
448 t += (sizeof(wrap_timer_call_t))*cpu_number();
449	wrapTC = (wrap_timer_call_t *)(uintptr_t)t;
450
451 cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
452}
453
454cyclic_id_list_t
455cyclic_add_omni(cyc_omni_handler_t *omni)
456{
457 cyclic_id_list_t cyc_list =
458 _MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
459 sizeof(cyclic_id_t)*NCPU +
460 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
461 if (NULL == cyc_list)
462 return (cyclic_id_list_t)CYCLIC_NONE;
463
464 *(cyc_omni_handler_t *)cyc_list = *omni;
465 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
466
467 return cyc_list;
468}
469
470static void
471_cyclic_remove_omni(cyclic_id_list_t cyc_list)
472{
473 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
474 void *oarg;
475 cyclic_id_t cid;
476 char *t;
477
478 t = (char *)cyc_list;
479 t += sizeof(cyc_omni_handler_t);
480	cyc_list = (cyclic_id_list_t)(uintptr_t)t;
481
482 /*
483 * If the processor was offline when dtrace started, we did not allocate
484 * a cyclic timer for this CPU.
485 */
486 if ((cid = cyc_list[cpu_number()]) != CYCLIC_NONE) {
487 oarg = timer_call_get_cyclic_arg(cid);
488 timer_call_remove_cyclic(cid);
489 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
490 }
491}
492
493void
494cyclic_remove_omni(cyclic_id_list_t cyc_list)
495{
496 ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );
497
498 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
499 _FREE(cyc_list, M_TEMP);
500}
501
502typedef struct wrap_thread_call {
503 thread_call_t TChdl;
504 cyc_handler_t hdlr;
505 cyc_time_t when;
506 uint64_t deadline;
507} wrap_thread_call_t;
508
509/*
510 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
511 * cleaner and the deadman, but too distant in time and place for the profile provider.
512 */
513static void
514_cyclic_apply( void *ignore, void *vTChdl )
515{
516#pragma unused(ignore)
517 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
518
519 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
520
521 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
522 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
523
524 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
525 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
526 thread_wakeup((event_t)wrapTC);
527}
528
529cyclic_id_t
530cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
531{
532 uint64_t now;
533
534 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
535 if (NULL == wrapTC)
536 return CYCLIC_NONE;
537
538 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
539 wrapTC->hdlr = *handler;
540 wrapTC->when = *when;
541
542 ASSERT(when->cyt_when == 0);
543 ASSERT(when->cyt_interval < WAKEUP_REAPER);
544
545 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
546
547 now = mach_absolute_time();
548 wrapTC->deadline = now;
549
550 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
551 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
552
553 return (cyclic_id_t)wrapTC;
554}
555
556static void
557noop_cyh_func(void * ignore)
558{
559#pragma unused(ignore)
560}
561
562void
563cyclic_remove(cyclic_id_t cyclic)
564{
565 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
566
567 ASSERT(cyclic != CYCLIC_NONE);
568
569 while (!thread_call_cancel(wrapTC->TChdl)) {
570 int ret = assert_wait(wrapTC, THREAD_UNINT);
571 ASSERT(ret == THREAD_WAITING);
572
573 wrapTC->when.cyt_interval = WAKEUP_REAPER;
574
575 ret = thread_block(THREAD_CONTINUE_NULL);
576 ASSERT(ret == THREAD_AWAKENED);
577 }
578
579 if (thread_call_free(wrapTC->TChdl))
580 _FREE(wrapTC, M_TEMP);
581 else {
582 /* Gut this cyclic and move on ... */
583 wrapTC->hdlr.cyh_func = noop_cyh_func;
584 wrapTC->when.cyt_interval = NEARLY_FOREVER;
585 }
586}
587
588/*
589 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
590 */
591
592thread_call_t
593dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
594{
595#pragma unused(arg)
596 thread_call_t call = thread_call_allocate(func, NULL);
597
598 nanoseconds_to_absolutetime(nanos, &nanos);
599
600 /*
601 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
602 * and clock drift on later invocations is not a worry.
603 */
604 uint64_t deadline = mach_absolute_time() + nanos;
605	/* DRK: consider using a lower priority callout here */
606 thread_call_enter_delayed(call, deadline);
607
608 return call;
609}
610
611/*
612 * ddi
613 */
614void
615ddi_report_dev(dev_info_t *devi)
616{
617#pragma unused(devi)
618}
619
620#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
621static void *soft[NSOFT_STATES];
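/*
 * This is a deliberately minimal stand-in for the Solaris soft-state facility:
 * ddi_soft_state_init() pre-allocates NSOFT_STATES buffers of 'size' bytes and
 * stores that size through *state_p, so the opaque "state" handle later seen by
 * ddi_soft_state_free() is really just the per-item size, not a soft-state struct.
 */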
622
623int
624ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
625{
626#pragma unused(n_items)
627 int i;
628
629 for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
630 *(size_t *)state_p = size;
631 return 0;
632}
633
634int
635ddi_soft_state_zalloc(void *state, int item)
636{
637#pragma unused(state)
638 if (item < NSOFT_STATES)
639 return DDI_SUCCESS;
640 else
641 return DDI_FAILURE;
642}
643
644void *
645ddi_get_soft_state(void *state, int item)
646{
647#pragma unused(state)
648 ASSERT(item < NSOFT_STATES);
649 return soft[item];
650}
651
652int
653ddi_soft_state_free(void *state, int item)
654{
655 ASSERT(item < NSOFT_STATES);
656 bzero( soft[item], (size_t)state );
657 return DDI_SUCCESS;
658}
659
660void
661ddi_soft_state_fini(void **state_p)
662{
663#pragma unused(state_p)
664 int i;
665
666 for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
667}
668
669static unsigned int gRegisteredProps = 0;
670static struct {
671 char name[32]; /* enough for "dof-data-" + digits */
672 int *data;
673 uint_t nelements;
674} gPropTable[16];
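/*
 * Anonymous DOF flows through this table: _dtrace_register_anon_DOF() widens each
 * DOF byte to an int and records the array under the given property name (the name
 * field is sized for "dof-data-" plus digits), and ddi_prop_lookup_int_array()
 * below hands it back when the property is later looked up -- presumably by
 * dtrace's anonymous-enabling path, mirroring the Solaris ddi_prop_* usage.
 */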
675
676kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
677
678kern_return_t
679_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
680{
681 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
682 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);
683
684 if (NULL == p)
685 return KERN_FAILURE;
686
687 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
688 gPropTable[gRegisteredProps].nelements = nelements;
689 gPropTable[gRegisteredProps].data = p;
690
691 while (nelements-- > 0) {
692 *p++ = (int)(*data++);
693 }
694
695 gRegisteredProps++;
696 return KERN_SUCCESS;
697 }
698 else
699 return KERN_FAILURE;
700}
701
702int
703ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
704    const char *name, int **data, uint_t *nelements)
705{
706#pragma unused(match_dev,dip,flags)
707 unsigned int i;
708 for (i = 0; i < gRegisteredProps; ++i)
709 {
710 if (0 == strncmp(name, gPropTable[i].name,
711 sizeof(gPropTable[i].name))) {
712 *data = gPropTable[i].data;
713 *nelements = gPropTable[i].nelements;
714 return DDI_SUCCESS;
715 }
716 }
717 return DDI_FAILURE;
718}
719
720int
721ddi_prop_free(void *buf)
722{
723 _FREE(buf, M_TEMP);
724 return DDI_SUCCESS;
725}
726
727int
728ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
729
730int
731ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
732 minor_t minor_num, const char *node_type, int flag)
733{
734#pragma unused(spec_type,node_type,flag)
735	dev_t dev = makedev( ddi_driver_major(dip), minor_num );
736
737 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
738 return DDI_FAILURE;
739 else
740 return DDI_SUCCESS;
741}
742
743void
744ddi_remove_minor_node(dev_info_t *dip, char *name)
745{
746#pragma unused(dip,name)
747/* XXX called from dtrace_detach, so NOTREACHED for now. */
748}
749
750major_t
751getemajor( dev_t d )
752{
753 return (major_t) major(d);
754}
755
756minor_t
757getminor ( dev_t d )
758{
759 return (minor_t) minor(d);
760}
761
762dev_t
763makedevice(major_t major, minor_t minor)
764{
765 return makedev( major, minor );
766}
767
768int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
769{
770#pragma unused(dev, dip, flags, name)
771
772 return defvalue;
773}
774
775/*
776 * Kernel Debug Interface
777 */
778int
779kdi_dtrace_set(kdi_dtrace_set_t ignore)
780{
781#pragma unused(ignore)
782 return 0; /* Success */
783}
784
785extern void Debugger(const char*);
786
787void
788debug_enter(char *c) { Debugger(c); }
789
790/*
791 * kmem
792 */
793
794void *
795dt_kmem_alloc(size_t size, int kmflag)
796{
797#pragma unused(kmflag)
798
799/*
800 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
801 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
802 */
803#if defined(DTRACE_MEMORY_ZONES)
804 return dtrace_alloc(size);
805#else
806 return kalloc(size);
807#endif
808}
809
810void *
811dt_kmem_zalloc(size_t size, int kmflag)
812{
813#pragma unused(kmflag)
814
815/*
816 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
817 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
818 */
819#if defined(DTRACE_MEMORY_ZONES)
820 void* buf = dtrace_alloc(size);
821#else
822 void* buf = kalloc(size);
823#endif
824
825 if(!buf)
826 return NULL;
827
828 bzero(buf, size);
829
830 return buf;
831}
832
833void
834dt_kmem_free(void *buf, size_t size)
835{
836#pragma unused(size)
837 /*
838 * DTrace relies on this; it does a lot of NULL frees.
839 * A null free causes the debug builds to panic.
840 */
841 if (buf == NULL) return;
842
843 ASSERT(size > 0);
844
845#if defined(DTRACE_MEMORY_ZONES)
846 dtrace_free(buf, size);
847#else
848 kfree(buf, size);
849#endif
850}
851
852
853
854/*
855 * aligned kmem allocator
856 * align should be a power of two
857 */
858
859void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
860{
861 void *mem, **addr_to_free;
862 intptr_t mem_aligned;
863 size_t *size_to_free, hdr_size;
864
865 /* Must be a power of two. */
866 assert(align != 0);
867 assert((align & (align - 1)) == 0);
868
869 /*
870 * We are going to add a header to the allocation. It contains
871 * the address to free and the total size of the buffer.
872 */
873 hdr_size = sizeof(size_t) + sizeof(void*);
874 mem = dt_kmem_alloc(size + align + hdr_size, kmflag);
875 if (mem == NULL)
876 return NULL;
877
878 mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));
879
880 /* Write the address to free in the header. */
881 addr_to_free = (void**) (mem_aligned - sizeof(void*));
882 *addr_to_free = mem;
883
884 /* Write the size to free in the header. */
885 size_to_free = (size_t*) (mem_aligned - hdr_size);
886 *size_to_free = size + align + hdr_size;
887
888	return (void*) mem_aligned;
889}
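/*
 * Illustrative layout with hypothetical numbers (LP64, so hdr_size == 16): if
 * dt_kmem_alloc() returns 0x1008 for size 100 and align 64, then
 * mem_aligned = (0x1008 + 64 + 16) & ~63 = 0x1040; the raw pointer 0x1008 is
 * stored at 0x1038, the total size 180 at 0x1030, and dt_kmem_free_aligned(0x1040)
 * recovers both from the header to free the whole buffer.
 */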
890
891void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
892{
893 void* buf;
894
895 buf = dt_kmem_alloc_aligned(size, align, kmflag);
896
897 if(!buf)
898 return NULL;
899
900 bzero(buf, size);
901
902 return buf;
903}
904
905void dt_kmem_free_aligned(void* buf, size_t size)
906{
907#pragma unused(size)
908 intptr_t ptr = (intptr_t) buf;
909 void **addr_to_free = (void**) (ptr - sizeof(void*));
910 size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));
911
912 if (buf == NULL)
913 return;
914
915	dt_kmem_free(*addr_to_free, *size_to_free);
916}
917
918/*
919 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
920 * doesn't specify constructor, destructor, or reclaim methods.
921 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
922 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
923 */
924kmem_cache_t *
925kmem_cache_create(
926    const char *name, /* descriptive name for this cache */
927 size_t bufsize, /* size of the objects it manages */
928 size_t align, /* required object alignment */
929 int (*constructor)(void *, void *, int), /* object constructor */
930 void (*destructor)(void *, void *), /* object destructor */
931 void (*reclaim)(void *), /* memory reclaim callback */
932 void *private, /* pass-thru arg for constr/destr/reclaim */
933 vmem_t *vmp, /* vmem source for slab allocation */
934 int cflags) /* cache creation flags */
935{
936#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
937 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
938}
939
940void *
941kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
942{
943#pragma unused(kmflag)
944 size_t bufsize = (size_t)cp;
945 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
946}
947
948void
949kmem_cache_free(kmem_cache_t *cp, void *buf)
950{
951#pragma unused(cp)
952 _FREE(buf, M_TEMP);
953}
954
955void
956kmem_cache_destroy(kmem_cache_t *cp)
957{
958#pragma unused(cp)
959}
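/*
 * A minimal sketch of the intended (single) use, per the comment above; the cache
 * name and arguments here are illustrative, not quoted from dtrace.c:
 *
 *	kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *	    sizeof (dtrace_state_percpu_t) * NCPU, 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	void *buf = kmem_cache_alloc(cp, KM_SLEEP);	// really _MALLOC of that size
 *	kmem_cache_free(cp, buf);			// really _FREE
 */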
960
961/*
962 * taskq
963 */
964extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
965
966static void
967_taskq_apply( task_func_t func, thread_call_param_t arg )
968{
969 func( (void *)arg );
970}
971
972taskq_t *
973taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
974 int maxalloc, uint_t flags)
975{
976#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
977
978 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
979}
980
981taskqid_t
982taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
983{
984#pragma unused(flags)
985 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
986 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
987 return (taskqid_t) tq /* for lack of anything better */;
988}
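/*
 * Note that this shim backs each taskq with a single thread_call: every
 * taskq_dispatch() re-runs thread_call_setup() on that same call, so only one work
 * item can be outstanding per taskq at a time. That is sufficient for DTrace's
 * limited use, but it is not a general taskq implementation.
 */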
989
990void
991taskq_destroy(taskq_t *tq)
992{
993 thread_call_cancel( (thread_call_t) tq );
994 thread_call_free( (thread_call_t) tq );
995}
996
997pri_t maxclsyspri;
998
999/*
1000 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
1001 */
1002typedef unsigned int u_daddr_t;
1003#include "blist.h"
1004
1005/* By passing around blist *handles*, the underlying blist can be resized as needed. */
1006struct blist_hdl {
1007 blist_t blist;
1008};
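/*
 * An illustrative (hypothetical) use of this arena-as-ID-allocator, consistent
 * with the asserts in vmem_create() below -- quantum 1, no source, VMC_IDENTIFIER:
 *
 *	vmem_t *arena = vmem_create("dtrace_ids", (void *)1, UINT32_MAX, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	void *id = vmem_alloc(arena, 1, VM_BESTFIT | VM_SLEEP);	// hands back an integer ID
 *	vmem_free(arena, id, 1);
 *
 * vmem_alloc() doubles the backing blist whenever the arena runs out of IDs.
 */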
1009
1010vmem_t *
1011vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
1012 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
1013{
1014#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
1015 blist_t bl;
1016 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
1017
1018 ASSERT(quantum == 1);
1019 ASSERT(NULL == ignore5);
1020 ASSERT(NULL == ignore6);
1021 ASSERT(NULL == source);
1022 ASSERT(0 == qcache_max);
1023 ASSERT(vmflag & VMC_IDENTIFIER);
1024
1025 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
1026
1027 p->blist = bl = blist_create( size );
1028 blist_free(bl, 0, size);
1029	if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
1030
1031 return (vmem_t *)p;
1032}
1033
1034void *
1035vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1036{
1037#pragma unused(vmflag)
1038 struct blist_hdl *q = (struct blist_hdl *)vmp;
1039 blist_t bl = q->blist;
1040 daddr_t p;
1041
1042 p = blist_alloc(bl, (daddr_t)size);
1043
1044 if ((daddr_t)-1 == p) {
1045 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
1046 q->blist = bl;
1047 p = blist_alloc(bl, (daddr_t)size);
1048 if ((daddr_t)-1 == p)
1049 panic("vmem_alloc: failure after blist_resize!");
1050 }
1051
1052	return (void *)(uintptr_t)p;
1053}
1054
1055void
1056vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1057{
1058 struct blist_hdl *p = (struct blist_hdl *)vmp;
1059
1060	blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
1061}
1062
1063void
1064vmem_destroy(vmem_t *vmp)
1065{
1066 struct blist_hdl *p = (struct blist_hdl *)vmp;
1067
1068 blist_destroy( p->blist );
1069 _FREE( p, sizeof(struct blist_hdl) );
1070}
1071
1072/*
1073 * Timing
1074 */
1075
1076/*
1077 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
1078 * January 1, 1970. Because it can be called from probe context, it must take no locks.
1079 */
1080
1081hrtime_t
1082dtrace_gethrestime(void)
1083{
1084 clock_sec_t secs;
1085 clock_nsec_t nanosecs;
1086 uint64_t secs64, ns64;
1087
1088 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
1089 secs64 = (uint64_t)secs;
1090 ns64 = (uint64_t)nanosecs;
1091
1092 ns64 = ns64 + (secs64 * 1000000000LL);
1093 return ns64;
1094}
1095
1096/*
1097 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
1098 * Hence its primary use is to specify intervals.
1099 */
1100
1101hrtime_t
1102dtrace_abs_to_nano(uint64_t elapsed)
1103{
1104 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
1105
1106 /*
1107 * If this is the first time we've run, get the timebase.
1108 * We can use denom == 0 to indicate that sTimebaseInfo is
1109 * uninitialised because it makes no sense to have a zero
1110 * denominator in a fraction.
1111 */
1112
1113 if ( sTimebaseInfo.denom == 0 ) {
1114 (void) clock_timebase_info(&sTimebaseInfo);
1115 }
1116
1117 /*
1118 * Convert to nanoseconds.
1119 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
1120 *
1121 * Provided the final result is representable in 64 bits the following maneuver will
1122 * deliver that result without intermediate overflow.
1123 */
1124 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
1125 return elapsed;
1126 else if (sTimebaseInfo.denom == 1)
1127 return elapsed * (uint64_t)sTimebaseInfo.numer;
1128 else {
1129 /* Decompose elapsed = eta32 * 2^32 + eps32: */
1130 uint64_t eta32 = elapsed >> 32;
1131 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
1132
1133 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
1134
1135 /* Form product of elapsed64 (decomposed) and numer: */
1136 uint64_t mu64 = numer * eta32;
1137 uint64_t lambda64 = numer * eps32;
1138
1139 /* Divide the constituents by denom: */
1140 uint64_t q32 = mu64/denom;
1141 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
1142
1143 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
1144 }
1145}
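/*
 * The decomposition above computes (elapsed * numer) / denom without needing
 * 128-bit arithmetic: writing elapsed = eta32 * 2^32 + eps32 and
 * numer * eta32 = q32 * denom + r32 gives
 *
 *	(elapsed * numer) / denom == (q32 << 32) + ((r32 << 32) + numer * eps32) / denom
 *
 * exactly, because the q32 * denom * 2^32 term is an integral multiple of denom.
 */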
1146
1147hrtime_t
1148dtrace_gethrtime(void)
1149{
1150 static uint64_t start = 0;
1151
1152 if (start == 0)
1153 start = mach_absolute_time();
1154
1155 return dtrace_abs_to_nano(mach_absolute_time() - start);
1156}
1157
1158/*
1159 * Atomicity and synchronization
1160 */
1161uint32_t
1162dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
1163{
1164	if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
1165 return cmp;
1166 else
1167 return ~cmp; /* Must return something *other* than cmp */
1168}
1169
1170void *
1171dtrace_casptr(void *target, void *cmp, void *new)
1172{
1173	if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
1174 return cmp;
1175 else
1176 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
1177}
1178
1179/*
1180 * Interrupt manipulation
1181 */
1182dtrace_icookie_t
1183dtrace_interrupt_disable(void)
1184{
1185 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
1186}
1187
1188void
1189dtrace_interrupt_enable(dtrace_icookie_t reenable)
1190{
1191 (void)ml_set_interrupts_enabled((boolean_t)reenable);
1192}
1193
1194/*
1195 * MP coordination
1196 */
1197static void
1198dtrace_sync_func(void) {}
1199
1200/*
1201 * dtrace_sync() is not called from probe context.
1202 */
1203void
1204dtrace_sync(void)
1205{
1206 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1207}
1208
1209/*
1210 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1211 */
1212
1213extern kern_return_t dtrace_copyio_preflight(addr64_t);
1214extern kern_return_t dtrace_copyio_postflight(addr64_t);
1215
1216static int
1217dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1218{
1219#pragma unused(kaddr)
1220
1221 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1222 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1223
1224 ASSERT(kaddr + size >= kaddr);
1225
1226	if ( uaddr + size < uaddr ||		/* Avoid address wrap. */
1227 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1228 {
1229 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1230 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1231 return (0);
1232 }
1233 return (1);
1234}
1235
1236void
1237dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1238{
1239#pragma unused(flags)
1240
1241 if (dtrace_copycheck( src, dst, len )) {
1242 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1243 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1244 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1245 }
1246 dtrace_copyio_postflight(src);
1247 }
1248}
1249
1250void
1251dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1252{
1253#pragma unused(flags)
1254
1255 size_t actual;
1256
1257 if (dtrace_copycheck( src, dst, len )) {
1258 /* copyin as many as 'len' bytes. */
1259 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1260
1261 /*
1262 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1263 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1264 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1265 * to the caller.
1266 */
1267 if (error && error != ENAMETOOLONG) {
1268 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1269 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1270 }
1271 dtrace_copyio_postflight(src);
1272 }
1273}
1274
1275void
1276dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1277{
1278#pragma unused(flags)
1279
1280 if (dtrace_copycheck( dst, src, len )) {
1281 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1282 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1283 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1284 }
1285 dtrace_copyio_postflight(dst);
1286 }
1287}
1288
1289void
1290dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1291{
1292#pragma unused(flags)
1293
1294 size_t actual;
1295
1296 if (dtrace_copycheck( dst, src, len )) {
1297
1298 /*
1299 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1300 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1301 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1302 * to the caller.
1303 */
1304 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1305 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1306 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1307 }
1308 dtrace_copyio_postflight(dst);
1309 }
1310}
1311
1312uint8_t
1313dtrace_fuword8(user_addr_t uaddr)
1314{
1315 uint8_t ret = 0;
1316
1317 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1318 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1319 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1320 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1321 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1322 }
1323 dtrace_copyio_postflight(uaddr);
1324 }
1325 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1326
1327 return(ret);
1328}
1329
1330uint16_t
1331dtrace_fuword16(user_addr_t uaddr)
1332{
1333 uint16_t ret = 0;
1334
1335 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1336 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1337 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1338 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1339 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1340 }
1341 dtrace_copyio_postflight(uaddr);
1342 }
1343 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1344
1345 return(ret);
1346}
1347
1348uint32_t
1349dtrace_fuword32(user_addr_t uaddr)
1350{
1351 uint32_t ret = 0;
1352
1353 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1354 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1355 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1356 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1357 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1358 }
1359 dtrace_copyio_postflight(uaddr);
1360 }
1361 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1362
1363 return(ret);
1364}
1365
1366uint64_t
1367dtrace_fuword64(user_addr_t uaddr)
1368{
1369 uint64_t ret = 0;
1370
1371 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1372 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1373 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1374 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1375 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1376 }
1377 dtrace_copyio_postflight(uaddr);
1378 }
1379 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1380
1381 return(ret);
1382}
1383
1384/*
1385 * Emulation of Solaris fuword / suword
1386 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1387 */
1388
1389int
1390fuword8(user_addr_t uaddr, uint8_t *value)
1391{
1392 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1393 return -1;
1394 }
1395
1396 return 0;
1397}
1398
1399int
1400fuword16(user_addr_t uaddr, uint16_t *value)
1401{
1402 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1403 return -1;
1404 }
1405
1406 return 0;
1407}
1408
1409int
1410fuword32(user_addr_t uaddr, uint32_t *value)
1411{
1412 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1413 return -1;
1414 }
1415
1416 return 0;
1417}
1418
1419int
1420fuword64(user_addr_t uaddr, uint64_t *value)
1421{
1422 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1423 return -1;
1424 }
1425
1426 return 0;
1427}
1428
1429void
1430fuword8_noerr(user_addr_t uaddr, uint8_t *value)
1431{
1432 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
1433 *value = 0;
1434 }
1435}
1436
1437void
1438fuword16_noerr(user_addr_t uaddr, uint16_t *value)
1439{
1440 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
1441 *value = 0;
1442 }
1443}
1444
1445void
1446fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1447{
1448 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1449 *value = 0;
1450 }
1451}
1452
1453void
1454fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1455{
1456 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1457 *value = 0;
1458 }
1459}
1460
1461int
1462suword64(user_addr_t addr, uint64_t value)
1463{
1464 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1465 return -1;
1466 }
1467
1468 return 0;
1469}
1470
1471int
1472suword32(user_addr_t addr, uint32_t value)
1473{
1474 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1475 return -1;
1476 }
1477
1478 return 0;
1479}
1480
1481int
1482suword16(user_addr_t addr, uint16_t value)
1483{
1484 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1485 return -1;
1486 }
1487
1488 return 0;
1489}
1490
1491int
1492suword8(user_addr_t addr, uint8_t value)
1493{
1494 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1495 return -1;
1496 }
1497
1498 return 0;
1499}
1500
1501
1502/*
1503 * Miscellaneous
1504 */
1505extern boolean_t dtrace_tally_fault(user_addr_t);
1506
1507boolean_t
1508dtrace_tally_fault(user_addr_t uaddr)
1509{
1510 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1511 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1512 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1513}
1514
1515#define TOTTY 0x02
1516extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1517
1518int
1519vuprintf(const char *format, va_list ap)
1520{
1521 return prf(format, ap, TOTTY, NULL);
1522}
1523
1524/* Not called from probe context */
1525void cmn_err( int level, const char *format, ... )
1526{
1527#pragma unused(level)
1528 va_list alist;
1529
1530 va_start(alist, format);
1531 vuprintf(format, alist);
1532 va_end(alist);
1533 uprintf("\n");
1534}
1535
1536/*
1537 * History:
1538 * 2002-01-24 gvdl Initial implementation of strstr
1539 */
1540
1541__private_extern__ const char *
1542strstr(const char *in, const char *str)
1543{
1544 char c;
1545 size_t len;
1546
1547 c = *str++;
1548 if (!c)
1549	return (const char *) in;	// Trivial empty string case
1550
1551 len = strlen(str);
1552 do {
1553 char sc;
1554
1555 do {
1556 sc = *in++;
1557 if (!sc)
1558 return (char *) 0;
1559 } while (sc != c);
1560 } while (strncmp(in, str, len) != 0);
1561
1562	return (const char *) (in - 1);
1563}
1564
1565/*
1566 * Runtime and ABI
1567 */
1568uintptr_t
1569dtrace_caller(int ignore)
1570{
1571#pragma unused(ignore)
1572 return -1; /* Just as in Solaris dtrace_asm.s */
1573}
1574
1575int
1576dtrace_getstackdepth(int aframes)
1577{
1578	struct frame *fp = (struct frame *)__builtin_frame_address(0);
1579 struct frame *nextfp, *minfp, *stacktop;
1580 int depth = 0;
1581 int on_intr;
1582
1583 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1584 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1585 else
1586		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1587
1588 minfp = fp;
1589
1590 aframes++;
1591
1592 for (;;) {
1593 depth++;
1594
1595 nextfp = *(struct frame **)fp;
1596
1597 if (nextfp <= minfp || nextfp >= stacktop) {
1598 if (on_intr) {
1599 /*
1600 * Hop from interrupt stack to thread stack.
1601 */
1602 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1603
1604 minfp = (struct frame *)kstack_base;
1605				stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1606
1607 on_intr = 0;
1608 continue;
1609 }
1610 break;
1611 }
1612
1613 fp = nextfp;
1614 minfp = fp;
1615 }
1616
1617 if (depth <= aframes)
1618 return (0);
1619
1620 return (depth - aframes);
1621}
1622
1623/*
1624 * Unconsidered
1625 */
1626void
1627dtrace_vtime_enable(void) {}
1628
1629void
1630dtrace_vtime_disable(void) {}
1631
1632#else /* else ! CONFIG_DTRACE */
1633
1634#include <sys/types.h>
1635#include <mach/vm_types.h>
1636#include <mach/kmod.h>
1637
1638/*
1639 * This exists to prevent build errors when dtrace is unconfigured.
1640 */
1641
1642kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1643
1644kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1645#pragma unused(arg1, arg2, arg3)
1646
1647 return KERN_FAILURE;
1648}
1649
1650#endif /* CONFIG_DTRACE */