1/*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30/*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36#if CONFIG_DTRACE
37
38#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39#include <kern/thread.h>
40#include <mach/thread_status.h>
41
42#include <stdarg.h>
43#include <string.h>
44#include <sys/malloc.h>
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <sys/proc_internal.h>
48#include <sys/kauth.h>
49#include <sys/user.h>
50#include <sys/systm.h>
51#include <sys/dtrace.h>
52#include <sys/dtrace_impl.h>
53#include <libkern/OSAtomic.h>
54#include <kern/thread_call.h>
55#include <kern/task.h>
56#include <kern/sched_prim.h>
57#include <kern/queue.h>
58#include <miscfs/devfs/devfs.h>
59#include <kern/kalloc.h>
60
61#include <mach/vm_param.h>
62#include <mach/mach_vm.h>
63#include <mach/task.h>
64#include <vm/pmap.h>
65#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
66
67/*
68 * pid/proc
69 */
70#define proc_t struct proc
71
72/* Not called from probe context */
73proc_t *
74sprlock(pid_t pid)
75{
76 proc_t* p;
77
78 if ((p = proc_find(pid)) == PROC_NULL) {
79 return PROC_NULL;
80 }
81
82 task_suspend(p->task);
83
84 proc_lock(p);
85
86 lck_mtx_lock(&p->p_dtrace_sprlock);
87
88 return p;
89}
90
91/* Not called from probe context */
92void
93sprunlock(proc_t *p)
94{
95 if (p != PROC_NULL) {
96 lck_mtx_unlock(&p->p_dtrace_sprlock);
97
98 proc_unlock(p);
99
100 task_resume(p->task);
101
102 proc_rele(p);
103 }
104}
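
/*
 * Illustrative pairing (a sketch; not lifted from any particular caller):
 *
 *	proc_t *p = sprlock(pid);
 *	if (p != PROC_NULL) {
 *		... examine or patch the now-suspended process ...
 *		sprunlock(p);
 *	}
 *
 * sprlock() returns with the target task suspended and with both the proc
 * lock and p_dtrace_sprlock held, so the critical section should stay short
 * and must not block on the target process itself.
 */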
105
106/*
107 * uread/uwrite
108 */
109
110// These are not exported from vm_map.h.
111extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
112extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
113
114/* Not called from probe context */
115int
116uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
117{
118 kern_return_t ret;
119
120 ASSERT(p != PROC_NULL);
121 ASSERT(p->task != NULL);
122
123 task_t task = p->task;
124
125 /*
126 * Grab a reference to the task vm_map_t to make sure
127 * the map isn't pulled out from under us.
128 *
129 * Because the proc_lock is not held at all times on all code
130 * paths leading here, it is possible for the proc to have
131 * exited. If the map is null, fail.
132 */
133 vm_map_t map = get_task_map_reference(task);
134 if (map) {
135 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
136 vm_map_deallocate(map);
137 } else
138 ret = KERN_TERMINATED;
139
140 return (int)ret;
141}
142
143
144/* Not called from probe context */
145int
146uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
147{
148 kern_return_t ret;
149
150 ASSERT(p != NULL);
151 ASSERT(p->task != NULL);
152
153 task_t task = p->task;
154
155 /*
156 * Grab a reference to the task vm_map_t to make sure
157 * the map isn't pulled out from under us.
158 *
159 * Because the proc_lock is not held at all times on all code
160 * paths leading here, it is possible for the proc to have
161 * exited. If the map is null, fail.
162 */
163 vm_map_t map = get_task_map_reference(task);
164 if (map) {
165 /* Find the memory permissions. */
166 uint32_t nestingDepth=999999;
167 vm_region_submap_short_info_data_64_t info;
168 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
169 mach_vm_address_t address = (mach_vm_address_t)a;
170 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
171
172 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
173 if (ret != KERN_SUCCESS)
174 goto done;
175
176 vm_prot_t reprotect;
177
178 if (!(info.protection & VM_PROT_WRITE)) {
179 /* Save the original protection values for restoration later */
180 reprotect = info.protection;
181
182 if (info.max_protection & VM_PROT_WRITE) {
183 /* The memory is not currently writable, but can be made writable. */
184 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
185 } else {
186 /*
187 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
188 *
 189 			 * Strangely, we can't just say "reprotect | VM_PROT_COPY"; that fails.
190 */
191 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
192 }
193
194 if (ret != KERN_SUCCESS)
195 goto done;
196
197 } else {
198 /* The memory was already writable. */
199 reprotect = VM_PROT_NONE;
200 }
201
202 ret = vm_map_write_user( map,
203 buf,
204 (vm_map_address_t)a,
205 (vm_size_t)len);
206
207 if (ret != KERN_SUCCESS)
208 goto done;
209
210 if (reprotect != VM_PROT_NONE) {
211 ASSERT(reprotect & VM_PROT_EXECUTE);
212 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
213 }
214
215done:
216 vm_map_deallocate(map);
217 } else
218 ret = KERN_TERMINATED;
219
220 return (int)ret;
221}
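
/*
 * Summary of the sequence above: query the region's current and maximum
 * protections with mach_vm_region_recurse(); if the pages are not writable,
 * either add VM_PROT_WRITE (when max_protection permits) or force a
 * copy-on-write mapping via VM_PROT_COPY; perform the write with
 * vm_map_write_user(); and finally restore the protections saved in
 * 'reprotect'. This is what allows user probes (e.g. fasttrap) to patch
 * instructions in otherwise read-only text.
 */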
222
223/*
224 * cpuvar
225 */
226lck_mtx_t cpu_lock;
227lck_mtx_t mod_lock;
228
229cpu_t *cpu_list;
230cpu_core_t *cpu_core; /* XXX TLB lockdown? */
231
232/*
233 * cred_t
234 */
235
236/*
237 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
238 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
239 */
240cred_t *
241dtrace_CRED(void)
242{
243 struct uthread *uthread = get_bsdthread_info(current_thread());
244
245 if (uthread == NULL)
246 return NULL;
247 else
248 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
249}
250
251#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
252#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
253 HAS_ALLPRIVS(cr) : \
254 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
255
256int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
257{
258#pragma unused(priv, all)
259 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
260}
261
262int
263PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
264{
265#pragma unused(priv, boolean)
266 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
267}
268
269gid_t
270crgetgid(const cred_t *cr) { return cr->cr_groups[0]; }
271
272uid_t
273crgetuid(const cred_t *cr) { return cr->cr_uid; }
274
275/*
276 * "cyclic"
277 */
278
279/* osfmk/kern/timer_call.h */
280typedef void *call_entry_param_t;
281typedef void (*call_entry_func_t)(
282 call_entry_param_t param0,
283 call_entry_param_t param1);
284
285typedef struct call_entry {
286 queue_chain_t q_link;
287 call_entry_func_t func;
288 call_entry_param_t param0;
289 call_entry_param_t param1;
290 uint64_t deadline;
291 enum {
292 IDLE,
293 PENDING,
294 DELAYED } state;
295} call_entry_data_t;
296
297
298typedef struct call_entry *timer_call_t;
299typedef void *timer_call_param_t;
300typedef void (*timer_call_func_t)(
301 timer_call_param_t param0,
302 timer_call_param_t param1);
303
304extern void
305timer_call_setup(
306 timer_call_t call,
307 timer_call_func_t func,
308 timer_call_param_t param0);
309
310extern boolean_t
311timer_call_enter1(
312 timer_call_t call,
313 timer_call_param_t param1,
314 uint64_t deadline);
315
316extern boolean_t
317timer_call_cancel(
318 timer_call_t call);
319
320typedef struct wrap_timer_call {
321 cyc_handler_t hdlr;
322 cyc_time_t when;
323 uint64_t deadline;
324 struct call_entry call;
325} wrap_timer_call_t;
326
327#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
328#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
329
330static void
331_timer_call_apply_cyclic( void *ignore, void *vTChdl )
332{
333#pragma unused(ignore)
334 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
335
336 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
337
338 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
339 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
340
341 /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
342 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
343 thread_wakeup((event_t)wrapTC);
344}
345
346static cyclic_id_t
347timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
348{
349 uint64_t now;
350
351 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
352 wrapTC->hdlr = *handler;
353 wrapTC->when = *when;
354
355 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
356
357 now = mach_absolute_time();
358 wrapTC->deadline = now;
359
360 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
361 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
362
363 return (cyclic_id_t)wrapTC;
364}
365
366static void
367timer_call_remove_cyclic(cyclic_id_t cyclic)
368{
369 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
370
371 while (!timer_call_cancel(&(wrapTC->call))) {
372 int ret = assert_wait(wrapTC, THREAD_UNINT);
373 ASSERT(ret == THREAD_WAITING);
374
375 wrapTC->when.cyt_interval = WAKEUP_REAPER;
376
377 ret = thread_block(THREAD_CONTINUE_NULL);
378 ASSERT(ret == THREAD_AWAKENED);
379 }
380}
381
382static void *
383timer_call_get_cyclic_arg(cyclic_id_t cyclic)
384{
385 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
386
387 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
388}
389
390cyclic_id_t
391cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
392{
393 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
394 if (NULL == wrapTC)
395 return CYCLIC_NONE;
396 else
397 return timer_call_add_cyclic( wrapTC, handler, when );
398}
399
400void
401cyclic_timer_remove(cyclic_id_t cyclic)
402{
403 ASSERT( cyclic != CYCLIC_NONE );
404
405 timer_call_remove_cyclic( cyclic );
406 _FREE((void *)cyclic, M_TEMP);
407}
408
409static void
410_cyclic_add_omni(cyclic_id_list_t cyc_list)
411{
412 cyc_time_t cT;
413 cyc_handler_t cH;
414 wrap_timer_call_t *wrapTC;
415 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
416 char *t;
417
418 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
419
420 t = (char *)cyc_list;
421 t += sizeof(cyc_omni_handler_t);
422 cyc_list = (cyclic_id_list_t)t;
423
424 t += sizeof(cyclic_id_t)*NCPU;
425 t += (sizeof(wrap_timer_call_t))*cpu_number();
426 wrapTC = (wrap_timer_call_t *)t;
427
428 cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
429}
430
431cyclic_id_list_t
432cyclic_add_omni(cyc_omni_handler_t *omni)
433{
434 cyclic_id_list_t cyc_list =
435 _MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
436 sizeof(cyclic_id_t)*NCPU +
437 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
438 if (NULL == cyc_list)
439 return (cyclic_id_list_t)CYCLIC_NONE;
440
441 *(cyc_omni_handler_t *)cyc_list = *omni;
442 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
443
444 return cyc_list;
445}
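
/*
 * Layout of the single _MALLOC block returned by cyclic_add_omni() and walked
 * by the pointer arithmetic in _cyclic_add_omni()/_cyclic_remove_omni():
 *
 *	[ cyc_omni_handler_t ][ cyclic_id_t x NCPU ][ wrap_timer_call_t x NCPU ]
 *
 * dtrace_xcall(DTRACE_CPUALL, ...) runs _cyclic_add_omni() on every CPU; each
 * CPU invokes the cyo_online callback to obtain its handler and interval, then
 * records its cyclic id in its own slot of the id array.
 */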
446
447static void
448_cyclic_remove_omni(cyclic_id_list_t cyc_list)
449{
450 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
451 void *oarg;
452 cyclic_id_t cid;
453 char *t;
454
455 t = (char *)cyc_list;
456 t += sizeof(cyc_omni_handler_t);
457 cyc_list = (cyclic_id_list_t)t;
458
459 cid = cyc_list[cpu_number()];
460 oarg = timer_call_get_cyclic_arg(cid);
461
462 timer_call_remove_cyclic( cid );
463 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
464}
465
466void
467cyclic_remove_omni(cyclic_id_list_t cyc_list)
468{
469 ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );
470
471 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
472 _FREE(cyc_list, M_TEMP);
473}
474
475typedef struct wrap_thread_call {
476 thread_call_t TChdl;
477 cyc_handler_t hdlr;
478 cyc_time_t when;
479 uint64_t deadline;
480} wrap_thread_call_t;
481
482/*
483 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
484 * cleaner and the deadman, but too distant in time and place for the profile provider.
485 */
486static void
487_cyclic_apply( void *ignore, void *vTChdl )
488{
489#pragma unused(ignore)
490 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
491
492 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
493
494 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
495 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
496
497 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
498 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
499 thread_wakeup((event_t)wrapTC);
500}
501
502cyclic_id_t
503cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
504{
505 uint64_t now;
506
507 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
508 if (NULL == wrapTC)
509 return CYCLIC_NONE;
510
511 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
512 wrapTC->hdlr = *handler;
513 wrapTC->when = *when;
514
515 ASSERT(when->cyt_when == 0);
516 ASSERT(when->cyt_interval < WAKEUP_REAPER);
517
518 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
519
520 now = mach_absolute_time();
521 wrapTC->deadline = now;
522
523 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
524 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
525
526 return (cyclic_id_t)wrapTC;
527}
528
529static void
530noop_cyh_func(void * ignore)
531{
532#pragma unused(ignore)
533}
534
535void
536cyclic_remove(cyclic_id_t cyclic)
537{
538 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
539
540 ASSERT(cyclic != CYCLIC_NONE);
541
542 while (!thread_call_cancel(wrapTC->TChdl)) {
543 int ret = assert_wait(wrapTC, THREAD_UNINT);
544 ASSERT(ret == THREAD_WAITING);
545
546 wrapTC->when.cyt_interval = WAKEUP_REAPER;
547
548 ret = thread_block(THREAD_CONTINUE_NULL);
549 ASSERT(ret == THREAD_AWAKENED);
550 }
551
552 if (thread_call_free(wrapTC->TChdl))
553 _FREE(wrapTC, M_TEMP);
554 else {
555 /* Gut this cyclic and move on ... */
556 wrapTC->hdlr.cyh_func = noop_cyh_func;
557 wrapTC->when.cyt_interval = NEARLY_FOREVER;
558 }
559}
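
/*
 * Illustrative client of cyclic_add()/cyclic_remove() (function and variable
 * names here are hypothetical):
 *
 *	static void my_tick(void *arg) { ... }		// fires once per interval
 *
 *	cyc_handler_t hdlr = { .cyh_func = my_tick, .cyh_arg = my_state };
 *	cyc_time_t when = { .cyt_when = 0, .cyt_interval = 10 * 1000 * 1000 };	// 10ms, in ns
 *	cyclic_id_t id = cyclic_add(&hdlr, &when);
 *	...
 *	cyclic_remove(id);
 *
 * Per the ASSERTs in cyclic_add(), cyt_when must be 0 and cyt_interval is
 * supplied in nanoseconds; it is converted to mach absolute time internally.
 */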
560
561/*
562 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
563 */
564
565thread_call_t
566dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
567{
568#pragma unused(arg)
569 thread_call_t call = thread_call_allocate(func, NULL);
570
571 nanoseconds_to_absolutetime(nanos, &nanos);
572
573 /*
574 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
575 * and clock drift on later invocations is not a worry.
576 */
577 uint64_t deadline = mach_absolute_time() + nanos;
578
579 thread_call_enter_delayed(call, deadline);
580
581 return call;
582}
583
584/*
585 * ddi
586 */
587void
588ddi_report_dev(dev_info_t *devi)
589{
590#pragma unused(devi)
591}
592
593#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
594static void *soft[NSOFT_STATES];
595
596int
597ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
598{
599#pragma unused(n_items)
600 int i;
601
602 for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
603 *(size_t *)state_p = size;
604 return 0;
605}
606
607int
608ddi_soft_state_zalloc(void *state, int item)
609{
610#pragma unused(state)
611 if (item < NSOFT_STATES)
612 return DDI_SUCCESS;
613 else
614 return DDI_FAILURE;
615}
616
617void *
618ddi_get_soft_state(void *state, int item)
619{
620#pragma unused(state)
621 ASSERT(item < NSOFT_STATES);
622 return soft[item];
623}
624
625int
626ddi_soft_state_free(void *state, int item)
627{
628 ASSERT(item < NSOFT_STATES);
629 bzero( soft[item], (size_t)state );
630 return DDI_SUCCESS;
631}
632
633void
634ddi_soft_state_fini(void **state_p)
635{
636#pragma unused(state_p)
637 int i;
638
639 for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
640}
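
/*
 * Sketch of the expected calling sequence (the 'dtrace_softstate' handle and
 * 'dtrace_state_t' type are stand-ins for whatever the client actually uses):
 *
 *	static void *dtrace_softstate;
 *
 *	ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t), 0);
 *	ddi_soft_state_zalloc(dtrace_softstate, minor);
 *	dtrace_state_t *state = ddi_get_soft_state(dtrace_softstate, minor);
 *	...
 *	ddi_soft_state_free(dtrace_softstate, minor);
 *	ddi_soft_state_fini(&dtrace_softstate);
 *
 * In this shim the opaque "state" is really just the per-item size smuggled
 * through a void *, and all NSOFT_STATES slots are pre-allocated up front by
 * ddi_soft_state_init().
 */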
641
642static unsigned int gRegisteredProps = 0;
643static struct {
644 char name[32]; /* enough for "dof-data-" + digits */
645 int *data;
646 uint_t nelements;
647} gPropTable[16];
648
649kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
650
651kern_return_t
652_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
653{
654 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
655 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);
656
657 if (NULL == p)
658 return KERN_FAILURE;
659
660 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
661 gPropTable[gRegisteredProps].nelements = nelements;
662 gPropTable[gRegisteredProps].data = p;
663
664 while (nelements-- > 0) {
665 *p++ = (int)(*data++);
666 }
667
668 gRegisteredProps++;
669 return KERN_SUCCESS;
670 }
671 else
672 return KERN_FAILURE;
673}
674
675int
676ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
677 char *name, int **data, uint_t *nelements)
678{
679#pragma unused(match_dev,dip,flags)
680 unsigned int i;
681 for (i = 0; i < gRegisteredProps; ++i)
682 {
683 if (0 == strncmp(name, gPropTable[i].name,
684 sizeof(gPropTable[i].name))) {
685 *data = gPropTable[i].data;
686 *nelements = gPropTable[i].nelements;
687 return DDI_SUCCESS;
688 }
689 }
690 return DDI_FAILURE;
691}
692
693int
694ddi_prop_free(void *buf)
695{
696 _FREE(buf, M_TEMP);
697 return DDI_SUCCESS;
698}
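
/*
 * How the anon-DOF plumbing above fits together (a sketch; "dof-data-0" is
 * merely the naming pattern the 32-byte name field anticipates, and the exact
 * property names are chosen by the registering kext):
 *
 *	// Early in boot, from the external kext:
 *	_dtrace_register_anon_DOF("dof-data-0", dof_bytes, dof_size);
 *
 *	// Later, retrieved Solaris-style:
 *	int *data; uint_t n;
 *	if (ddi_prop_lookup_int_array((dev_t)0, devi, 0, "dof-data-0",
 *	    &data, &n) == DDI_SUCCESS) {
 *		... consume n ints, one per original DOF byte ...
 *		ddi_prop_free(data);
 *	}
 *
 * Note the widening: each uchar_t of DOF is stored as an int, matching the
 * Solaris int-array property interface.
 */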
699
700int
701ddi_driver_major(dev_info_t *devi) { return (int)major(devi); }
702
703int
704ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
705 minor_t minor_num, const char *node_type, int flag)
706{
707#pragma unused(spec_type,node_type,flag)
708 dev_t dev = makedev( (uint32_t)dip, minor_num );
709
710 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
711 return DDI_FAILURE;
712 else
713 return DDI_SUCCESS;
714}
715
716void
717ddi_remove_minor_node(dev_info_t *dip, char *name)
718{
719#pragma unused(dip,name)
720/* XXX called from dtrace_detach, so NOTREACHED for now. */
721}
722
723major_t
724getemajor( dev_t d )
725{
726 return (major_t) major(d);
727}
728
729minor_t
730getminor ( dev_t d )
731{
732 return (minor_t) minor(d);
733}
734
735dev_t
736makedevice(major_t major, minor_t minor)
737{
738 return makedev( major, minor );
739}
740
741int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
742{
743#pragma unused(dev, dip, flags, name)
744
745 return defvalue;
746}
747
748/*
749 * Kernel Debug Interface
750 */
751int
752kdi_dtrace_set(kdi_dtrace_set_t ignore)
753{
754#pragma unused(ignore)
755 return 0; /* Success */
756}
757
758extern void Debugger(const char*);
759
760void
761debug_enter(char *c) { Debugger(c); }
762
763/*
764 * kmem
765 */
766
767void *
768dt_kmem_alloc(size_t size, int kmflag)
769{
770#pragma unused(kmflag)
771
772/*
773 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
774 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
775 */
776#if defined(DTRACE_MEMORY_ZONES)
777 return dtrace_alloc(size);
778#else
779 return kalloc(size);
780#endif
781}
782
783void *
784dt_kmem_zalloc(size_t size, int kmflag)
785{
786#pragma unused(kmflag)
787
788/*
789 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
790 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
791 */
792#if defined(DTRACE_MEMORY_ZONES)
793 void* buf = dtrace_alloc(size);
794#else
795 void* buf = kalloc(size);
796#endif
797
798 if(!buf)
799 return NULL;
800
801 bzero(buf, size);
802
803 return buf;
804}
805
806void
807dt_kmem_free(void *buf, size_t size)
808{
809#pragma unused(size)
810 /*
 811 	 * DTrace relies on this; it does a lot of NULL frees.
 812 	 * A NULL free causes the debug builds to panic.
813 */
814 if (buf == NULL) return;
815
816 ASSERT(size > 0);
817
818#if defined(DTRACE_MEMORY_ZONES)
819 dtrace_free(buf, size);
820#else
821 kfree(buf, size);
822#endif
823}
824
825
826
827/*
828 * aligned kmem allocator
829 * align should be a power of two
830 */
831
832void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
833{
834 void* buf;
835 intptr_t p;
836 void** buf_backup;
837
838 buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);
839
840 if(!buf)
841 return NULL;
842
843 p = (intptr_t)buf;
844 p += sizeof(void*); /* now we have enough room to store the backup */
845 p = P2ROUNDUP(p, align); /* and now we're aligned */
846
847 buf_backup = (void**)(p - sizeof(void*));
848 *buf_backup = buf; /* back up the address we need to free */
849
850 return (void*)p;
851}
852
853void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
854{
855 void* buf;
856
857 buf = dt_kmem_alloc_aligned(size, align, kmflag);
858
859 if(!buf)
860 return NULL;
861
862 bzero(buf, size);
863
864 return buf;
865}
866
867void dt_kmem_free_aligned(void* buf, size_t size)
868{
869#pragma unused(size)
870 intptr_t p;
871 void** buf_backup;
872
873 p = (intptr_t)buf;
874 p -= sizeof(void*);
875 buf_backup = (void**)(p);
876
877 dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
878}
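
/*
 * Memory layout behind dt_kmem_alloc_aligned() / dt_kmem_free_aligned():
 *
 *	raw dt_kmem_alloc() buffer             pointer returned to the caller
 *	|                                      |
 *	v                                      v
 *	[ pad to 'align' ][ void *backup ptr  ][ 'size' bytes, 'align'-aligned ]
 *
 * The word immediately preceding the returned pointer holds the original
 * allocation address; dt_kmem_free_aligned() reads it back to free the
 * whole block.
 */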
879
880/*
881 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
882 * doesn't specify constructor, destructor, or reclaim methods.
883 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
884 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
885 */
886kmem_cache_t *
887kmem_cache_create(
888 char *name, /* descriptive name for this cache */
889 size_t bufsize, /* size of the objects it manages */
890 size_t align, /* required object alignment */
891 int (*constructor)(void *, void *, int), /* object constructor */
892 void (*destructor)(void *, void *), /* object destructor */
893 void (*reclaim)(void *), /* memory reclaim callback */
894 void *private, /* pass-thru arg for constr/destr/reclaim */
895 vmem_t *vmp, /* vmem source for slab allocation */
896 int cflags) /* cache creation flags */
897{
898#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
899 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
900}
901
902void *
903kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
904{
905#pragma unused(kmflag)
906 size_t bufsize = (size_t)cp;
907 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
908}
909
910void
911kmem_cache_free(kmem_cache_t *cp, void *buf)
912{
913#pragma unused(cp)
914 _FREE(buf, M_TEMP);
915}
916
917void
918kmem_cache_destroy(kmem_cache_t *cp)
919{
920#pragma unused(cp)
921}
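
/*
 * Illustrative use, mirroring the single-client pattern described above (the
 * cache name, element type and KM_SLEEP flag are placeholders; everything but
 * 'bufsize' is ignored by this shim):
 *
 *	kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *	    sizeof (dtrace_state_percpu_t) * NCPU, 0,
 *	    NULL, NULL, NULL, NULL, NULL, 0);
 *	void *buf = kmem_cache_alloc(cp, KM_SLEEP);
 *	bzero(buf, sizeof (dtrace_state_percpu_t) * NCPU);	// caller zeroes, per the note above
 *	...
 *	kmem_cache_free(cp, buf);
 *	kmem_cache_destroy(cp);
 */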
922
923/*
924 * taskq
925 */
926extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
927
928static void
929_taskq_apply( task_func_t func, thread_call_param_t arg )
930{
931 func( (void *)arg );
932}
933
934taskq_t *
935taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
936 int maxalloc, uint_t flags)
937{
938#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
939
940 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
941}
942
943taskqid_t
944taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
945{
946#pragma unused(flags)
947 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
948 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
949 return (taskqid_t) tq /* for lack of anything better */;
950}
951
952void
953taskq_destroy(taskq_t *tq)
954{
955 thread_call_cancel( (thread_call_t) tq );
956 thread_call_free( (thread_call_t) tq );
957}
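
/*
 * Illustrative dispatch (a sketch; the function and argument names are made
 * up, and every taskq_create() parameter other than the allocation itself is
 * ignored by this shim):
 *
 *	taskq_t *tq = taskq_create("dtrace_taskq", 1, maxclsyspri, 0, 0, 0);
 *	taskq_dispatch(tq, my_deferred_work, my_arg, 0);
 *	...
 *	taskq_destroy(tq);
 *
 * Because the taskq is backed by a single thread_call_t that is re-setup on
 * every dispatch, only one piece of work can be outstanding at a time.
 */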
958
959pri_t maxclsyspri;
960
961/*
962 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
963 */
964typedef unsigned int u_daddr_t;
965#include "blist.h"
966
967/* By passing around blist *handles*, the underlying blist can be resized as needed. */
968struct blist_hdl {
969 blist_t blist;
970};
971
972vmem_t *
973vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
974 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
975{
976#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
977 blist_t bl;
978 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
979
980 ASSERT(quantum == 1);
981 ASSERT(NULL == ignore5);
982 ASSERT(NULL == ignore6);
983 ASSERT(NULL == source);
984 ASSERT(0 == qcache_max);
985 ASSERT(vmflag & VMC_IDENTIFIER);
986
987 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
988
989 p->blist = bl = blist_create( size );
990 blist_free(bl, 0, size);
991 if (base) blist_alloc( bl, (daddr_t)base ); /* Chomp off initial ID(s) */
992
993 return (vmem_t *)p;
994}
995
996void *
997vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
998{
999#pragma unused(vmflag)
1000 struct blist_hdl *q = (struct blist_hdl *)vmp;
1001 blist_t bl = q->blist;
1002 daddr_t p;
1003
1004 p = blist_alloc(bl, (daddr_t)size);
1005
1006 if ((daddr_t)-1 == p) {
1007 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
1008 q->blist = bl;
1009 p = blist_alloc(bl, (daddr_t)size);
1010 if ((daddr_t)-1 == p)
1011 panic("vmem_alloc: failure after blist_resize!");
1012 }
1013
1014 return (void *)p;
1015}
1016
1017void
1018vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1019{
1020 struct blist_hdl *p = (struct blist_hdl *)vmp;
1021
1022 blist_free( p->blist, (daddr_t)vaddr, (daddr_t)size );
1023}
1024
1025void
1026vmem_destroy(vmem_t *vmp)
1027{
1028 struct blist_hdl *p = (struct blist_hdl *)vmp;
1029
1030 blist_destroy( p->blist );
 1031 	_FREE( p, M_TEMP );	/* matches the M_TEMP _MALLOC in vmem_create() */
1032}
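
/*
 * Illustrative use as an ID arena only (the arena name is a placeholder;
 * every argument other than 'base' and 'size' is ignored or merely ASSERTed
 * by this shim):
 *
 *	vmem_t *arena = vmem_create("dtrace_minor", (void *)1, UINT32_MAX, 1,
 *	    NULL, NULL, NULL, 0, VMC_IDENTIFIER);
 *	minor_t minor = (minor_t)(uintptr_t)vmem_alloc(arena, 1, 0);
 *	...
 *	vmem_free(arena, (void *)(uintptr_t)minor, 1);
 *	vmem_destroy(arena);
 *
 * Passing a non-zero 'base' reserves the lowest IDs so they are never handed
 * out (the "Chomp off initial ID(s)" in vmem_create() above).
 */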
1033
1034/*
1035 * Timing
1036 */
1037
1038/*
1039 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
1040 * January 1, 1970. Because it can be called from probe context, it must take no locks.
1041 */
1042
1043hrtime_t
1044dtrace_gethrestime(void)
1045{
1046 uint32_t secs, nanosecs;
1047 uint64_t secs64, ns64;
1048
1049 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
1050 secs64 = (uint64_t)secs;
1051 ns64 = (uint64_t)nanosecs;
1052
1053 ns64 = ns64 + (secs64 * 1000000000LL);
1054 return ns64;
1055}
1056
1057/*
1058 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
1059 * Hence its primary use is to specify intervals.
1060 */
1061
1062hrtime_t
1063dtrace_abs_to_nano(uint64_t elapsed)
1064{
1065 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
1066
1067 /*
1068 * If this is the first time we've run, get the timebase.
1069 * We can use denom == 0 to indicate that sTimebaseInfo is
1070 * uninitialised because it makes no sense to have a zero
1071 * denominator in a fraction.
1072 */
1073
1074 if ( sTimebaseInfo.denom == 0 ) {
1075 (void) clock_timebase_info(&sTimebaseInfo);
1076 }
1077
1078 /*
1079 * Convert to nanoseconds.
1080 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
1081 *
1082 * Provided the final result is representable in 64 bits the following maneuver will
1083 * deliver that result without intermediate overflow.
1084 */
1085 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
1086 return elapsed;
1087 else if (sTimebaseInfo.denom == 1)
1088 return elapsed * (uint64_t)sTimebaseInfo.numer;
1089 else {
1090 /* Decompose elapsed = eta32 * 2^32 + eps32: */
1091 uint64_t eta32 = elapsed >> 32;
1092 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
1093
1094 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
1095
1096 /* Form product of elapsed64 (decomposed) and numer: */
1097 uint64_t mu64 = numer * eta32;
1098 uint64_t lambda64 = numer * eps32;
1099
1100 /* Divide the constituents by denom: */
1101 uint64_t q32 = mu64/denom;
1102 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
1103
1104 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
1105 }
1106}
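
/*
 * The identity behind the decomposition above, writing n = numer and d = denom:
 *
 *	elapsed * n / d = (eta32 * 2^32 + eps32) * n / d
 *	                = (mu64 / d) * 2^32 + ((mu64 % d) * 2^32 + lambda64) / d
 *	                = (q32 << 32)       + ((r32 << 32) + lambda64) / d
 *
 * which is exactly the value returned; per the comment above, this avoids the
 * intermediate overflow that computing elapsed * numer directly could incur.
 */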
1107
1108hrtime_t
1109dtrace_gethrtime(void)
1110{
1111 static uint64_t start = 0;
1112
1113 if (start == 0)
1114 start = mach_absolute_time();
1115
1116 return dtrace_abs_to_nano(mach_absolute_time() - start);
1117}
1118
1119/*
1120 * Atomicity and synchronization
1121 */
1122uint32_t
1123dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
1124{
1125 if (OSCompareAndSwap( cmp, new, (unsigned long *)target ))
1126 return cmp;
1127 else
1128 return ~cmp; /* Must return something *other* than cmp */
1129}
1130
1131void *
1132dtrace_casptr(void *target, void *cmp, void *new)
1133{
1134#if defined(__LP64__)
1135#error dtrace_casptr implementation missing for LP64
1136#else
1137 if (OSCompareAndSwap( (uint32_t)cmp, (uint32_t)new, (unsigned long *)target ))
1138 return cmp;
1139 else
1140 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
1141#endif
1142}
1143
1144/*
1145 * Interrupt manipulation
1146 */
1147dtrace_icookie_t
1148dtrace_interrupt_disable(void)
1149{
1150 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
1151}
1152
1153void
1154dtrace_interrupt_enable(dtrace_icookie_t reenable)
1155{
1156 (void)ml_set_interrupts_enabled((boolean_t)reenable);
1157}
1158
1159/*
1160 * MP coordination
1161 */
1162static void
1163dtrace_sync_func(void) {}
1164
1165/*
1166 * dtrace_sync() is not called from probe context.
1167 */
1168void
1169dtrace_sync(void)
1170{
1171 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1172}
1173
1174/*
1175 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1176 */
1177
1178extern kern_return_t dtrace_copyio_preflight(addr64_t);
1179extern kern_return_t dtrace_copyio_postflight(addr64_t);
1180
1181static int
1182dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1183{
1184#pragma unused(kaddr)
1185
1186 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1187 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1188
1189 ASSERT(kaddr + size >= kaddr);
1190
1191 if (ml_at_interrupt_context() || /* Avoid possible copyio page fault on int stack, which panics! */
1192 0 != recover || /* Avoid reentrancy into copyio facility. */
1193 uaddr + size < uaddr || /* Avoid address wrap. */
1194 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1195 {
1196 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1197 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1198 return (0);
1199 }
1200 return (1);
1201}
1202
1203void
1204dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len)
1205{
1206 if (dtrace_copycheck( src, dst, len )) {
1207 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1208 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1209 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1210 }
1211 dtrace_copyio_postflight(src);
1212 }
1213}
1214
1215void
1216dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len)
1217{
1218 size_t actual;
1219
1220 if (dtrace_copycheck( src, dst, len )) {
1221 /* copyin as many as 'len' bytes. */
1222 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1223
1224 /*
1225 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1226 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1227 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1228 * to the caller.
1229 */
1230 if (error && error != ENAMETOOLONG) {
1231 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1232 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1233 }
1234 dtrace_copyio_postflight(src);
1235 }
1236}
1237
1238void
1239dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len)
1240{
1241 if (dtrace_copycheck( dst, src, len )) {
1242 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1243 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1244 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1245 }
1246 dtrace_copyio_postflight(dst);
1247 }
1248}
1249
1250void
1251dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len)
1252{
1253 size_t actual;
1254
1255 if (dtrace_copycheck( dst, src, len )) {
1256
1257 /*
1258 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1259 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1260 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1261 * to the caller.
1262 */
1263 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1264 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1265 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1266 }
1267 dtrace_copyio_postflight(dst);
1268 }
1269}
1270
1271uint8_t
1272dtrace_fuword8(user_addr_t uaddr)
1273{
1274 uint8_t ret = 0;
1275
1276 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1277 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1278 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1279 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1280 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1281 }
1282 dtrace_copyio_postflight(uaddr);
1283 }
1284 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1285
1286 return(ret);
1287}
1288
1289uint16_t
1290dtrace_fuword16(user_addr_t uaddr)
1291{
1292 uint16_t ret = 0;
1293
1294 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1295 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1296 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1297 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1298 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1299 }
1300 dtrace_copyio_postflight(uaddr);
1301 }
1302 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1303
1304 return(ret);
1305}
1306
1307uint32_t
1308dtrace_fuword32(user_addr_t uaddr)
1309{
1310 uint32_t ret = 0;
1311
1312 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1313 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1314 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1315 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1316 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1317 }
1318 dtrace_copyio_postflight(uaddr);
1319 }
1320 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1321
1322 return(ret);
1323}
1324
1325uint64_t
1326dtrace_fuword64(user_addr_t uaddr)
1327{
1328 uint64_t ret = 0;
1329
1330 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1331 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1332 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1333 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1334 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1335 }
1336 dtrace_copyio_postflight(uaddr);
1337 }
1338 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1339
1340 return(ret);
1341}
1342
1343/*
1344 * Emulation of Solaris fuword / suword
 1345 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1346 */
1347
1348int
1349fuword8(user_addr_t uaddr, uint8_t *value)
1350{
1351 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1352 return -1;
1353 }
1354
1355 return 0;
1356}
1357
1358int
1359fuword16(user_addr_t uaddr, uint16_t *value)
1360{
1361 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1362 return -1;
1363 }
1364
1365 return 0;
1366}
1367
1368int
1369fuword32(user_addr_t uaddr, uint32_t *value)
1370{
1371 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1372 return -1;
1373 }
1374
1375 return 0;
1376}
1377
1378int
1379fuword64(user_addr_t uaddr, uint64_t *value)
1380{
1381 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1382 return -1;
1383 }
1384
1385 return 0;
1386}
1387
1388void
1389fuword8_noerr(user_addr_t uaddr, uint8_t *value)
1390{
1391 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
1392 *value = 0;
1393 }
1394}
1395
1396void
1397fuword16_noerr(user_addr_t uaddr, uint16_t *value)
1398{
1399 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
1400 *value = 0;
1401 }
1402}
1403
1404void
1405fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1406{
1407 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1408 *value = 0;
1409 }
1410}
1411
1412void
1413fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1414{
1415 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1416 *value = 0;
1417 }
1418}
1419
1420int
1421suword64(user_addr_t addr, uint64_t value)
1422{
1423 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1424 return -1;
1425 }
1426
1427 return 0;
1428}
1429
1430int
1431suword32(user_addr_t addr, uint32_t value)
1432{
1433 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1434 return -1;
1435 }
1436
1437 return 0;
1438}
1439
1440int
1441suword16(user_addr_t addr, uint16_t value)
1442{
1443 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1444 return -1;
1445 }
1446
1447 return 0;
1448}
1449
1450int
1451suword8(user_addr_t addr, uint8_t value)
1452{
1453 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1454 return -1;
1455 }
1456
1457 return 0;
1458}
1459
1460
1461/*
1462 * Miscellaneous
1463 */
1464extern boolean_t dtrace_tally_fault(user_addr_t);
1465
1466boolean_t
1467dtrace_tally_fault(user_addr_t uaddr)
1468{
1469 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1470 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1471 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1472}
1473
1474void
1475dtrace_vpanic(const char *format, va_list alist)
1476{
1477 vuprintf( format, alist );
1478 panic("dtrace_vpanic");
1479}
1480
1481#define TOTTY 0x02
1482extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1483
1484int
1485vuprintf(const char *format, va_list ap)
1486{
1487 return prf(format, ap, TOTTY, NULL);
1488}
1489
1490/* Not called from probe context */
1491void cmn_err( int level, const char *format, ... )
1492{
1493#pragma unused(level)
1494 va_list alist;
1495
1496 va_start(alist, format);
1497 vuprintf(format, alist);
1498 va_end(alist);
1499 uprintf("\n");
1500}
1501
1502/*
1503 * History:
1504 * 2002-01-24 gvdl Initial implementation of strstr
1505 */
1506
1507__private_extern__ char *
1508strstr(const char *in, const char *str)
1509{
1510 char c;
1511 size_t len;
1512
1513 c = *str++;
1514 if (!c)
1515 return (char *) in; // Trivial empty string case
1516
1517 len = strlen(str);
1518 do {
1519 char sc;
1520
1521 do {
1522 sc = *in++;
1523 if (!sc)
1524 return (char *) 0;
1525 } while (sc != c);
1526 } while (strncmp(in, str, len) != 0);
1527
1528 return (char *) (in - 1);
1529}
1530
1531/*
1532 * Runtime and ABI
1533 */
1534uintptr_t
1535dtrace_caller(int ignore)
1536{
1537#pragma unused(ignore)
1538 return -1; /* Just as in Solaris dtrace_asm.s */
1539}
1540
1541int
1542dtrace_getstackdepth(int aframes)
1543{
1544 struct frame *fp = (struct frame *)dtrace_getfp();
1545 struct frame *nextfp, *minfp, *stacktop;
1546 int depth = 0;
1547 int on_intr;
1548
1549 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1550 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1551 else
1552 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + KERNEL_STACK_SIZE);
1553
1554 minfp = fp;
1555
1556 aframes++;
1557
1558 for (;;) {
1559 depth++;
1560
1561 nextfp = *(struct frame **)fp;
1562
1563 if (nextfp <= minfp || nextfp >= stacktop) {
1564 if (on_intr) {
1565 /*
1566 * Hop from interrupt stack to thread stack.
1567 */
1568 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1569
1570 minfp = (struct frame *)kstack_base;
1571 stacktop = (struct frame *)(kstack_base + KERNEL_STACK_SIZE);
1572
1573 on_intr = 0;
1574 continue;
1575 }
1576 break;
1577 }
1578
1579 fp = nextfp;
1580 minfp = fp;
1581 }
1582
1583 if (depth <= aframes)
1584 return (0);
1585
1586 return (depth - aframes);
1587}
1588
1589/*
1590 * Unconsidered
1591 */
1592void
1593dtrace_vtime_enable(void) {}
1594
1595void
1596dtrace_vtime_disable(void) {}
1597
1598#else /* else ! CONFIG_DTRACE */
1599
1600#include <sys/types.h>
1601#include <mach/vm_types.h>
1602#include <mach/kmod.h>
1603
1604/*
1605 * This exists to prevent build errors when dtrace is unconfigured.
1606 */
1607
1608kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1609
1610kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1611#pragma unused(arg1, arg2, arg3)
1612
1613 return KERN_FAILURE;
1614}
1615
1616#endif /* CONFIG_DTRACE */