apple/xnu xnu-1504.15.3: bsd/dev/dtrace/dtrace_glue.c
1 /*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 /*
31 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
32 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
33 * an external kext to link against.
34 */
35
36 #if CONFIG_DTRACE
37
38 #define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
39 #include <kern/thread.h>
40 #include <mach/thread_status.h>
41
42 #include <stdarg.h>
43 #include <string.h>
44 #include <sys/malloc.h>
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <sys/proc_internal.h>
48 #include <sys/kauth.h>
49 #include <sys/user.h>
50 #include <sys/systm.h>
51 #include <sys/dtrace.h>
52 #include <sys/dtrace_impl.h>
53 #include <libkern/OSAtomic.h>
54 #include <kern/thread_call.h>
55 #include <kern/task.h>
56 #include <kern/sched_prim.h>
57 #include <kern/queue.h>
58 #include <miscfs/devfs/devfs.h>
59 #include <kern/kalloc.h>
60
61 #include <mach/vm_param.h>
62 #include <mach/mach_vm.h>
63 #include <mach/task.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
66
67 /*
68 * pid/proc
69 */
70 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
71 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
72
73 /* Not called from probe context */
74 proc_t *
75 sprlock(pid_t pid)
76 {
77 proc_t* p;
78
79 if ((p = proc_find(pid)) == PROC_NULL) {
80 return PROC_NULL;
81 }
82
83 task_suspend(p->task);
84
85 proc_lock(p);
86
87 lck_mtx_lock(&p->p_dtrace_sprlock);
88
89 return p;
90 }
91
92 /* Not called from probe context */
93 void
94 sprunlock(proc_t *p)
95 {
96 if (p != PROC_NULL) {
97 lck_mtx_unlock(&p->p_dtrace_sprlock);
98
99 proc_unlock(p);
100
101 task_resume(p->task);
102
103 proc_rele(p);
104 }
105 }
106
107 /*
108 * uread/uwrite
109 */
110
111 // These are not exported from vm_map.h.
112 extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
113 extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);
114
115 /* Not called from probe context */
116 int
117 uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
118 {
119 kern_return_t ret;
120
121 ASSERT(p != PROC_NULL);
122 ASSERT(p->task != NULL);
123
124 task_t task = p->task;
125
126 /*
127 * Grab a reference to the task vm_map_t to make sure
128 * the map isn't pulled out from under us.
129 *
130 * Because the proc_lock is not held at all times on all code
131 * paths leading here, it is possible for the proc to have
132 * exited. If the map is null, fail.
133 */
134 vm_map_t map = get_task_map_reference(task);
135 if (map) {
136 ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
137 vm_map_deallocate(map);
138 } else
139 ret = KERN_TERMINATED;
140
141 return (int)ret;
142 }
143
144
145 /* Not called from probe context */
146 int
147 uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
148 {
149 kern_return_t ret;
150
151 ASSERT(p != PROC_NULL);
152 ASSERT(p->task != NULL);
153
154 task_t task = p->task;
155
156 /*
157 * Grab a reference to the task vm_map_t to make sure
158 * the map isn't pulled out from under us.
159 *
160 * Because the proc_lock is not held at all times on all code
161 * paths leading here, it is possible for the proc to have
162 * exited. If the map is null, fail.
163 */
164 vm_map_t map = get_task_map_reference(task);
165 if (map) {
166 /* Find the memory permissions. */
167 uint32_t nestingDepth=999999;
168 vm_region_submap_short_info_data_64_t info;
169 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
170 mach_vm_address_t address = (mach_vm_address_t)a;
171 mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
172
173 ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
174 if (ret != KERN_SUCCESS)
175 goto done;
176
177 vm_prot_t reprotect;
178
179 if (!(info.protection & VM_PROT_WRITE)) {
180 /* Save the original protection values for restoration later */
181 reprotect = info.protection;
182
183 if (info.max_protection & VM_PROT_WRITE) {
184 /* The memory is not currently writable, but can be made writable. */
185 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
186 } else {
187 /*
188 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
189 *
190 * Oddly, requesting just (reprotect | VM_PROT_COPY) fails; we must ask for COPY together with READ and WRITE explicitly.
191 */
192 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
193 }
194
195 if (ret != KERN_SUCCESS)
196 goto done;
197
198 } else {
199 /* The memory was already writable. */
200 reprotect = VM_PROT_NONE;
201 }
202
203 ret = vm_map_write_user( map,
204 buf,
205 (vm_map_address_t)a,
206 (vm_size_t)len);
207
208 if (ret != KERN_SUCCESS)
209 goto done;
210
211 if (reprotect != VM_PROT_NONE) {
212 ASSERT(reprotect & VM_PROT_EXECUTE);
213 ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
214 }
215
216 done:
217 vm_map_deallocate(map);
218 } else
219 ret = KERN_TERMINATED;
220
221 return (int)ret;
222 }
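
/*
 * Illustrative sketch (not part of the original source): how a caller outside
 * probe context, e.g. a provider patching user text, might pair sprlock() and
 * sprunlock() with uread()/uwrite(). The helper name and the errno mapping
 * below are hypothetical.
 *
 *	static int
 *	patch_user_byte(pid_t pid, user_addr_t addr, uint8_t newop, uint8_t *oldop)
 *	{
 *		proc_t *p = sprlock(pid);	// find + suspend the task, take p_dtrace_sprlock
 *		int err;
 *
 *		if (p == PROC_NULL)
 *			return ESRCH;
 *
 *		err = uread(p, oldop, sizeof(*oldop), addr);	// save the original byte
 *		if (KERN_SUCCESS == err)
 *			err = uwrite(p, &newop, sizeof(newop), addr);	// uwrite copes with COW/protection
 *
 *		sprunlock(p);	// drop the lock, resume the task, release the proc ref
 *		return (KERN_SUCCESS == err) ? 0 : EFAULT;
 *	}
 */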
223
224 /*
225 * cpuvar
226 */
227 lck_mtx_t cpu_lock;
228 lck_mtx_t mod_lock;
229
230 cpu_t *cpu_list;
231 cpu_core_t *cpu_core; /* XXX TLB lockdown? */
232
233 /*
234 * cred_t
235 */
236
237 /*
238 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
239 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
240 */
241 cred_t *
242 dtrace_CRED(void)
243 {
244 struct uthread *uthread = get_bsdthread_info(current_thread());
245
246 if (uthread == NULL)
247 return NULL;
248 else
249 return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
250 }
251
252 #define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
253 #define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
254 HAS_ALLPRIVS(cr) : \
255 PRIV_ISASSERT(&CR_OEPRIV(cr), pr))
256
257 int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
258 {
259 #pragma unused(priv, all)
260 return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
261 }
262
263 int
264 PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
265 {
266 #pragma unused(priv, boolean)
267 return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
268 }
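
/*
 * Illustrative sketch (not part of the original source): because dtrace_CRED()
 * avoids the proc_lock, its result can be consulted from probe context and fed
 * to the policy shims above. The helper name is hypothetical; PRIV_ALL is the
 * same constant used by HAS_PRIVILEGE().
 *
 *	static int
 *	dtrace_caller_is_privileged(void)
 *	{
 *		cred_t *cr = dtrace_CRED();
 *
 *		if (cr == NULL)		// no uthread, or credential not yet bound
 *			return 0;
 *
 *		return PRIV_POLICY_ONLY(cr, PRIV_ALL, 0);	// currently just kauth_cred_issuser()
 *	}
 */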
269
270 gid_t
271 crgetgid(const cred_t *cr) { return cr->cr_groups[0]; }
272
273 uid_t
274 crgetuid(const cred_t *cr) { return cr->cr_uid; }
275
276 /*
277 * "cyclic"
278 */
279
280 /* osfmk/kern/timer_call.h */
281 typedef void *call_entry_param_t;
282 typedef void (*call_entry_func_t)(
283 call_entry_param_t param0,
284 call_entry_param_t param1);
285
286 typedef struct call_entry {
287 queue_chain_t q_link;
288 call_entry_func_t func;
289 call_entry_param_t param0;
290 call_entry_param_t param1;
291 uint64_t deadline;
292 enum {
293 IDLE,
294 PENDING,
295 DELAYED } state;
296 } call_entry_data_t;
297
298
299 typedef struct call_entry *timer_call_t;
300 typedef void *timer_call_param_t;
301 typedef void (*timer_call_func_t)(
302 timer_call_param_t param0,
303 timer_call_param_t param1);
304
305 extern void
306 timer_call_setup(
307 timer_call_t call,
308 timer_call_func_t func,
309 timer_call_param_t param0);
310
311 extern boolean_t
312 timer_call_enter1(
313 timer_call_t call,
314 timer_call_param_t param1,
315 uint64_t deadline);
316
317 extern boolean_t
318 timer_call_cancel(
319 timer_call_t call);
320
321 typedef struct wrap_timer_call {
322 cyc_handler_t hdlr;
323 cyc_time_t when;
324 uint64_t deadline;
325 struct call_entry call;
326 } wrap_timer_call_t;
327
328 #define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL
329 #define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL
330
331 static void
332 _timer_call_apply_cyclic( void *ignore, void *vTChdl )
333 {
334 #pragma unused(ignore)
335 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;
336
337 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
338
339 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
340 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
341
342 /* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
343 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
344 thread_wakeup((event_t)wrapTC);
345 }
346
347 static cyclic_id_t
348 timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
349 {
350 uint64_t now;
351
352 timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
353 wrapTC->hdlr = *handler;
354 wrapTC->when = *when;
355
356 nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );
357
358 now = mach_absolute_time();
359 wrapTC->deadline = now;
360
361 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
362 timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );
363
364 return (cyclic_id_t)wrapTC;
365 }
366
367 static void
368 timer_call_remove_cyclic(cyclic_id_t cyclic)
369 {
370 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
371
372 while (!timer_call_cancel(&(wrapTC->call))) {
373 int ret = assert_wait(wrapTC, THREAD_UNINT);
374 ASSERT(ret == THREAD_WAITING);
375
376 wrapTC->when.cyt_interval = WAKEUP_REAPER;
377
378 ret = thread_block(THREAD_CONTINUE_NULL);
379 ASSERT(ret == THREAD_AWAKENED);
380 }
381 }
382
383 static void *
384 timer_call_get_cyclic_arg(cyclic_id_t cyclic)
385 {
386 wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;
387
388 return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
389 }
390
391 cyclic_id_t
392 cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
393 {
394 wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
395 if (NULL == wrapTC)
396 return CYCLIC_NONE;
397 else
398 return timer_call_add_cyclic( wrapTC, handler, when );
399 }
400
401 void
402 cyclic_timer_remove(cyclic_id_t cyclic)
403 {
404 ASSERT( cyclic != CYCLIC_NONE );
405
406 timer_call_remove_cyclic( cyclic );
407 _FREE((void *)cyclic, M_TEMP);
408 }
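
/*
 * Illustrative sketch (not part of the original source): arming and disarming
 * one timer_call-backed cyclic through the wrappers above. The handler and the
 * 10ms period are hypothetical; cyt_interval is given in nanoseconds and is
 * converted to mach absolute time inside timer_call_add_cyclic().
 *
 *	static void my_tick(void *arg) { (*(uint64_t *)arg)++; }	// runs at timer_call time
 *
 *	static uint64_t ticks;
 *	cyc_handler_t hdlr = { .cyh_func = my_tick, .cyh_arg = &ticks };
 *	cyc_time_t when = { .cyt_when = 0, .cyt_interval = 10 * 1000 * 1000 };
 *	cyclic_id_t id = cyclic_timer_add(&hdlr, &when);	// CYCLIC_NONE if the _MALLOC fails
 *	...
 *	if (id != CYCLIC_NONE)
 *		cyclic_timer_remove(id);	// cancels, waiting out an in-flight expiration
 */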
409
410 static void
411 _cyclic_add_omni(cyclic_id_list_t cyc_list)
412 {
413 cyc_time_t cT;
414 cyc_handler_t cH;
415 wrap_timer_call_t *wrapTC;
416 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
417 char *t;
418
419 (omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);
420
421 t = (char *)cyc_list;
422 t += sizeof(cyc_omni_handler_t);
423 cyc_list = (cyclic_id_list_t)(uintptr_t)t;
424
425 t += sizeof(cyclic_id_t)*NCPU;
426 t += (sizeof(wrap_timer_call_t))*cpu_number();
427 wrapTC = (wrap_timer_call_t *)(uintptr_t)t;
428
429 cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
430 }
431
432 cyclic_id_list_t
433 cyclic_add_omni(cyc_omni_handler_t *omni)
434 {
435 cyclic_id_list_t cyc_list =
436 _MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
437 sizeof(cyclic_id_t)*NCPU +
438 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
439 if (NULL == cyc_list)
440 return (cyclic_id_list_t)CYCLIC_NONE;
441
442 *(cyc_omni_handler_t *)cyc_list = *omni;
443 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);
444
445 return cyc_list;
446 }
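
/*
 * Layout note (added for clarity): cyclic_add_omni() makes one allocation that
 * _cyclic_add_omni() and _cyclic_remove_omni() carve up by fixed offsets:
 *
 *	+---------------------+---------------------+--------------------------+
 *	| cyc_omni_handler_t  | cyclic_id_t x NCPU  | wrap_timer_call_t x NCPU |
 *	+---------------------+---------------------+--------------------------+
 *	  copy of *omni         the returned           per-cpu timer wrappers,
 *	                        cyc_list entries       indexed by cpu_number()
 *
 * dtrace_xcall(DTRACE_CPUALL, ...) runs _cyclic_add_omni() on every CPU, and
 * each CPU fills in only its own cyc_list slot and wrap_timer_call_t.
 */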
447
448 static void
449 _cyclic_remove_omni(cyclic_id_list_t cyc_list)
450 {
451 cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
452 void *oarg;
453 cyclic_id_t cid;
454 char *t;
455
456 t = (char *)cyc_list;
457 t += sizeof(cyc_omni_handler_t);
458 cyc_list = (cyclic_id_list_t)(uintptr_t)t;
459
460 cid = cyc_list[cpu_number()];
461 oarg = timer_call_get_cyclic_arg(cid);
462
463 timer_call_remove_cyclic( cid );
464 (omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
465 }
466
467 void
468 cyclic_remove_omni(cyclic_id_list_t cyc_list)
469 {
470 ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );
471
472 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
473 _FREE(cyc_list, M_TEMP);
474 }
475
476 typedef struct wrap_thread_call {
477 thread_call_t TChdl;
478 cyc_handler_t hdlr;
479 cyc_time_t when;
480 uint64_t deadline;
481 } wrap_thread_call_t;
482
483 /*
484 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
485 * cleaner and the deadman, but too distant in time and place for the profile provider.
486 */
487 static void
488 _cyclic_apply( void *ignore, void *vTChdl )
489 {
490 #pragma unused(ignore)
491 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;
492
493 (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );
494
495 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
496 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
497
498 /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
499 if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
500 thread_wakeup((event_t)wrapTC);
501 }
502
503 cyclic_id_t
504 cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
505 {
506 uint64_t now;
507
508 wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
509 if (NULL == wrapTC)
510 return CYCLIC_NONE;
511
512 wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
513 wrapTC->hdlr = *handler;
514 wrapTC->when = *when;
515
516 ASSERT(when->cyt_when == 0);
517 ASSERT(when->cyt_interval < WAKEUP_REAPER);
518
519 nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);
520
521 now = mach_absolute_time();
522 wrapTC->deadline = now;
523
524 clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
525 (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );
526
527 return (cyclic_id_t)wrapTC;
528 }
529
530 static void
531 noop_cyh_func(void * ignore)
532 {
533 #pragma unused(ignore)
534 }
535
536 void
537 cyclic_remove(cyclic_id_t cyclic)
538 {
539 wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;
540
541 ASSERT(cyclic != CYCLIC_NONE);
542
543 while (!thread_call_cancel(wrapTC->TChdl)) {
544 int ret = assert_wait(wrapTC, THREAD_UNINT);
545 ASSERT(ret == THREAD_WAITING);
546
547 wrapTC->when.cyt_interval = WAKEUP_REAPER;
548
549 ret = thread_block(THREAD_CONTINUE_NULL);
550 ASSERT(ret == THREAD_AWAKENED);
551 }
552
553 if (thread_call_free(wrapTC->TChdl))
554 _FREE(wrapTC, M_TEMP);
555 else {
556 /* Gut this cyclic and move on ... */
557 wrapTC->hdlr.cyh_func = noop_cyh_func;
558 wrapTC->when.cyt_interval = NEARLY_FOREVER;
559 }
560 }
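
/*
 * Illustrative sketch (not part of the original source): the thread_call-backed
 * cyclic_add()/cyclic_remove() pair, suited to coarse housekeeping such as the
 * cleaner or deadman mentioned above. The handler and the one-second period are
 * hypothetical; cyt_when must be 0 and cyt_interval is in nanoseconds.
 *
 *	static void my_deadman(void *arg) { }	// e.g. check for a wedged consumer
 *
 *	cyc_handler_t hdlr = { .cyh_func = my_deadman, .cyh_arg = NULL };
 *	cyc_time_t when = { .cyt_when = 0, .cyt_interval = 1000000000ULL };
 *	cyclic_id_t id = cyclic_add(&hdlr, &when);
 *	...
 *	if (id != CYCLIC_NONE)
 *		cyclic_remove(id);	// may "gut" the cyclic if thread_call_free() says no
 */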
561
562 /*
563 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
564 */
565
566 thread_call_t
567 dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
568 {
569 #pragma unused(arg)
570 thread_call_t call = thread_call_allocate(func, NULL);
571
572 nanoseconds_to_absolutetime(nanos, &nanos);
573
574 /*
575 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
576 * and clock drift on later invocations is not a worry.
577 */
578 uint64_t deadline = mach_absolute_time() + nanos;
579
580 thread_call_enter_delayed(call, deadline);
581
582 return call;
583 }
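
/*
 * Illustrative sketch (not part of the original source): a one-shot callback
 * roughly 500ms out. Note that 'arg' is currently unused, so the callback sees
 * NULL parameters; the thread_call_t is returned so the caller can cancel it.
 * The callback name is hypothetical.
 *
 *	static void my_once(void *p0, void *p1) { }	// runs once on a thread_call thread
 *
 *	thread_call_t tc = dtrace_timeout(my_once, NULL, 500ULL * 1000 * 1000);
 *	...
 *	if (thread_call_cancel(tc))	// true only if it had not fired yet
 *		thread_call_free(tc);
 */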
584
585 /*
586 * ddi
587 */
588 void
589 ddi_report_dev(dev_info_t *devi)
590 {
591 #pragma unused(devi)
592 }
593
594 #define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
595 static void *soft[NSOFT_STATES];
596
597 int
598 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
599 {
600 #pragma unused(n_items)
601 int i;
602
603 for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
604 *(size_t *)state_p = size;
605 return 0;
606 }
607
608 int
609 ddi_soft_state_zalloc(void *state, int item)
610 {
611 #pragma unused(state)
612 if (item < NSOFT_STATES)
613 return DDI_SUCCESS;
614 else
615 return DDI_FAILURE;
616 }
617
618 void *
619 ddi_get_soft_state(void *state, int item)
620 {
621 #pragma unused(state)
622 ASSERT(item < NSOFT_STATES);
623 return soft[item];
624 }
625
626 int
627 ddi_soft_state_free(void *state, int item)
628 {
629 ASSERT(item < NSOFT_STATES);
630 bzero( soft[item], (size_t)state );
631 return DDI_SUCCESS;
632 }
633
634 void
635 ddi_soft_state_fini(void **state_p)
636 {
637 #pragma unused(state_p)
638 int i;
639
640 for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
641 }
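
/*
 * Illustrative sketch (not part of the original source): the attach-time
 * lifecycle of the soft-state shims above. The opaque 'state' handle actually
 * stores only the per-item size, which is why ddi_soft_state_free() can cast
 * it straight back to a size_t. The names below are hypothetical.
 *
 *	static void *my_softstate;
 *
 *	ddi_soft_state_init(&my_softstate, sizeof(struct my_unit), 0);
 *	if (ddi_soft_state_zalloc(my_softstate, minor) == DDI_SUCCESS) {
 *		struct my_unit *u = ddi_get_soft_state(my_softstate, minor);
 *		// ... use u; the slot was pre-allocated and zeroed in _init ...
 *		ddi_soft_state_free(my_softstate, minor);	// just bzero()s the slot
 *	}
 *	ddi_soft_state_fini(&my_softstate);
 */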
642
643 static unsigned int gRegisteredProps = 0;
644 static struct {
645 char name[32]; /* enough for "dof-data-" + digits */
646 int *data;
647 uint_t nelements;
648 } gPropTable[16];
649
650 kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);
651
652 kern_return_t
653 _dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
654 {
655 if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
656 int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);
657
658 if (NULL == p)
659 return KERN_FAILURE;
660
661 strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
662 gPropTable[gRegisteredProps].nelements = nelements;
663 gPropTable[gRegisteredProps].data = p;
664
665 while (nelements-- > 0) {
666 *p++ = (int)(*data++);
667 }
668
669 gRegisteredProps++;
670 return KERN_SUCCESS;
671 }
672 else
673 return KERN_FAILURE;
674 }
675
676 int
677 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
678 const char *name, int **data, uint_t *nelements)
679 {
680 #pragma unused(match_dev,dip,flags)
681 unsigned int i;
682 for (i = 0; i < gRegisteredProps; ++i)
683 {
684 if (0 == strncmp(name, gPropTable[i].name,
685 sizeof(gPropTable[i].name))) {
686 *data = gPropTable[i].data;
687 *nelements = gPropTable[i].nelements;
688 return DDI_SUCCESS;
689 }
690 }
691 return DDI_FAILURE;
692 }
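
/*
 * Illustrative sketch (not part of the original source): how registration and
 * lookup pair up. A booter registers "dof-data-<n>" byte arrays (see the
 * gPropTable name comment above); the anonymous-enabling code later asks for
 * them back by name and receives the int-widened copy. The buffer below is a
 * hypothetical stand-in for a real DOF image.
 *
 *	uchar_t dof_bytes[4] = { 0 };
 *	_dtrace_register_anon_DOF("dof-data-0", dof_bytes, sizeof(dof_bytes));
 *
 *	int *widened;
 *	uint_t n;
 *	if (ddi_prop_lookup_int_array(0, NULL, 0, "dof-data-0", &widened, &n) == DDI_SUCCESS) {
 *		// each registered byte now occupies one int in widened[0..n-1]
 *		ddi_prop_free(widened);
 *	}
 */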
693
694 int
695 ddi_prop_free(void *buf)
696 {
697 _FREE(buf, M_TEMP);
698 return DDI_SUCCESS;
699 }
700
701 int
702 ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }
703
704 int
705 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
706 minor_t minor_num, const char *node_type, int flag)
707 {
708 #pragma unused(spec_type,node_type,flag)
709 dev_t dev = makedev( ddi_driver_major(dip), minor_num );
710
711 if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
712 return DDI_FAILURE;
713 else
714 return DDI_SUCCESS;
715 }
716
717 void
718 ddi_remove_minor_node(dev_info_t *dip, char *name)
719 {
720 #pragma unused(dip,name)
721 /* XXX called from dtrace_detach, so NOTREACHED for now. */
722 }
723
724 major_t
725 getemajor( dev_t d )
726 {
727 return (major_t) major(d);
728 }
729
730 minor_t
731 getminor ( dev_t d )
732 {
733 return (minor_t) minor(d);
734 }
735
736 dev_t
737 makedevice(major_t major, minor_t minor)
738 {
739 return makedev( major, minor );
740 }
741
742 int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
743 {
744 #pragma unused(dev, dip, flags, name)
745
746 return defvalue;
747 }
748
749 /*
750 * Kernel Debug Interface
751 */
752 int
753 kdi_dtrace_set(kdi_dtrace_set_t ignore)
754 {
755 #pragma unused(ignore)
756 return 0; /* Success */
757 }
758
759 extern void Debugger(const char*);
760
761 void
762 debug_enter(char *c) { Debugger(c); }
763
764 /*
765 * kmem
766 */
767
768 void *
769 dt_kmem_alloc(size_t size, int kmflag)
770 {
771 #pragma unused(kmflag)
772
773 /*
774 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
775 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
776 */
777 #if defined(DTRACE_MEMORY_ZONES)
778 return dtrace_alloc(size);
779 #else
780 return kalloc(size);
781 #endif
782 }
783
784 void *
785 dt_kmem_zalloc(size_t size, int kmflag)
786 {
787 #pragma unused(kmflag)
788
789 /*
790 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
791 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
792 */
793 #if defined(DTRACE_MEMORY_ZONES)
794 void* buf = dtrace_alloc(size);
795 #else
796 void* buf = kalloc(size);
797 #endif
798
799 if(!buf)
800 return NULL;
801
802 bzero(buf, size);
803
804 return buf;
805 }
806
807 void
808 dt_kmem_free(void *buf, size_t size)
809 {
810 #pragma unused(size)
811 /*
812 * DTrace relies on this; it performs many NULL frees.
813 * A NULL free causes debug builds to panic.
814 */
815 if (buf == NULL) return;
816
817 ASSERT(size > 0);
818
819 #if defined(DTRACE_MEMORY_ZONES)
820 dtrace_free(buf, size);
821 #else
822 kfree(buf, size);
823 #endif
824 }
825
826
827
828 /*
829 * aligned kmem allocator
830 * align should be a power of two
831 */
832
833 void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
834 {
835 void* buf;
836 intptr_t p;
837 void** buf_backup;
838
839 buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);
840
841 if(!buf)
842 return NULL;
843
844 p = (intptr_t)buf;
845 p += sizeof(void*); /* now we have enough room to store the backup */
846 p = P2ROUNDUP(p, align); /* and now we're aligned */
847
848 buf_backup = (void**)(p - sizeof(void*));
849 *buf_backup = buf; /* back up the address we need to free */
850
851 return (void*)p;
852 }
853
854 void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
855 {
856 void* buf;
857
858 buf = dt_kmem_alloc_aligned(size, align, kmflag);
859
860 if(!buf)
861 return NULL;
862
863 bzero(buf, size);
864
865 return buf;
866 }
867
868 void dt_kmem_free_aligned(void* buf, size_t size)
869 {
870 #pragma unused(size)
871 intptr_t p;
872 void** buf_backup;
873
874 p = (intptr_t)buf;
875 p -= sizeof(void*);
876 buf_backup = (void**)(p);
877
878 dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
879 }
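
/*
 * Layout note (added for clarity): dt_kmem_alloc_aligned() over-allocates by
 * (align + sizeof(void*)) and stashes the raw allocation address immediately
 * below the pointer it hands back, so dt_kmem_free_aligned() can recover it:
 *
 *	raw 'buf' from dt_kmem_alloc          aligned pointer returned to caller
 *	|                                     |
 *	v                                     v
 *	+---------- padding ---------+-------+------------- size -------------+
 *	                             | void* |
 *	                             +-------+
 *	                               backup copy of 'buf', read back on free
 *
 * P2ROUNDUP(p, align) requires 'align' to be a power of two, per the comment
 * above.
 */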
880
881 /*
882 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
883 * doesn't specify constructor, destructor, or reclaim methods.
884 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
885 * We'll manage this restricted use of kmem_cache with ordinary _MALLOC and _FREE.
886 */
887 kmem_cache_t *
888 kmem_cache_create(
889 const char *name, /* descriptive name for this cache */
890 size_t bufsize, /* size of the objects it manages */
891 size_t align, /* required object alignment */
892 int (*constructor)(void *, void *, int), /* object constructor */
893 void (*destructor)(void *, void *), /* object destructor */
894 void (*reclaim)(void *), /* memory reclaim callback */
895 void *private, /* pass-thru arg for constr/destr/reclaim */
896 vmem_t *vmp, /* vmem source for slab allocation */
897 int cflags) /* cache creation flags */
898 {
899 #pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
900 return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
901 }
902
903 void *
904 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
905 {
906 #pragma unused(kmflag)
907 size_t bufsize = (size_t)cp;
908 return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
909 }
910
911 void
912 kmem_cache_free(kmem_cache_t *cp, void *buf)
913 {
914 #pragma unused(cp)
915 _FREE(buf, M_TEMP);
916 }
917
918 void
919 kmem_cache_destroy(kmem_cache_t *cp)
920 {
921 #pragma unused(cp)
922 }
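
/*
 * Illustrative sketch (not part of the original source): per the note above,
 * the "cache" handle is just the object size smuggled through an opaque
 * pointer, so callers still see the ordinary kmem_cache API. KM_SLEEP is
 * ignored; every allocation is a waiting _MALLOC.
 *
 *	kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *	    sizeof (dtrace_state_percpu_t) * NCPU, 0,
 *	    NULL, NULL, NULL, NULL, NULL, 0);	// cp is really just the bufsize
 *
 *	void *buf = kmem_cache_alloc(cp, KM_SLEEP);	// _MALLOC(bufsize, M_TEMP, M_WAITOK)
 *	...
 *	kmem_cache_free(cp, buf);
 *	kmem_cache_destroy(cp);	// nothing to tear down
 */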
923
924 /*
925 * taskq
926 */
927 extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */
928
929 static void
930 _taskq_apply( task_func_t func, thread_call_param_t arg )
931 {
932 func( (void *)arg );
933 }
934
935 taskq_t *
936 taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
937 int maxalloc, uint_t flags)
938 {
939 #pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)
940
941 return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
942 }
943
944 taskqid_t
945 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
946 {
947 #pragma unused(flags)
948 thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
949 thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
950 return (taskqid_t) tq /* for lack of anything better */;
951 }
952
953 void
954 taskq_destroy(taskq_t *tq)
955 {
956 thread_call_cancel( (thread_call_t) tq );
957 thread_call_free( (thread_call_t) tq );
958 }
959
960 pri_t maxclsyspri;
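
/*
 * Illustrative sketch (not part of the original source): this taskq shim wraps
 * a single thread_call, so it supports one outstanding dispatch at a time,
 * which is all DTrace asks of it. The names below are hypothetical; the flags
 * arguments are ignored by the shim.
 *
 *	static void my_cleanup(void *arg) { }	// runs later on a thread_call thread
 *
 *	taskq_t *tq = taskq_create("my_taskq", 1, maxclsyspri, 0, 0, 0);
 *	taskq_dispatch(tq, my_cleanup, NULL, 0);
 *	...
 *	taskq_destroy(tq);	// cancels anything pending, then frees the thread_call
 */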
961
962 /*
963 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
964 */
965 typedef unsigned int u_daddr_t;
966 #include "blist.h"
967
968 /* By passing around blist *handles*, the underlying blist can be resized as needed. */
969 struct blist_hdl {
970 blist_t blist;
971 };
972
973 vmem_t *
974 vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
975 void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
976 {
977 #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
978 blist_t bl;
979 struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);
980
981 ASSERT(quantum == 1);
982 ASSERT(NULL == ignore5);
983 ASSERT(NULL == ignore6);
984 ASSERT(NULL == source);
985 ASSERT(0 == qcache_max);
986 ASSERT(vmflag & VMC_IDENTIFIER);
987
988 size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */
989
990 p->blist = bl = blist_create( size );
991 blist_free(bl, 0, size);
992 if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
993
994 return (vmem_t *)p;
995 }
996
997 void *
998 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
999 {
1000 #pragma unused(vmflag)
1001 struct blist_hdl *q = (struct blist_hdl *)vmp;
1002 blist_t bl = q->blist;
1003 daddr_t p;
1004
1005 p = blist_alloc(bl, (daddr_t)size);
1006
1007 if ((daddr_t)-1 == p) {
1008 blist_resize(&bl, (bl->bl_blocks) << 1, 1);
1009 q->blist = bl;
1010 p = blist_alloc(bl, (daddr_t)size);
1011 if ((daddr_t)-1 == p)
1012 panic("vmem_alloc: failure after blist_resize!");
1013 }
1014
1015 return (void *)(uintptr_t)p;
1016 }
1017
1018 void
1019 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1020 {
1021 struct blist_hdl *p = (struct blist_hdl *)vmp;
1022
1023 blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
1024 }
1025
1026 void
1027 vmem_destroy(vmem_t *vmp)
1028 {
1029 struct blist_hdl *p = (struct blist_hdl *)vmp;
1030
1031 blist_destroy( p->blist );
1032 _FREE( p, sizeof(struct blist_hdl) );
1033 }
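
/*
 * Illustrative sketch (not part of the original source): DTrace uses this shim
 * purely as an identifier arena (hence the VMC_IDENTIFIER assert above), e.g.
 * for handing out IDs starting at 1. The arena name is hypothetical; passing
 * base == (void *)1 pre-allocates ID 0 so the first vmem_alloc() returns 1.
 *
 *	vmem_t *arena = vmem_create("my_ids", (void *)1, UINT32_MAX, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *
 *	uint32_t id = (uint32_t)(uintptr_t)vmem_alloc(arena, 1, VM_BESTFIT | VM_SLEEP);
 *	...
 *	vmem_free(arena, (void *)(uintptr_t)id, 1);
 *	vmem_destroy(arena);
 */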
1034
1035 /*
1036 * Timing
1037 */
1038
1039 /*
1040 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
1041 * January 1, 1970. Because it can be called from probe context, it must take no locks.
1042 */
1043
1044 hrtime_t
1045 dtrace_gethrestime(void)
1046 {
1047 clock_sec_t secs;
1048 clock_nsec_t nanosecs;
1049 uint64_t secs64, ns64;
1050
1051 clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
1052 secs64 = (uint64_t)secs;
1053 ns64 = (uint64_t)nanosecs;
1054
1055 ns64 = ns64 + (secs64 * 1000000000LL);
1056 return ns64;
1057 }
1058
1059 /*
1060 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
1061 * Hence its primary use is to specify intervals.
1062 */
1063
1064 hrtime_t
1065 dtrace_abs_to_nano(uint64_t elapsed)
1066 {
1067 static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
1068
1069 /*
1070 * If this is the first time we've run, get the timebase.
1071 * We can use denom == 0 to indicate that sTimebaseInfo is
1072 * uninitialised because it makes no sense to have a zero
1073 * denominator in a fraction.
1074 */
1075
1076 if ( sTimebaseInfo.denom == 0 ) {
1077 (void) clock_timebase_info(&sTimebaseInfo);
1078 }
1079
1080 /*
1081 * Convert to nanoseconds.
1082 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
1083 *
1084 * Provided the final result is representable in 64 bits the following maneuver will
1085 * deliver that result without intermediate overflow.
1086 */
1087 if (sTimebaseInfo.denom == sTimebaseInfo.numer)
1088 return elapsed;
1089 else if (sTimebaseInfo.denom == 1)
1090 return elapsed * (uint64_t)sTimebaseInfo.numer;
1091 else {
1092 /* Decompose elapsed = eta32 * 2^32 + eps32: */
1093 uint64_t eta32 = elapsed >> 32;
1094 uint64_t eps32 = elapsed & 0x00000000ffffffffLL;
1095
1096 uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;
1097
1098 /* Form product of elapsed64 (decomposed) and numer: */
1099 uint64_t mu64 = numer * eta32;
1100 uint64_t lambda64 = numer * eps32;
1101
1102 /* Divide the constituents by denom: */
1103 uint64_t q32 = mu64/denom;
1104 uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */
1105
1106 return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
1107 }
1108 }
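
/*
 * Worked check of the maneuver above (added for clarity). With
 * elapsed = eta32 * 2^32 + eps32, the target quantity is elapsed * numer / denom:
 *
 *	elapsed * numer = (numer * eta32) * 2^32 + (numer * eps32)
 *	                =       mu64     * 2^32 +    lambda64
 *
 * Writing mu64 = q32 * denom + r32 (so q32 = mu64 / denom, r32 = mu64 % denom)
 * and dividing through by denom:
 *
 *	elapsed * numer / denom = (q32 << 32) + ((r32 << 32) + lambda64) / denom
 *
 * which is exactly the expression returned. The full 128-bit product is never
 * formed; only (r32 << 32) + lambda64 is divided, and that sum stays within 64
 * bits for the small numer/denom pairs the timebase reports.
 */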
1109
1110 hrtime_t
1111 dtrace_gethrtime(void)
1112 {
1113 static uint64_t start = 0;
1114
1115 if (start == 0)
1116 start = mach_absolute_time();
1117
1118 return dtrace_abs_to_nano(mach_absolute_time() - start);
1119 }
1120
1121 /*
1122 * Atomicity and synchronization
1123 */
1124 uint32_t
1125 dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
1126 {
1127 if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
1128 return cmp;
1129 else
1130 return ~cmp; /* Must return something *other* than cmp */
1131 }
1132
1133 void *
1134 dtrace_casptr(void *target, void *cmp, void *new)
1135 {
1136 if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
1137 return cmp;
1138 else
1139 return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
1140 }
1141
1142 /*
1143 * Interrupt manipulation
1144 */
1145 dtrace_icookie_t
1146 dtrace_interrupt_disable(void)
1147 {
1148 return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
1149 }
1150
1151 void
1152 dtrace_interrupt_enable(dtrace_icookie_t reenable)
1153 {
1154 (void)ml_set_interrupts_enabled((boolean_t)reenable);
1155 }
1156
1157 /*
1158 * MP coordination
1159 */
1160 static void
1161 dtrace_sync_func(void) {}
1162
1163 /*
1164 * dtrace_sync() is not called from probe context.
1165 */
1166 void
1167 dtrace_sync(void)
1168 {
1169 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
1170 }
1171
1172 /*
1173 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
1174 */
1175
1176 extern kern_return_t dtrace_copyio_preflight(addr64_t);
1177 extern kern_return_t dtrace_copyio_postflight(addr64_t);
1178
1179 static int
1180 dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
1181 {
1182 #pragma unused(kaddr)
1183
1184 vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
1185 dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */
1186
1187 ASSERT(kaddr + size >= kaddr);
1188
1189 if (ml_at_interrupt_context() || /* Avoid possible copyio page fault on int stack, which panics! */
1190 0 != recover || /* Avoid reentrancy into copyio facility. */
1191 uaddr + size < uaddr || /* Avoid address wrap. */
1192 KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
1193 {
1194 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1195 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1196 return (0);
1197 }
1198 return (1);
1199 }
1200
1201 void
1202 dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1203 {
1204 #pragma unused(flags)
1205
1206 if (dtrace_copycheck( src, dst, len )) {
1207 if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
1208 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1209 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1210 }
1211 dtrace_copyio_postflight(src);
1212 }
1213 }
1214
1215 void
1216 dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
1217 {
1218 #pragma unused(flags)
1219
1220 size_t actual;
1221
1222 if (dtrace_copycheck( src, dst, len )) {
1223 /* copyin as many as 'len' bytes. */
1224 int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);
1225
1226 /*
1227 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
1228 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
1229 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1230 * to the caller.
1231 */
1232 if (error && error != ENAMETOOLONG) {
1233 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1234 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
1235 }
1236 dtrace_copyio_postflight(src);
1237 }
1238 }
1239
1240 void
1241 dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1242 {
1243 #pragma unused(flags)
1244
1245 if (dtrace_copycheck( dst, src, len )) {
1246 if (copyout((const void *)src, dst, (vm_size_t)len)) {
1247 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1248 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1249 }
1250 dtrace_copyio_postflight(dst);
1251 }
1252 }
1253
1254 void
1255 dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
1256 {
1257 #pragma unused(flags)
1258
1259 size_t actual;
1260
1261 if (dtrace_copycheck( dst, src, len )) {
1262
1263 /*
1264 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
1265 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
1266 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
1267 * to the caller.
1268 */
1269 if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
1270 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1271 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
1272 }
1273 dtrace_copyio_postflight(dst);
1274 }
1275 }
1276
1277 uint8_t
1278 dtrace_fuword8(user_addr_t uaddr)
1279 {
1280 uint8_t ret = 0;
1281
1282 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1283 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1284 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1285 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1286 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1287 }
1288 dtrace_copyio_postflight(uaddr);
1289 }
1290 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1291
1292 return(ret);
1293 }
1294
1295 uint16_t
1296 dtrace_fuword16(user_addr_t uaddr)
1297 {
1298 uint16_t ret = 0;
1299
1300 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1301 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1302 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1303 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1304 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1305 }
1306 dtrace_copyio_postflight(uaddr);
1307 }
1308 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1309
1310 return(ret);
1311 }
1312
1313 uint32_t
1314 dtrace_fuword32(user_addr_t uaddr)
1315 {
1316 uint32_t ret = 0;
1317
1318 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1319 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1320 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1321 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1322 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1323 }
1324 dtrace_copyio_postflight(uaddr);
1325 }
1326 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1327
1328 return(ret);
1329 }
1330
1331 uint64_t
1332 dtrace_fuword64(user_addr_t uaddr)
1333 {
1334 uint64_t ret = 0;
1335
1336 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
1337 if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
1338 if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
1339 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1340 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1341 }
1342 dtrace_copyio_postflight(uaddr);
1343 }
1344 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
1345
1346 return(ret);
1347 }
1348
1349 /*
1350 * Emulation of Solaris fuword / suword
1351 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
1352 */
1353
1354 int
1355 fuword8(user_addr_t uaddr, uint8_t *value)
1356 {
1357 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
1358 return -1;
1359 }
1360
1361 return 0;
1362 }
1363
1364 int
1365 fuword16(user_addr_t uaddr, uint16_t *value)
1366 {
1367 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
1368 return -1;
1369 }
1370
1371 return 0;
1372 }
1373
1374 int
1375 fuword32(user_addr_t uaddr, uint32_t *value)
1376 {
1377 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
1378 return -1;
1379 }
1380
1381 return 0;
1382 }
1383
1384 int
1385 fuword64(user_addr_t uaddr, uint64_t *value)
1386 {
1387 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
1388 return -1;
1389 }
1390
1391 return 0;
1392 }
1393
1394 void
1395 fuword8_noerr(user_addr_t uaddr, uint8_t *value)
1396 {
1397 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
1398 *value = 0;
1399 }
1400 }
1401
1402 void
1403 fuword16_noerr(user_addr_t uaddr, uint16_t *value)
1404 {
1405 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
1406 *value = 0;
1407 }
1408 }
1409
1410 void
1411 fuword32_noerr(user_addr_t uaddr, uint32_t *value)
1412 {
1413 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
1414 *value = 0;
1415 }
1416 }
1417
1418 void
1419 fuword64_noerr(user_addr_t uaddr, uint64_t *value)
1420 {
1421 if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
1422 *value = 0;
1423 }
1424 }
1425
1426 int
1427 suword64(user_addr_t addr, uint64_t value)
1428 {
1429 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1430 return -1;
1431 }
1432
1433 return 0;
1434 }
1435
1436 int
1437 suword32(user_addr_t addr, uint32_t value)
1438 {
1439 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1440 return -1;
1441 }
1442
1443 return 0;
1444 }
1445
1446 int
1447 suword16(user_addr_t addr, uint16_t value)
1448 {
1449 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1450 return -1;
1451 }
1452
1453 return 0;
1454 }
1455
1456 int
1457 suword8(user_addr_t addr, uint8_t value)
1458 {
1459 if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
1460 return -1;
1461 }
1462
1463 return 0;
1464 }
1465
1466
1467 /*
1468 * Miscellaneous
1469 */
1470 extern boolean_t dtrace_tally_fault(user_addr_t);
1471
1472 boolean_t
1473 dtrace_tally_fault(user_addr_t uaddr)
1474 {
1475 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1476 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
1477 return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
1478 }
1479
1480 void
1481 dtrace_vpanic(const char *format, va_list alist)
1482 {
1483 vuprintf( format, alist );
1484 panic("dtrace_vpanic");
1485 }
1486
1487 #define TOTTY 0x02
1488 extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */
1489
1490 int
1491 vuprintf(const char *format, va_list ap)
1492 {
1493 return prf(format, ap, TOTTY, NULL);
1494 }
1495
1496 /* Not called from probe context */
1497 void cmn_err( int level, const char *format, ... )
1498 {
1499 #pragma unused(level)
1500 va_list alist;
1501
1502 va_start(alist, format);
1503 vuprintf(format, alist);
1504 va_end(alist);
1505 uprintf("\n");
1506 }
1507
1508 /*
1509 * History:
1510 * 2002-01-24 gvdl Initial implementation of strstr
1511 */
1512
1513 __private_extern__ const char *
1514 strstr(const char *in, const char *str)
1515 {
1516 char c;
1517 size_t len;
1518
1519 c = *str++;
1520 if (!c)
1521 return (const char *) in; // Trivial empty string case
1522
1523 len = strlen(str);
1524 do {
1525 char sc;
1526
1527 do {
1528 sc = *in++;
1529 if (!sc)
1530 return (char *) 0;
1531 } while (sc != c);
1532 } while (strncmp(in, str, len) != 0);
1533
1534 return (const char *) (in - 1);
1535 }
1536
1537 /*
1538 * Runtime and ABI
1539 */
1540 uintptr_t
1541 dtrace_caller(int ignore)
1542 {
1543 #pragma unused(ignore)
1544 return -1; /* Just as in Solaris dtrace_asm.s */
1545 }
1546
1547 int
1548 dtrace_getstackdepth(int aframes)
1549 {
1550 struct frame *fp = (struct frame *)__builtin_frame_address(0);
1551 struct frame *nextfp, *minfp, *stacktop;
1552 int depth = 0;
1553 int on_intr;
1554
1555 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
1556 stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
1557 else
1558 stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
1559
1560 minfp = fp;
1561
1562 aframes++;
1563
1564 for (;;) {
1565 depth++;
1566
1567 nextfp = *(struct frame **)fp;
1568
1569 if (nextfp <= minfp || nextfp >= stacktop) {
1570 if (on_intr) {
1571 /*
1572 * Hop from interrupt stack to thread stack.
1573 */
1574 vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());
1575
1576 minfp = (struct frame *)kstack_base;
1577 stacktop = (struct frame *)(kstack_base + kernel_stack_size);
1578
1579 on_intr = 0;
1580 continue;
1581 }
1582 break;
1583 }
1584
1585 fp = nextfp;
1586 minfp = fp;
1587 }
1588
1589 if (depth <= aframes)
1590 return (0);
1591
1592 return (depth - aframes);
1593 }
1594
1595 /*
1596 * Unconsidered
1597 */
1598 void
1599 dtrace_vtime_enable(void) {}
1600
1601 void
1602 dtrace_vtime_disable(void) {}
1603
1604 #else /* else ! CONFIG_DTRACE */
1605
1606 #include <sys/types.h>
1607 #include <mach/vm_types.h>
1608 #include <mach/kmod.h>
1609
1610 /*
1611 * This exists to prevent build errors when dtrace is unconfigured.
1612 */
1613
1614 kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);
1615
1616 kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
1617 #pragma unused(arg1, arg2, arg3)
1618
1619 return KERN_FAILURE;
1620 }
1621
1622 #endif /* CONFIG_DTRACE */