/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>

#include <sys/time.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <machine/atomic.h>
#include <libkern/OSKextLibPrivate.h>
#include <kern/kern_types.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <miscfs/devfs/devfs.h>
#include <kern/kalloc.h>

#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */

/*
 * pid/proc
 */
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

KALLOC_HEAP_DEFINE(KHEAP_DTRACE, "dtrace", KHEAP_ID_DEFAULT);

void
dtrace_sprlock(proc_t *p)
{
	lck_mtx_lock(&p->p_dtrace_sprlock);
}

void
dtrace_sprunlock(proc_t *p)
{
	lck_mtx_unlock(&p->p_dtrace_sprlock);
}

/* Not called from probe context */
proc_t *
sprlock(pid_t pid)
{
	proc_t* p;

	if ((p = proc_find(pid)) == PROC_NULL) {
		return PROC_NULL;
	}

	task_suspend_internal(p->task);

	dtrace_sprlock(p);

	return p;
}

/* Not called from probe context */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		dtrace_sprunlock(p);

		task_resume_internal(p->task);

		proc_rele(p);
	}
}

/*
 * uread/uwrite
 */

// These are not exported from vm_map.h.
extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);

/* Not called from probe context */
int
uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != PROC_NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
		vm_map_deallocate(map);
	} else {
		ret = KERN_TERMINATED;
	}

	return (int)ret;
}


/* Not called from probe context */
int
uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		/* Find the memory permissions. */
		uint32_t nestingDepth = 999999;
		vm_region_submap_short_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		mach_vm_address_t address = (mach_vm_address_t)a;
		mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;

		ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
		if (ret != KERN_SUCCESS) {
			goto done;
		}

		vm_prot_t reprotect;

		if (!(info.protection & VM_PROT_WRITE)) {
			/* Save the original protection values for restoration later */
			reprotect = info.protection;

			if (info.max_protection & VM_PROT_WRITE) {
				/* The memory is not currently writable, but can be made writable. */
				ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
			} else {
				/*
				 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
				 *
				 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
				 */
				ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
			}

			if (ret != KERN_SUCCESS) {
				goto done;
			}
		} else {
			/* The memory was already writable. */
			reprotect = VM_PROT_NONE;
		}

		ret = vm_map_write_user( map,
		    buf,
		    (vm_map_address_t)a,
		    (vm_size_t)len);

		dtrace_flush_caches();

		if (ret != KERN_SUCCESS) {
			goto done;
		}

		if (reprotect != VM_PROT_NONE) {
			ASSERT(reprotect & VM_PROT_EXECUTE);
			ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
		}

done:
		vm_map_deallocate(map);
	} else {
		ret = KERN_TERMINATED;
	}

	return (int)ret;
}

/*
 * cpuvar
 */
LCK_MTX_DECLARE_ATTR(cpu_lock, &dtrace_lck_grp, &dtrace_lck_attr);
LCK_MTX_DECLARE_ATTR(cyc_lock, &dtrace_lck_grp, &dtrace_lck_attr);
LCK_MTX_DECLARE_ATTR(mod_lock, &dtrace_lck_grp, &dtrace_lck_attr);

dtrace_cpu_t *cpu_list;
cpu_core_t *cpu_core; /* XXX TLB lockdown? */

/*
 * cred_t
 */

/*
 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
 */
cred_t *
dtrace_CRED(void)
{
	struct uthread *uthread = get_bsdthread_info(current_thread());

	if (uthread == NULL) {
		return NULL;
	} else {
		return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
	}
}

int
PRIV_POLICY_CHOICE(void* cred, int priv, int all)
{
#pragma unused(priv, all)
	return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
}

int
PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
{
#pragma unused(priv, boolean)
	return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}

uid_t
crgetuid(const cred_t *cr)
{
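	/*
	 * A local copy of the credential is taken here, presumably because
	 * kauth_cred_getuid() takes a non-const credential reference.
	 */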
	cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr);
}

/*
 * "cyclic"
 */

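/*
 * Solaris cyclics are emulated in two flavors. CPU-bound cyclics (cyclic_timer_add
 * and friends) wrap a periodic timer_call_t in a wrap_timer_call_t, link it into the
 * owning CPU's cpu_cyc_list, and rearm it from the timer callout itself; "omni"
 * cyclics allocate one such wrapper per CPU and program each CPU via dtrace_xcall().
 * Unbound cyclics (cyclic_add/cyclic_remove, further below) wrap a thread call instead.
 */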
typedef struct wrap_timer_call {
	/* node attributes */
	cyc_handler_t hdlr;
	cyc_time_t when;
	uint64_t deadline;
	int cpuid;
	boolean_t suspended;
	struct timer_call call;

	/* next item in the linked list */
	LIST_ENTRY(wrap_timer_call) entries;
} wrap_timer_call_t;

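/*
 * Sentinel cyt_interval values used by the thread-call-based cyclics below:
 * cyclic_remove() briefly sets WAKEUP_REAPER so that _cyclic_apply() issues a
 * thread_wakeup() once the thread call has been re-armed, and a cyclic whose
 * thread call cannot be freed is gutted and parked at NEARLY_FOREVER.
 */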
#define WAKEUP_REAPER           0x7FFFFFFFFFFFFFFFLL
#define NEARLY_FOREVER          0x7FFFFFFFFFFFFFFELL


typedef struct cyc_list {
	cyc_omni_handler_t cyl_omni;
	wrap_timer_call_t cyl_wrap_by_cpus[];
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
} __attribute__ ((aligned(8))) cyc_list_t;
#else
} cyc_list_t;
#endif

/* CPU going online/offline notifications */
void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL;
void dtrace_cpu_state_changed(int, boolean_t);

void
dtrace_install_cpu_hooks(void)
{
	dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed;
}

void
dtrace_cpu_state_changed(int cpuid, boolean_t is_running)
{
#pragma unused(cpuid)
	wrap_timer_call_t *wrapTC = NULL;
	boolean_t suspend = (is_running ? FALSE : TRUE);
	dtrace_icookie_t s;

	/* Ensure that we're not going to leave the CPU */
	s = dtrace_interrupt_disable();
	assert(cpuid == cpu_number());

	LIST_FOREACH(wrapTC, &(cpu_list[cpu_number()].cpu_cyc_list), entries) {
		assert(wrapTC->cpuid == cpu_number());
		if (suspend) {
			assert(!wrapTC->suspended);
			/* If this fails, we'll panic anyway, so let's do this now. */
			if (!timer_call_cancel(&wrapTC->call)) {
				panic("timer_call_set_suspend() failed to cancel a timer call");
			}
			wrapTC->suspended = TRUE;
		} else {
			/* Rearm the timer, but ensure it was suspended first. */
			assert(wrapTC->suspended);
			clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(),
			    &wrapTC->deadline);
			timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
			    TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
			wrapTC->suspended = FALSE;
		}
	}

	/* Restore the previous interrupt state. */
	dtrace_interrupt_enable(s);
}

static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;

	(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL );
}

static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;
	dtrace_icookie_t s;

	timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));

	/* Insert the timer into the list of running timers on this CPU, and start it. */
	s = dtrace_interrupt_disable();
	wrapTC->cpuid = cpu_number();
	LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries);
	timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline,
	    TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
	wrapTC->suspended = FALSE;
	dtrace_interrupt_enable(s);

	return (cyclic_id_t)wrapTC;
}

/*
 * Executed on the CPU the timer is running on.
 */
static void
timer_call_remove_cyclic(wrap_timer_call_t *wrapTC)
{
	assert(wrapTC);
	assert(cpu_number() == wrapTC->cpuid);

	if (!timer_call_cancel(&wrapTC->call)) {
		panic("timer_call_remove_cyclic() failed to cancel a timer call");
	}

	LIST_REMOVE(wrapTC, entries);
}

static void *
timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC)
{
	return wrapTC ? wrapTC->hdlr.cyh_arg : NULL;
}

cyclic_id_t
cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
{
	wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == wrapTC) {
		return CYCLIC_NONE;
	} else {
		return timer_call_add_cyclic( wrapTC, handler, when );
	}
}

void
cyclic_timer_remove(cyclic_id_t cyclic)
{
	ASSERT( cyclic != CYCLIC_NONE );

	/* Removing a timer call must be done on the CPU the timer is running on. */
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *) cyclic;
	dtrace_xcall(wrapTC->cpuid, (dtrace_xcall_t) timer_call_remove_cyclic, (void*) cyclic);

	_FREE((void *)cyclic, M_TEMP);
}

static void
_cyclic_add_omni(cyc_list_t *cyc_list)
{
	cyc_time_t cT;
	cyc_handler_t cH;
	cyc_omni_handler_t *omni = &cyc_list->cyl_omni;

	(omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);

	wrap_timer_call_t *wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()];
	timer_call_add_cyclic(wrapTC, &cH, &cT);
}

cyclic_id_list_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
	cyc_list_t *cyc_list =
	    _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);

	if (NULL == cyc_list) {
		return NULL;
	}

	cyc_list->cyl_omni = *omni;

	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);

	return (cyclic_id_list_t)cyc_list;
}

static void
_cyclic_remove_omni(cyc_list_t *cyc_list)
{
	cyc_omni_handler_t *omni = &cyc_list->cyl_omni;
	void *oarg;
	wrap_timer_call_t *wrapTC;

	/*
	 * If the processor was offline when dtrace started, we did not allocate
	 * a cyclic timer for this CPU.
	 */
	if ((wrapTC = &cyc_list->cyl_wrap_by_cpus[cpu_number()]) != NULL) {
		oarg = timer_call_get_cyclic_arg(wrapTC);
		timer_call_remove_cyclic(wrapTC);
		(omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
	}
}

void
cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
	ASSERT(cyc_list != NULL);

	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
	_FREE(cyc_list, M_TEMP);
}

typedef struct wrap_thread_call {
	thread_call_t TChdl;
	cyc_handler_t hdlr;
	cyc_time_t when;
	uint64_t deadline;
} wrap_thread_call_t;

/*
 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
 * cleaner and the deadman, but too distant in time and place for the profile provider.
 */
static void
_cyclic_apply( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;

	(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline));
	(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

	/* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
	if (wrapTC->when.cyt_interval == WAKEUP_REAPER) {
		thread_wakeup((event_t)wrapTC);
	}
}

cyclic_id_t
cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == wrapTC) {
		return CYCLIC_NONE;
	}

	wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	ASSERT(when->cyt_when == 0);
	ASSERT(when->cyt_interval < WAKEUP_REAPER);

	nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline));
	(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

	return (cyclic_id_t)wrapTC;
}

static void
noop_cyh_func(void * ignore)
{
#pragma unused(ignore)
}

void
cyclic_remove(cyclic_id_t cyclic)
{
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;

	ASSERT(cyclic != CYCLIC_NONE);

	while (!thread_call_cancel(wrapTC->TChdl)) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}

	if (thread_call_free(wrapTC->TChdl)) {
		_FREE(wrapTC, M_TEMP);
	} else {
		/* Gut this cyclic and move on ... */
		wrapTC->hdlr.cyh_func = noop_cyh_func;
		wrapTC->when.cyt_interval = NEARLY_FOREVER;
	}
}

int
ddi_driver_major(dev_info_t *devi)
{
	return (int)major(CAST_DOWN_EXPLICIT(int, devi));
}

int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
#pragma unused(spec_type,node_type,flag)
	dev_t dev = makedev( ddi_driver_major(dip), minor_num );

	if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) {
		return DDI_FAILURE;
	} else {
		return DDI_SUCCESS;
	}
}

void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
#pragma unused(dip,name)
/* XXX called from dtrace_detach, so NOTREACHED for now. */
}

major_t
getemajor( dev_t d )
{
	return (major_t) major(d);
}

minor_t
getminor( dev_t d )
{
	return (minor_t) minor(d);
}

extern void Debugger(const char*);

void
debug_enter(char *c)
{
	Debugger(c);
}

/*
 * kmem
 */

void *
dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
 */
	return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK, site).addr;
}

void *
dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_ext.
 */
	return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK | Z_ZERO, site).addr;
}

void
dt_kmem_free(void *buf, size_t size)
{
	kheap_free(KHEAP_DTRACE, buf, size);
}



/*
 * aligned dt_kmem allocator
 * align should be a power of two
 */

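/*
 * Illustrative layout of an aligned allocation, as produced by
 * dt_kmem_alloc_aligned_site() and consumed by dt_kmem_free_aligned() below.
 * The returned pointer is preceded by a small header recording the original
 * allocation address and total size:
 *
 *   mem (from dt_kmem_alloc_site)          returned pointer (aligned)
 *    |                                      |
 *    v                                      v
 *    [ ...padding... ][ total size ][ mem ][ size bytes usable by caller ]
 *                      ^             ^
 *                      size_to_free  addr_to_free
 */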
void*
dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *site)
{
	void *mem, **addr_to_free;
	intptr_t mem_aligned;
	size_t *size_to_free, hdr_size;

	/* Must be a power of two. */
	assert(align != 0);
	assert((align & (align - 1)) == 0);

	/*
	 * We are going to add a header to the allocation. It contains
	 * the address to free and the total size of the buffer.
	 */
	hdr_size = sizeof(size_t) + sizeof(void*);
	mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site);
	if (mem == NULL) {
		return NULL;
	}

	mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1));

	/* Write the address to free in the header. */
	addr_to_free = (void**) (mem_aligned - sizeof(void*));
	*addr_to_free = mem;

	/* Write the size to free in the header. */
	size_to_free = (size_t*) (mem_aligned - hdr_size);
	*size_to_free = size + align + hdr_size;

	return (void*) mem_aligned;
}

void*
dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_site_t *s)
{
	void* buf;

	buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s);

	if (!buf) {
		return NULL;
	}

	bzero(buf, size);

	return buf;
}

void
dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
	intptr_t ptr = (intptr_t) buf;
	void **addr_to_free = (void**) (ptr - sizeof(void*));
	size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*)));

	if (buf == NULL) {
		return;
	}

	dt_kmem_free(*addr_to_free, *size_to_free);
}

/*
 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
 * doesn't specify constructor, destructor, or reclaim methods.
 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
 */
kmem_cache_t *
kmem_cache_create(
	const char *name,               /* descriptive name for this cache */
	size_t bufsize,                 /* size of the objects it manages */
	size_t align,                   /* required object alignment */
	int (*constructor)(void *, void *, int), /* object constructor */
	void (*destructor)(void *, void *),      /* object destructor */
	void (*reclaim)(void *),        /* memory reclaim callback */
	void *private,                  /* pass-thru arg for constr/destr/reclaim */
	vmem_t *vmp,                    /* vmem source for slab allocation */
	int cflags)                     /* cache creation flags */
{
#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
	return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
}

void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
#pragma unused(kmflag)
	size_t bufsize = (size_t)cp;
	return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
}

void
kmem_cache_free(kmem_cache_t *cp, void *buf)
{
#pragma unused(cp)
	_FREE(buf, M_TEMP);
}

void
kmem_cache_destroy(kmem_cache_t *cp)
{
#pragma unused(cp)
}

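/*
 * A minimal usage sketch of this shim (illustrative only; the cache name is
 * hypothetical and the allocation flag is ignored, since kmflag is unused above):
 *
 *	kmem_cache_t *cp = kmem_cache_create("dtrace_state_cache",
 *	    NCPU * sizeof(dtrace_state_percpu_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	void *buf = kmem_cache_alloc(cp, KM_SLEEP);
 *	...
 *	kmem_cache_free(cp, buf);
 *	kmem_cache_destroy(cp);
 *
 * Since the "cache" is just the recorded object size, each allocation is an
 * independent _MALLOC of that size.
 */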
/*
 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
 */
typedef unsigned int u_daddr_t;
#include "blist.h"

/* By passing around blist *handles*, the underlying blist can be resized as needed. */
struct blist_hdl {
	blist_t blist;
};

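/*
 * The arena is backed by a blist bitmap; vmem_alloc() below doubles the blist
 * (via blist_resize) whenever an allocation fails. That is why callers hold a
 * blist_hdl rather than the blist itself: the handle stays valid while the
 * underlying blist is replaced.
 */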
vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
    void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
{
#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
	blist_t bl;
	struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);

	ASSERT(quantum == 1);
	ASSERT(NULL == ignore5);
	ASSERT(NULL == ignore6);
	ASSERT(NULL == source);
	ASSERT(0 == qcache_max);
	ASSERT(size <= INT32_MAX);
	ASSERT(vmflag & VMC_IDENTIFIER);

	size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */

	p->blist = bl = blist_create((daddr_t)size);
	blist_free(bl, 0, (daddr_t)size);
	if (base) {
		blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */
	}
	return (vmem_t *)p;
}

void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
#pragma unused(vmflag)
	struct blist_hdl *q = (struct blist_hdl *)vmp;
	blist_t bl = q->blist;
	daddr_t p;

	p = blist_alloc(bl, (daddr_t)size);

	if (p == SWAPBLK_NONE) {
		blist_resize(&bl, (bl->bl_blocks) << 1, 1);
		q->blist = bl;
		p = blist_alloc(bl, (daddr_t)size);
		if (p == SWAPBLK_NONE) {
			panic("vmem_alloc: failure after blist_resize!");
		}
	}

	return (void *)(uintptr_t)p;
}

void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	struct blist_hdl *p = (struct blist_hdl *)vmp;

	blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
}

void
vmem_destroy(vmem_t *vmp)
{
	struct blist_hdl *p = (struct blist_hdl *)vmp;

	blist_destroy( p->blist );
	_FREE( p, sizeof(struct blist_hdl));
}

/*
 * Timing
 */

/*
 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
 * January 1, 1970. Because it can be called from probe context, it must take no locks.
 */

hrtime_t
dtrace_gethrestime(void)
{
	clock_sec_t  secs;
	clock_nsec_t nanosecs;
	uint64_t     secs64, ns64;

	clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
	secs64 = (uint64_t)secs;
	ns64 = (uint64_t)nanosecs;

	ns64 = ns64 + (secs64 * 1000000000LL);
	return ns64;
}

/*
 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
 * Hence its primary use is to specify intervals.
 */

hrtime_t
dtrace_abs_to_nano(uint64_t elapsed)
{
	static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };

	/*
	 * If this is the first time we've run, get the timebase.
	 * We can use denom == 0 to indicate that sTimebaseInfo is
	 * uninitialised because it makes no sense to have a zero
	 * denominator in a fraction.
	 */

	if (sTimebaseInfo.denom == 0) {
		(void) clock_timebase_info(&sTimebaseInfo);
	}

	/*
	 * Convert to nanoseconds.
	 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
	 *
	 * Provided the final result is representable in 64 bits the following maneuver will
	 * deliver that result without intermediate overflow.
	 */
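	/*
	 * Writing the maneuver out, with eta32 = elapsed >> 32 and
	 * eps32 = elapsed & 0xffffffff:
	 *
	 *   (elapsed * numer) / denom
	 *     = (numer * (eta32 * 2^32 + eps32)) / denom
	 *     = (q32 * 2^32) + ((r32 * 2^32) + (numer * eps32)) / denom
	 *
	 * where q32 and r32 are the quotient and remainder of (numer * eta32) / denom.
	 * This is exactly what the final else-branch below computes.
	 */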
	if (sTimebaseInfo.denom == sTimebaseInfo.numer) {
		return elapsed;
	} else if (sTimebaseInfo.denom == 1) {
		return elapsed * (uint64_t)sTimebaseInfo.numer;
	} else {
		/* Decompose elapsed = eta32 * 2^32 + eps32: */
		uint64_t eta32 = elapsed >> 32;
		uint64_t eps32 = elapsed & 0x00000000ffffffffLL;

		uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;

		/* Form product of elapsed64 (decomposed) and numer: */
		uint64_t mu64 = numer * eta32;
		uint64_t lambda64 = numer * eps32;

		/* Divide the constituents by denom: */
		uint64_t q32 = mu64 / denom;
		uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */

		return (q32 << 32) + ((r32 << 32) + lambda64) / denom;
	}
}

hrtime_t
dtrace_gethrtime(void)
{
	static uint64_t start = 0;

	if (start == 0) {
		start = mach_absolute_time();
	}

	return dtrace_abs_to_nano(mach_absolute_time() - start);
}

/*
 * Atomicity and synchronization
 */
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
{
	if (OSCompareAndSwap((UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) {
		return cmp;
	} else {
		return ~cmp; /* Must return something *other* than cmp */
	}
}

void *
dtrace_casptr(void *target, void *cmp, void *new)
{
	if (OSCompareAndSwapPtr( cmp, new, (void**)target )) {
		return cmp;
	} else {
		return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
	}
}

/*
 * Interrupt manipulation
 */
dtrace_icookie_t
dtrace_interrupt_disable(void)
{
	return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
}

void
dtrace_interrupt_enable(dtrace_icookie_t reenable)
{
	(void)ml_set_interrupts_enabled((boolean_t)reenable);
}

/*
 * MP coordination
 */
static void
dtrace_sync_func(void)
{
}

/*
 * dtrace_sync() is not called from probe context.
 */
void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

/*
 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
 */

extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);

static int
dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
{
#pragma unused(kaddr)

	vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
	dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */

	ASSERT(kaddr + size >= kaddr);

	if (uaddr + size < uaddr ||                             /* Avoid address wrap. */
	    KERN_FAILURE == dtrace_copyio_preflight(uaddr)) {   /* Machine specific setup/constraints. */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return 0;
	}
	return 1;
}

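/*
 * Each dtrace_copy* routine below follows the same pattern: validate the user
 * range with dtrace_copycheck(), perform the copy, record a fault by setting
 * CPU_DTRACE_BADADDR and cpuc_dtrace_illval, and balance the preflight with
 * dtrace_copyio_postflight().
 */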
void
dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	if (dtrace_copycheck( src, dst, len )) {
		if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
		}
		dtrace_copyio_postflight(src);
	}
}

void
dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	size_t actual;

	if (dtrace_copycheck( src, dst, len )) {
		/* copyin as many as 'len' bytes. */
		int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);

		/*
		 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
		 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
		 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
		 * to the caller.
		 */
		if (error && error != ENAMETOOLONG) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
		}
		dtrace_copyio_postflight(src);
	}
}

void
dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	if (dtrace_copycheck( dst, src, len )) {
		if (copyout((const void *)src, dst, (vm_size_t)len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
		}
		dtrace_copyio_postflight(dst);
	}
}

void
dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	size_t actual;

	if (dtrace_copycheck( dst, src, len )) {
		/*
		 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
		 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
		 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
		 * to the caller.
		 */
		if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
		}
		dtrace_copyio_postflight(dst);
	}
}

extern const int copysize_limit_panic;

int
dtrace_copy_maxsize(void)
{
	return copysize_limit_panic;
}


int
dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes)
{
	int maxsize = dtrace_copy_maxsize();
	/*
	 * Partition the copyout into copysize_limit_panic-sized chunks
	 */
	while (nbytes >= (vm_size_t)maxsize) {
		if (copyout(kaddr, uaddr, maxsize) != 0) {
			return EFAULT;
		}

		nbytes -= maxsize;
		uaddr += maxsize;
		kaddr += maxsize;
	}
	if (nbytes > 0) {
		if (copyout(kaddr, uaddr, nbytes) != 0) {
			return EFAULT;
		}
	}

	return 0;
}

uint8_t
dtrace_fuword8(user_addr_t uaddr)
{
	uint8_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return ret;
}

uint16_t
dtrace_fuword16(user_addr_t uaddr)
{
	uint16_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return ret;
}

uint32_t
dtrace_fuword32(user_addr_t uaddr)
{
	uint32_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return ret;
}

uint64_t
dtrace_fuword64(user_addr_t uaddr)
{
	uint64_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return ret;
}

/*
 * Emulation of Solaris fuword / suword
 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
 */

int
fuword8(user_addr_t uaddr, uint8_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword16(user_addr_t uaddr, uint16_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword32(user_addr_t uaddr, uint32_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword64(user_addr_t uaddr, uint64_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
		return -1;
	}

	return 0;
}

void
fuword32_noerr(user_addr_t uaddr, uint32_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
		*value = 0;
	}
}

void
fuword64_noerr(user_addr_t uaddr, uint64_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
		*value = 0;
	}
}

int
suword64(user_addr_t addr, uint64_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}

int
suword32(user_addr_t addr, uint32_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}

/*
 * Miscellaneous
 */
extern boolean_t dtrace_tally_fault(user_addr_t);

boolean_t
dtrace_tally_fault(user_addr_t uaddr)
{
	DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
	cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
	return DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE;
}

#define TOTTY   0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */

int
vuprintf(const char *format, va_list ap)
{
	return prf(format, ap, TOTTY, NULL);
}

/* Not called from probe context */
void
cmn_err( int level, const char *format, ... )
{
#pragma unused(level)
	va_list alist;

	va_start(alist, format);
	vuprintf(format, alist);
	va_end(alist);
	uprintf("\n");
}

const void*
bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *))
{
	const char *base = base0;
	size_t lim;
	int cmp;
	const void *p;
	for (lim = nmemb; lim != 0; lim >>= 1) {
		p = base + (lim >> 1) * size;
		cmp = (*compar)(key, p);
		if (cmp == 0) {
			return p;
		}
		if (cmp > 0) {  /* key > p: move right */
			base = (const char *)p + size;
			lim--;
		}               /* else move left */
	}
	return NULL;
}

/*
 * Runtime and ABI
 */
uintptr_t
dtrace_caller(int ignore)
{
#pragma unused(ignore)
	return -1; /* Just as in Solaris dtrace_asm.s */
}

int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0) {
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	} else {
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);
	}

	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = *(struct frame **)fp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + kernel_stack_size);

				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes) {
		return 0;
	}

	return depth - aframes;
}

int
dtrace_addr_in_module(void* addr, struct modctl *ctl)
{
	return OSKextKextForAddress(addr) == (void*)ctl->mod_address;
}

/*
 * Unconsidered
 */
void
dtrace_vtime_enable(void)
{
}

void
dtrace_vtime_disable(void)
{
}