/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * APPLE NOTE: This file is compiled even if dtrace is unconfig'd. A symbol
 * from this file (_dtrace_register_anon_DOF) always needs to be exported for
 * an external kext to link against.
 */

#if CONFIG_DTRACE

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>

#include <stdarg.h>
#include <string.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/queue.h>
#include <miscfs/devfs/devfs.h>
#include <kern/kalloc.h>

#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
/*
 * pid/proc
 */
/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

/* Not called from probe context */
proc_t *
sprlock(pid_t pid)
{
	proc_t* p;

	if ((p = proc_find(pid)) == PROC_NULL) {
		return PROC_NULL;
	}

	task_suspend(p->task);

	proc_lock(p);

	lck_mtx_lock(&p->p_dtrace_sprlock);

	return p;
}

/* Not called from probe context */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		lck_mtx_unlock(&p->p_dtrace_sprlock);

		proc_unlock(p);

		task_resume(p->task);

		proc_rele(p);
	}
}
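
/*
 * Usage sketch (illustrative comment, not code from this file): callers
 * bracket inspection of a stopped process with this pair.
 *
 *	proc_t *p = sprlock(pid);
 *	if (p != PROC_NULL) {
 *		... examine or patch the suspended process ...
 *		sprunlock(p);	// drop locks, resume the task, release the ref
 *	}
 */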

/*
 * uread/uwrite
 */

// These are not exported from vm_map.h.
extern kern_return_t vm_map_read_user(vm_map_t map, vm_map_address_t src_addr, void *dst_p, vm_size_t size);
extern kern_return_t vm_map_write_user(vm_map_t map, void *src_p, vm_map_address_t dst_addr, vm_size_t size);

/* Not called from probe context */
int
uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != PROC_NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
		vm_map_deallocate(map);
	} else
		ret = KERN_TERMINATED;

	return (int)ret;
}


/* Not called from probe context */
int
uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		/* Find the memory permissions. */
		uint32_t nestingDepth = 999999;
		vm_region_submap_short_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		mach_vm_address_t address = (mach_vm_address_t)a;
		mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;

		ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
		if (ret != KERN_SUCCESS)
			goto done;

		vm_prot_t reprotect;

		if (!(info.protection & VM_PROT_WRITE)) {
			/* Save the original protection values for restoration later */
			reprotect = info.protection;

			if (info.max_protection & VM_PROT_WRITE) {
				/* The memory is not currently writable, but can be made writable. */
				ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect | VM_PROT_WRITE);
			} else {
				/*
				 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
				 *
				 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
				 */
				ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
			}

			if (ret != KERN_SUCCESS)
				goto done;

		} else {
			/* The memory was already writable. */
			reprotect = VM_PROT_NONE;
		}

		ret = vm_map_write_user( map,
			buf,
			(vm_map_address_t)a,
			(vm_size_t)len);

		if (ret != KERN_SUCCESS)
			goto done;

		if (reprotect != VM_PROT_NONE) {
			ASSERT(reprotect & VM_PROT_EXECUTE);
			ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
		}

done:
		vm_map_deallocate(map);
	} else
		ret = KERN_TERMINATED;

	return (int)ret;
}
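
/*
 * Summary of the protection dance above (descriptive only):
 *
 *	region state                    before the write              after the write
 *	----------------------------    --------------------------    ------------------------
 *	already writable                nothing                       nothing
 *	read-only, max_prot allows W    add VM_PROT_WRITE             restore saved protection
 *	read-only, max_prot forbids W   force a private copy via      restore saved protection
 *	                                VM_PROT_COPY|READ|WRITE
 */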

/*
 * cpuvar
 */
lck_mtx_t cpu_lock;
lck_mtx_t mod_lock;

dtrace_cpu_t *cpu_list;
cpu_core_t *cpu_core; /* XXX TLB lockdown? */

/*
 * cred_t
 */

/*
 * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since
 * that function may try to resolve a lazy credential binding, which entails taking the proc_lock.
 */
cred_t *
dtrace_CRED(void)
{
	struct uthread *uthread = get_bsdthread_info(current_thread());

	if (uthread == NULL)
		return NULL;
	else
		return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */
}

#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr))
#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \
					HAS_ALLPRIVS(cr) : \
					PRIV_ISASSERT(&CR_OEPRIV(cr), pr))

int PRIV_POLICY_CHOICE(void* cred, int priv, int all)
{
#pragma unused(priv, all)
	return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */
}

int
PRIV_POLICY_ONLY(void *cr, int priv, int boolean)
{
#pragma unused(priv, boolean)
	return kauth_cred_issuser(cr); /* XXX TODO: HAS_PRIVILEGE(cr, priv); */
}

/* XXX Get around const poisoning using structure assigns */
gid_t
crgetgid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getgid(&copy_cr); }

uid_t
crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); }

/*
 * "cyclic"
 */

/* osfmk/kern/timer_call.h */
typedef void *timer_call_param_t;
typedef void (*timer_call_func_t)(
	timer_call_param_t	param0,
	timer_call_param_t	param1);

typedef struct timer_call {
	queue_chain_t		q_link;
	queue_t			queue;
	timer_call_func_t	func;
	timer_call_param_t	param0;
	timer_call_param_t	param1;
	decl_simple_lock_data(,lock);
	uint64_t		deadline;
	uint64_t		soft_deadline;
	uint32_t		flags;
	boolean_t		async_dequeue;
} timer_call_data_t;

typedef struct timer_call *timer_call_t;

extern void
timer_call_setup(
	timer_call_t		call,
	timer_call_func_t	func,
	timer_call_param_t	param0);

extern boolean_t
timer_call_enter1(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags);

#ifndef TIMER_CALL_CRITICAL
#define TIMER_CALL_CRITICAL 0x1
#define TIMER_CALL_LOCAL 0x2
#endif /* TIMER_CALL_CRITICAL */

extern boolean_t
timer_call_cancel(
	timer_call_t		call);

typedef struct wrap_timer_call {
	cyc_handler_t		hdlr;
	cyc_time_t		when;
	uint64_t		deadline;
	struct timer_call	call;
} wrap_timer_call_t;

#define WAKEUP_REAPER	0x7FFFFFFFFFFFFFFFLL
#define NEARLY_FOREVER	0x7FFFFFFFFFFFFFFELL

static void
_timer_call_apply_cyclic( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)vTChdl;

	(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_CRITICAL | TIMER_CALL_LOCAL );

	/* Did timer_call_remove_cyclic request a wakeup call when this timer call was re-armed? */
	if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
		thread_wakeup((event_t)wrapTC);
}

static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_CRITICAL | TIMER_CALL_LOCAL );

	return (cyclic_id_t)wrapTC;
}

static void
timer_call_remove_cyclic(cyclic_id_t cyclic)
{
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;

	while (!timer_call_cancel(&(wrapTC->call))) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}
}
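
/*
 * Note on the loop above (descriptive): a failed timer_call_cancel() means
 * the callout is currently executing. Setting cyt_interval to WAKEUP_REAPER
 * makes the in-flight _timer_call_apply_cyclic() call thread_wakeup() on this
 * wrapTC once it has re-armed, so this thread can sleep and retry the cancel
 * instead of spinning.
 */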

static void *
timer_call_get_cyclic_arg(cyclic_id_t cyclic)
{
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;

	return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL);
}

cyclic_id_t
cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when)
{
	wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == wrapTC)
		return CYCLIC_NONE;
	else
		return timer_call_add_cyclic( wrapTC, handler, when );
}

void
cyclic_timer_remove(cyclic_id_t cyclic)
{
	ASSERT( cyclic != CYCLIC_NONE );

	timer_call_remove_cyclic( cyclic );
	_FREE((void *)cyclic, M_TEMP);
}

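/*
 * Layout of the block handed around as cyc_list (a sketch of what
 * cyclic_add_omni() allocates and _cyclic_add_omni() indexes below):
 *
 *	+--------------------------+  <- cyc_list as passed to dtrace_xcall()
 *	| cyc_omni_handler_t       |
 *	+--------------------------+  <- cyc_list after the bump below
 *	| cyclic_id_t [NCPU]       |  one id slot per CPU
 *	+--------------------------+
 *	| wrap_timer_call_t [NCPU] |  one timer wrapper per CPU
 *	+--------------------------+
 */
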
static void
_cyclic_add_omni(cyclic_id_list_t cyc_list)
{
	cyc_time_t cT;
	cyc_handler_t cH;
	wrap_timer_call_t *wrapTC;
	cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
	char *t;

	(omni->cyo_online)(omni->cyo_arg, CPU, &cH, &cT);

	t = (char *)cyc_list;
	t += sizeof(cyc_omni_handler_t);
	cyc_list = (cyclic_id_list_t)(uintptr_t)t;

	t += sizeof(cyclic_id_t)*NCPU;
	t += (sizeof(wrap_timer_call_t))*cpu_number();
	wrapTC = (wrap_timer_call_t *)(uintptr_t)t;

	cyc_list[cpu_number()] = timer_call_add_cyclic(wrapTC, &cH, &cT);
}

cyclic_id_list_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
	cyclic_id_list_t cyc_list =
		_MALLOC( (sizeof(wrap_timer_call_t))*NCPU +
				 sizeof(cyclic_id_t)*NCPU +
				 sizeof(cyc_omni_handler_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == cyc_list)
		return (cyclic_id_list_t)CYCLIC_NONE;

	*(cyc_omni_handler_t *)cyc_list = *omni;
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_add_omni, (void *)cyc_list);

	return cyc_list;
}

static void
_cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
	cyc_omni_handler_t *omni = (cyc_omni_handler_t *)cyc_list;
	void *oarg;
	cyclic_id_t cid;
	char *t;

	t = (char *)cyc_list;
	t += sizeof(cyc_omni_handler_t);
	cyc_list = (cyclic_id_list_t)(uintptr_t)t;

	cid = cyc_list[cpu_number()];
	oarg = timer_call_get_cyclic_arg(cid);

	timer_call_remove_cyclic( cid );
	(omni->cyo_offline)(omni->cyo_arg, CPU, oarg);
}

void
cyclic_remove_omni(cyclic_id_list_t cyc_list)
{
	ASSERT( cyc_list != (cyclic_id_list_t)CYCLIC_NONE );

	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)_cyclic_remove_omni, (void *)cyc_list);
	_FREE(cyc_list, M_TEMP);
}

typedef struct wrap_thread_call {
	thread_call_t TChdl;
	cyc_handler_t hdlr;
	cyc_time_t when;
	uint64_t deadline;
} wrap_thread_call_t;

/*
 * _cyclic_apply will run on some thread under kernel_task. That's OK for the
 * cleaner and the deadman, but too distant in time and place for the profile provider.
 */
static void
_cyclic_apply( void *ignore, void *vTChdl )
{
#pragma unused(ignore)
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)vTChdl;

	(*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg );

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) );
	(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

	/* Did cyclic_remove request a wakeup call when this thread call was re-armed? */
	if (wrapTC->when.cyt_interval == WAKEUP_REAPER)
		thread_wakeup((event_t)wrapTC);
}

cyclic_id_t
cyclic_add(cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK);
	if (NULL == wrapTC)
		return CYCLIC_NONE;

	wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	ASSERT(when->cyt_when == 0);
	ASSERT(when->cyt_interval < WAKEUP_REAPER);

	nanoseconds_to_absolutetime(wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval);

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
	(void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline );

	return (cyclic_id_t)wrapTC;
}

static void
noop_cyh_func(void * ignore)
{
#pragma unused(ignore)
}

void
cyclic_remove(cyclic_id_t cyclic)
{
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;

	ASSERT(cyclic != CYCLIC_NONE);

	while (!thread_call_cancel(wrapTC->TChdl)) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}

	if (thread_call_free(wrapTC->TChdl))
		_FREE(wrapTC, M_TEMP);
	else {
		/* Gut this cyclic and move on ... */
		wrapTC->hdlr.cyh_func = noop_cyh_func;
		wrapTC->when.cyt_interval = NEARLY_FOREVER;
	}
}

/*
 * timeout / untimeout (converted to dtrace_timeout / dtrace_untimeout due to name collision)
 */

thread_call_t
dtrace_timeout(void (*func)(void *, void *), void* arg, uint64_t nanos)
{
#pragma unused(arg)
	thread_call_t call = thread_call_allocate(func, NULL);

	nanoseconds_to_absolutetime(nanos, &nanos);

	/*
	 * This method does not use clock_deadline_for_periodic_event() because it is a one-shot,
	 * and clock drift on later invocations is not a worry.
	 */
	uint64_t deadline = mach_absolute_time() + nanos;

	thread_call_enter_delayed(call, deadline);

	return call;
}

/*
 * ddi
 */
void
ddi_report_dev(dev_info_t *devi)
{
#pragma unused(devi)
}

#define NSOFT_STATES 32 /* XXX No more than 32 clients at a time, please. */
static void *soft[NSOFT_STATES];

int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
#pragma unused(n_items)
	int i;

	for (i = 0; i < NSOFT_STATES; ++i) soft[i] = _MALLOC(size, M_TEMP, M_ZERO | M_WAITOK);
	*(size_t *)state_p = size;
	return 0;
}

int
ddi_soft_state_zalloc(void *state, int item)
{
#pragma unused(state)
	if (item < NSOFT_STATES)
		return DDI_SUCCESS;
	else
		return DDI_FAILURE;
}

void *
ddi_get_soft_state(void *state, int item)
{
#pragma unused(state)
	ASSERT(item < NSOFT_STATES);
	return soft[item];
}

int
ddi_soft_state_free(void *state, int item)
{
	ASSERT(item < NSOFT_STATES);
	bzero( soft[item], (size_t)state );
	return DDI_SUCCESS;
}

void
ddi_soft_state_fini(void **state_p)
{
#pragma unused(state_p)
	int i;

	for (i = 0; i < NSOFT_STATES; ++i) _FREE( soft[i], M_TEMP );
}

static unsigned int gRegisteredProps = 0;
static struct {
	char name[32];		/* enough for "dof-data-" + digits */
	int *data;
	uint_t nelements;
} gPropTable[16];

kern_return_t _dtrace_register_anon_DOF(char *, uchar_t *, uint_t);

kern_return_t
_dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements)
{
	if (gRegisteredProps < sizeof(gPropTable)/sizeof(gPropTable[0])) {
		int *p = (int *)_MALLOC(nelements*sizeof(int), M_TEMP, M_WAITOK);

		if (NULL == p)
			return KERN_FAILURE;

		strlcpy(gPropTable[gRegisteredProps].name, name, sizeof(gPropTable[0].name));
		gPropTable[gRegisteredProps].nelements = nelements;
		gPropTable[gRegisteredProps].data = p;

		while (nelements-- > 0) {
			*p++ = (int)(*data++);
		}

		gRegisteredProps++;
		return KERN_SUCCESS;
	}
	else
		return KERN_FAILURE;
}
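
/*
 * Descriptive note: each registered DOF blob is widened byte-by-byte into an
 * int array so ddi_prop_lookup_int_array() below can return it in the form
 * the Solaris DDI expects; lookups match on the saved property name
 * ("dof-data-" plus digits, per the table declaration above).
 */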

int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    const char *name, int **data, uint_t *nelements)
{
#pragma unused(match_dev,dip,flags)
	unsigned int i;
	for (i = 0; i < gRegisteredProps; ++i)
	{
		if (0 == strncmp(name, gPropTable[i].name,
					sizeof(gPropTable[i].name))) {
			*data = gPropTable[i].data;
			*nelements = gPropTable[i].nelements;
			return DDI_SUCCESS;
		}
	}
	return DDI_FAILURE;
}

int
ddi_prop_free(void *buf)
{
	_FREE(buf, M_TEMP);
	return DDI_SUCCESS;
}

int
ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); }

int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
#pragma unused(spec_type,node_type,flag)
	dev_t dev = makedev( ddi_driver_major(dip), minor_num );

	if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 ))
		return DDI_FAILURE;
	else
		return DDI_SUCCESS;
}

void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
#pragma unused(dip,name)
	/* XXX called from dtrace_detach, so NOTREACHED for now. */
}

major_t
getemajor( dev_t d )
{
	return (major_t) major(d);
}

minor_t
getminor ( dev_t d )
{
	return (minor_t) minor(d);
}

dev_t
makedevice(major_t major, minor_t minor)
{
	return makedev( major, minor );
}

int ddi_getprop(dev_t dev, dev_info_t *dip, int flags, const char *name, int defvalue)
{
#pragma unused(dev, dip, flags, name)

	return defvalue;
}

/*
 * Kernel Debug Interface
 */
int
kdi_dtrace_set(kdi_dtrace_set_t ignore)
{
#pragma unused(ignore)
	return 0; /* Success */
}

extern void Debugger(const char*);

void
debug_enter(char *c) { Debugger(c); }

/*
 * kmem
 */

void *
dt_kmem_alloc(size_t size, int kmflag)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
 */
#if defined(DTRACE_MEMORY_ZONES)
	return dtrace_alloc(size);
#else
	return kalloc(size);
#endif
}

void *
dt_kmem_zalloc(size_t size, int kmflag)
{
#pragma unused(kmflag)

/*
 * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact).
 * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock.
 */
#if defined(DTRACE_MEMORY_ZONES)
	void* buf = dtrace_alloc(size);
#else
	void* buf = kalloc(size);
#endif

	if(!buf)
		return NULL;

	bzero(buf, size);

	return buf;
}

void
dt_kmem_free(void *buf, size_t size)
{
#pragma unused(size)
	/*
	 * DTrace relies on this; it does a lot of NULL frees.
	 * A NULL free causes the debug builds to panic.
	 */
	if (buf == NULL) return;

	ASSERT(size > 0);

#if defined(DTRACE_MEMORY_ZONES)
	dtrace_free(buf, size);
#else
	kfree(buf, size);
#endif
}



/*
 * aligned kmem allocator
 * align should be a power of two
 */

void* dt_kmem_alloc_aligned(size_t size, size_t align, int kmflag)
{
	void* buf;
	intptr_t p;
	void** buf_backup;

	buf = dt_kmem_alloc(align + sizeof(void*) + size, kmflag);

	if(!buf)
		return NULL;

	p = (intptr_t)buf;
	p += sizeof(void*);		/* now we have enough room to store the backup */
	p = P2ROUNDUP(p, align);	/* and now we're aligned */

	buf_backup = (void**)(p - sizeof(void*));
	*buf_backup = buf;		/* back up the address we need to free */

	return (void*)p;
}
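
/*
 * Resulting layout (a sketch; "p" is the pointer the caller receives):
 *
 *	buf                            p - sizeof(void*)    p
 *	 |                              |                   |
 *	 v                              v                   v
 *	+----------------------------+-------------------+-----------------+
 *	| slack from P2ROUNDUP       | saved copy of     | size bytes,     |
 *	| (0 .. align-1 bytes)       | buf               | align-aligned   |
 *	+----------------------------+-------------------+-----------------+
 *
 * dt_kmem_free_aligned() reads the saved pointer just below p to recover the
 * address originally returned by dt_kmem_alloc().
 */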

void* dt_kmem_zalloc_aligned(size_t size, size_t align, int kmflag)
{
	void* buf;

	buf = dt_kmem_alloc_aligned(size, align, kmflag);

	if(!buf)
		return NULL;

	bzero(buf, size);

	return buf;
}

void dt_kmem_free_aligned(void* buf, size_t size)
{
#pragma unused(size)
	intptr_t p;
	void** buf_backup;

	p = (intptr_t)buf;
	p -= sizeof(void*);
	buf_backup = (void**)(p);

	dt_kmem_free(*buf_backup, size + ((char*)buf - (char*)*buf_backup));
}

/*
 * dtrace wants to manage just a single block: dtrace_state_percpu_t * NCPU, and
 * doesn't specify constructor, destructor, or reclaim methods.
 * At present, it always zeroes the block it obtains from kmem_cache_alloc().
 * We'll manage this constricted use of kmem_cache with ordinary _MALLOC and _FREE.
 */
kmem_cache_t *
kmem_cache_create(
	const char *name,	/* descriptive name for this cache */
	size_t bufsize,		/* size of the objects it manages */
	size_t align,		/* required object alignment */
	int (*constructor)(void *, void *, int), /* object constructor */
	void (*destructor)(void *, void *),	/* object destructor */
	void (*reclaim)(void *), /* memory reclaim callback */
	void *private,		/* pass-thru arg for constr/destr/reclaim */
	vmem_t *vmp,		/* vmem source for slab allocation */
	int cflags)		/* cache creation flags */
{
#pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags)
	return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */
}

void *
kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
{
#pragma unused(kmflag)
	size_t bufsize = (size_t)cp;
	return (void *)_MALLOC(bufsize, M_TEMP, M_WAITOK);
}

void
kmem_cache_free(kmem_cache_t *cp, void *buf)
{
#pragma unused(cp)
	_FREE(buf, M_TEMP);
}

void
kmem_cache_destroy(kmem_cache_t *cp)
{
#pragma unused(cp)
}
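
/*
 * Minimal usage sketch (illustrative only; the cache name is hypothetical,
 * and KM_SLEEP stands in for the Solaris flag that kmflag ignores anyway):
 *
 *	kmem_cache_t *kc = kmem_cache_create("dtrace_state_cache",
 *	    sizeof(dtrace_state_percpu_t) * NCPU, 0,
 *	    NULL, NULL, NULL, NULL, NULL, 0);
 *	void *buf = kmem_cache_alloc(kc, KM_SLEEP);
 *	...
 *	kmem_cache_free(kc, buf);
 *	kmem_cache_destroy(kc);
 *
 * Since the "cache" is only the object size smuggled through the pointer,
 * the no-op kmem_cache_destroy() leaks nothing.
 */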

/*
 * taskq
 */
extern void thread_call_setup(thread_call_t, thread_call_func_t, thread_call_param_t); /* XXX MACH_KERNEL_PRIVATE */

static void
_taskq_apply( task_func_t func, thread_call_param_t arg )
{
	func( (void *)arg );
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
#pragma unused(name,nthreads,pri,minalloc,maxalloc,flags)

	return (taskq_t *)thread_call_allocate( (thread_call_func_t)_taskq_apply, NULL );
}

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
#pragma unused(flags)
	thread_call_setup( (thread_call_t) tq, (thread_call_func_t)_taskq_apply, (thread_call_param_t)func );
	thread_call_enter1( (thread_call_t) tq, (thread_call_param_t)arg );
	return (taskqid_t) tq /* for lack of anything better */;
}

void
taskq_destroy(taskq_t *tq)
{
	thread_call_cancel( (thread_call_t) tq );
	thread_call_free( (thread_call_t) tq );
}

pri_t maxclsyspri;

/*
 * vmem (Solaris "slab" allocator) used by DTrace solely to hand out resource ids
 */
typedef unsigned int u_daddr_t;
#include "blist.h"

/* By passing around blist *handles*, the underlying blist can be resized as needed. */
struct blist_hdl {
	blist_t blist;
};

vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5,
    void *ignore6, vmem_t *source, size_t qcache_max, int vmflag)
{
#pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag)
	blist_t bl;
	struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK);

	ASSERT(quantum == 1);
	ASSERT(NULL == ignore5);
	ASSERT(NULL == ignore6);
	ASSERT(NULL == source);
	ASSERT(0 == qcache_max);
	ASSERT(vmflag & VMC_IDENTIFIER);

	size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */

	p->blist = bl = blist_create( size );
	blist_free(bl, 0, size);
	if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */

	return (vmem_t *)p;
}

void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
#pragma unused(vmflag)
	struct blist_hdl *q = (struct blist_hdl *)vmp;
	blist_t bl = q->blist;
	daddr_t p;

	p = blist_alloc(bl, (daddr_t)size);

	if ((daddr_t)-1 == p) {
		blist_resize(&bl, (bl->bl_blocks) << 1, 1);
		q->blist = bl;
		p = blist_alloc(bl, (daddr_t)size);
		if ((daddr_t)-1 == p)
			panic("vmem_alloc: failure after blist_resize!");
	}

	return (void *)(uintptr_t)p;
}
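
/*
 * Descriptive note: when the blist is exhausted, vmem_alloc() doubles it in
 * place via blist_resize() and retries once; only a second failure panics.
 * The blist_hdl indirection is what lets the resize swap the underlying
 * blist without invalidating the vmem_t held by callers.
 */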

void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	struct blist_hdl *p = (struct blist_hdl *)vmp;

	blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size );
}

void
vmem_destroy(vmem_t *vmp)
{
	struct blist_hdl *p = (struct blist_hdl *)vmp;

	blist_destroy( p->blist );
	_FREE( p, M_TEMP );
}

/*
 * Timing
 */

/*
 * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at
 * January 1, 1970. Because it can be called from probe context, it must take no locks.
 */

hrtime_t
dtrace_gethrestime(void)
{
	clock_sec_t		secs;
	clock_nsec_t		nanosecs;
	uint64_t		secs64, ns64;

	clock_get_calendar_nanotime_nowait(&secs, &nanosecs);
	secs64 = (uint64_t)secs;
	ns64 = (uint64_t)nanosecs;

	ns64 = ns64 + (secs64 * 1000000000LL);
	return ns64;
}

/*
 * dtrace_gethrtime() provides high-resolution timestamps with machine-dependent origin.
 * Hence its primary use is to specify intervals.
 */

hrtime_t
dtrace_abs_to_nano(uint64_t elapsed)
{
	static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };

	/*
	 * If this is the first time we've run, get the timebase.
	 * We can use denom == 0 to indicate that sTimebaseInfo is
	 * uninitialised because it makes no sense to have a zero
	 * denominator in a fraction.
	 */

	if ( sTimebaseInfo.denom == 0 ) {
		(void) clock_timebase_info(&sTimebaseInfo);
	}

	/*
	 * Convert to nanoseconds.
	 * return (elapsed * (uint64_t)sTimebaseInfo.numer)/(uint64_t)sTimebaseInfo.denom;
	 *
	 * Provided the final result is representable in 64 bits the following maneuver will
	 * deliver that result without intermediate overflow.
	 */
	if (sTimebaseInfo.denom == sTimebaseInfo.numer)
		return elapsed;
	else if (sTimebaseInfo.denom == 1)
		return elapsed * (uint64_t)sTimebaseInfo.numer;
	else {
		/* Decompose elapsed = eta32 * 2^32 + eps32: */
		uint64_t eta32 = elapsed >> 32;
		uint64_t eps32 = elapsed & 0x00000000ffffffffLL;

		uint32_t numer = sTimebaseInfo.numer, denom = sTimebaseInfo.denom;

		/* Form product of elapsed64 (decomposed) and numer: */
		uint64_t mu64 = numer * eta32;
		uint64_t lambda64 = numer * eps32;

		/* Divide the constituents by denom: */
		uint64_t q32 = mu64/denom;
		uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */

		return (q32 << 32) + ((r32 << 32) + lambda64)/denom;
	}
}
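
/*
 * Why the maneuver in dtrace_abs_to_nano() is exact (a short derivation):
 *
 *	elapsed * numer = (eta32 * 2^32 + eps32) * numer
 *	                = mu64 * 2^32 + lambda64
 *	mu64 = q32 * denom + r32
 *
 *	=> (elapsed * numer) / denom
 *	 = q32 * 2^32 + (r32 * 2^32 + lambda64) / denom
 *
 * mu64 and lambda64 are 32x32-bit products, so they fit in 64 bits, and
 * r32 < denom bounds (r32 << 32) + lambda64 below (denom + numer) * 2^32,
 * which does not wrap for realistic timebase fractions.
 */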

hrtime_t
dtrace_gethrtime(void)
{
	static uint64_t start = 0;

	if (start == 0)
		start = mach_absolute_time();

	return dtrace_abs_to_nano(mach_absolute_time() - start);
}

/*
 * Atomicity and synchronization
 */
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
{
	if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target ))
		return cmp;
	else
		return ~cmp; /* Must return something *other* than cmp */
}

void *
dtrace_casptr(void *target, void *cmp, void *new)
{
	if (OSCompareAndSwapPtr( cmp, new, (void**)target ))
		return cmp;
	else
		return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */
}

/*
 * Interrupt manipulation
 */
dtrace_icookie_t
dtrace_interrupt_disable(void)
{
	return (dtrace_icookie_t)ml_set_interrupts_enabled(FALSE);
}

void
dtrace_interrupt_enable(dtrace_icookie_t reenable)
{
	(void)ml_set_interrupts_enabled((boolean_t)reenable);
}

/*
 * MP coordination
 */
static void
dtrace_sync_func(void) {}

/*
 * dtrace_sync() is not called from probe context.
 */
void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

/*
 * The dtrace_copyin/out/instr and dtrace_fuword* routines can be called from probe context.
 */

extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);

static int
dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size)
{
#pragma unused(kaddr)

	vm_offset_t recover = dtrace_set_thread_recover( current_thread(), 0 ); /* Snare any extant recovery point. */
	dtrace_set_thread_recover( current_thread(), recover ); /* Put it back. We *must not* re-enter and overwrite. */

	ASSERT(kaddr + size >= kaddr);

	if (ml_at_interrupt_context() ||	/* Avoid possible copyio page fault on int stack, which panics! */
	    0 != recover ||			/* Avoid reentrancy into copyio facility. */
	    uaddr + size < uaddr ||		/* Avoid address wrap. */
	    KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */
	{
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}
	return (1);
}

void
dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	if (dtrace_copycheck( src, dst, len )) {
		if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
		}
		dtrace_copyio_postflight(src);
	}
}

void
dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	size_t actual;

	if (dtrace_copycheck( src, dst, len )) {
		/* copyin as many as 'len' bytes. */
		int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual);

		/*
		 * ENAMETOOLONG is returned when 'len' bytes have been copied in but the NUL terminator was
		 * not encountered. That does not require raising CPU_DTRACE_BADADDR, and we press on.
		 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
		 * to the caller.
		 */
		if (error && error != ENAMETOOLONG) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = src;
		}
		dtrace_copyio_postflight(src);
	}
}

void
dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	if (dtrace_copycheck( dst, src, len )) {
		if (copyout((const void *)src, dst, (vm_size_t)len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
		}
		dtrace_copyio_postflight(dst);
	}
}

void
dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags)
{
#pragma unused(flags)

	size_t actual;

	if (dtrace_copycheck( dst, src, len )) {

		/*
		 * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was
		 * not encountered. We raise CPU_DTRACE_BADADDR in that case.
		 * Note that we do *not* stuff a NUL terminator when returning ENAMETOOLONG, that's left
		 * to the caller.
		 */
		if (copyoutstr((const void *)src, dst, (size_t)len, &actual)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = dst;
		}
		dtrace_copyio_postflight(dst);
	}
}

uint8_t
dtrace_fuword8(user_addr_t uaddr)
{
	uint8_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return(ret);
}

uint16_t
dtrace_fuword16(user_addr_t uaddr)
{
	uint16_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return(ret);
}

uint32_t
dtrace_fuword32(user_addr_t uaddr)
{
	uint32_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return(ret);
}

uint64_t
dtrace_fuword64(user_addr_t uaddr)
{
	uint64_t ret = 0;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	if (dtrace_copycheck( uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const user_addr_t)uaddr, (char *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		}
		dtrace_copyio_postflight(uaddr);
	}
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return(ret);
}

/*
 * Emulation of Solaris fuword / suword
 * Called from the fasttrap provider, so the use of copyin/out requires fewer safeguards.
 */

int
fuword8(user_addr_t uaddr, uint8_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword16(user_addr_t uaddr, uint16_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword32(user_addr_t uaddr, uint32_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t)) != 0) {
		return -1;
	}

	return 0;
}

int
fuword64(user_addr_t uaddr, uint64_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t)) != 0) {
		return -1;
	}

	return 0;
}

void
fuword8_noerr(user_addr_t uaddr, uint8_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint8_t))) {
		*value = 0;
	}
}

void
fuword16_noerr(user_addr_t uaddr, uint16_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint16_t))) {
		*value = 0;
	}
}

void
fuword32_noerr(user_addr_t uaddr, uint32_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint32_t))) {
		*value = 0;
	}
}

void
fuword64_noerr(user_addr_t uaddr, uint64_t *value)
{
	if (copyin((const user_addr_t)uaddr, (char *)value, sizeof(uint64_t))) {
		*value = 0;
	}
}

int
suword64(user_addr_t addr, uint64_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}

int
suword32(user_addr_t addr, uint32_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}

int
suword16(user_addr_t addr, uint16_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}

int
suword8(user_addr_t addr, uint8_t value)
{
	if (copyout((const void *)&value, addr, sizeof(value)) != 0) {
		return -1;
	}

	return 0;
}


/*
 * Miscellaneous
 */
extern boolean_t dtrace_tally_fault(user_addr_t);

boolean_t
dtrace_tally_fault(user_addr_t uaddr)
{
	DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
	cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
	return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE );
}
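
/*
 * Descriptive note: dtrace_tally_fault() is reached from the fault path when
 * a probe touches a bad address. It records the faulting address, and its
 * return value tells the trap handler whether the fault is recoverable:
 * TRUE only if the probe was running with CPU_DTRACE_NOFAULT set.
 */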

#define TOTTY	0x02
extern int prf(const char *, va_list, int, struct tty *); /* bsd/kern/subr_prf.h */

int
vuprintf(const char *format, va_list ap)
{
	return prf(format, ap, TOTTY, NULL);
}

/* Not called from probe context */
void cmn_err( int level, const char *format, ... )
{
#pragma unused(level)
	va_list alist;

	va_start(alist, format);
	vuprintf(format, alist);
	va_end(alist);
	uprintf("\n");
}

/*
 * History:
 *  2002-01-24	gvdl	Initial implementation of strstr
 */

__private_extern__ const char *
strstr(const char *in, const char *str)
{
	char c;
	size_t len;

	c = *str++;
	if (!c)
		return (const char *) in;	// Trivial empty string case

	len = strlen(str);
	do {
		char sc;

		do {
			sc = *in++;
			if (!sc)
				return (char *) 0;
		} while (sc != c);
	} while (strncmp(in, str, len) != 0);

	return (const char *) (in - 1);
}

/*
 * Runtime and ABI
 */
uintptr_t
dtrace_caller(int ignore)
{
#pragma unused(ignore)
	return -1; /* Just as in Solaris dtrace_asm.s */
}

int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)dtrace_get_cpu_int_stack_top();
	else
		stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size);

	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = *(struct frame **)fp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread());

				minfp = (struct frame *)kstack_base;
				stacktop = (struct frame *)(kstack_base + kernel_stack_size);

				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}
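
/*
 * Descriptive note on the accounting above: depth counts every frame walked,
 * including this function's own (hence the aframes++) and the artificial
 * frames the caller asked to skip; the net value returned is the depth as
 * seen from the probe site, or 0 if only artificial frames were found.
 */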

/*
 * Unconsidered
 */
void
dtrace_vtime_enable(void) {}

void
dtrace_vtime_disable(void) {}

#else /* else ! CONFIG_DTRACE */

#include <sys/types.h>
#include <mach/vm_types.h>
#include <mach/kmod.h>

/*
 * This exists to prevent build errors when dtrace is unconfigured.
 */

kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t);

kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) {
#pragma unused(arg1, arg2, arg3)

	return KERN_FAILURE;
}

#endif /* CONFIG_DTRACE */