/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>

#include <sys/kdebug.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/sysctl.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

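/*
 * Global state for the kdebug trace facility: the event ring buffer
 * (kd_buffer, kd_bufptr, kd_buflast, kd_readlast), the thread map used to
 * translate thread ids to command names, and the enable/filter settings
 * that kdbg_control() manipulates on behalf of user space.
 */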
/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
kd_buf * kd_bufptr;
unsigned int kd_buftomem=0;
kd_buf * kd_buffer=0;
kd_buf * kd_buflast;
kd_buf * kd_readlast;
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdebug_enable=0;
unsigned int kdebug_nolog=1;
unsigned int kdlog_beg=0;
unsigned int kdlog_end=0;
unsigned int kdlog_value1=0;
unsigned int kdlog_value2=0;
unsigned int kdlog_value3=0;
unsigned int kdlog_value4=0;

unsigned long long kd_prev_timebase = 0LL;
decl_simple_lock_data(,kd_trace_lock);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;    /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

#ifdef ppc
extern natural_t rtclock_decrementer_min;
#endif /* ppc */

struct kdebug_args {
    int code;
    int arg1;
    int arg2;
    int arg3;
    int arg4;
    int arg5;
};

struct krt
{
    kd_threadmap *map;    /* pointer to the map buffer */
    int count;
    int maxcount;
    struct proc *p;
};

typedef struct krt krt_t;

/* Support syscall SYS_kdebug_trace */
kdebug_trace(p, uap, retval)
struct proc *p;
struct kdebug_args *uap;
register_t *retval;
{
    if (kdebug_nolog)
        return(EINVAL);

    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
    return(0);
}

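/*
 * kernel_debug: record one trace event in the ring buffer. Interrupts are
 * disabled and kd_trace_lock is held while the entry is filled in. The
 * PID, range, and value filters configured through kdbg_control() may
 * cause the event to be dropped. arg5 is overwritten with the current
 * thread, with KDBG_CPU_MASK or'ed in when not running on CPU 0.
 */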
void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf * kd;
    struct proc *curproc;
    int s;
    unsigned long long now;
    mach_timespec_t *tsp;

    s = ml_set_interrupts_enabled(FALSE);

    if (kdebug_nolog)
    {
        ml_set_interrupts_enabled(s);
        return;
    }

    usimple_lock(&kd_trace_lock);
    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        if ((debugid < kdlog_beg) || (debugid > kdlog_end)
            && (debugid >> 24 != DBG_TRACE))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = (int)current_thread();
    if (cpu_number())
        kd->arg5 |= KDBG_CPU_MASK;

    ml_get_timebase((unsigned long long *)&kd->timestamp);

    /* Watch for out of order timestamps */
    now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
          (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

    if (now < kd_prev_timebase)
    {
        /* timestamps are out of order -- adjust */
        kd_prev_timebase++;
        tsp = (mach_timespec_t *)&kd_prev_timebase;
        kd->timestamp.tv_sec = tsp->tv_sec;
        kd->timestamp.tv_nsec = tsp->tv_nsec;
    }
    else
    {
        /* Then just store the previous timestamp */
        kd_prev_timebase = now;
    }


    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_nolog = 1;
        kdebug_flags |= KDBG_WRAPPED;
    }
    usimple_unlock(&kd_trace_lock);
    ml_set_interrupts_enabled(s);
}

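/*
 * kernel_debug1: identical to kernel_debug except that the caller's arg5
 * is stored as given instead of being replaced with the current thread.
 */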
void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf * kd;
    struct proc *curproc;
    int s;
    unsigned long long now;
    mach_timespec_t *tsp;

    s = ml_set_interrupts_enabled(FALSE);

    if (kdebug_nolog)
    {
        ml_set_interrupts_enabled(s);
        return;
    }

    usimple_lock(&kd_trace_lock);
    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        if ((debugid < kdlog_beg) || (debugid > kdlog_end)
            && (debugid >> 24 != DBG_TRACE))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))
        {
            usimple_unlock(&kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = arg5;
    ml_get_timebase((unsigned long long *)&kd->timestamp);

    /* Watch for out of order timestamps */
    now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
          (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

    if (now < kd_prev_timebase)
    {
        /* timestamps are out of order -- adjust */
        kd_prev_timebase++;
        tsp = (mach_timespec_t *)&kd_prev_timebase;
        kd->timestamp.tv_sec = tsp->tv_sec;
        kd->timestamp.tv_nsec = tsp->tv_nsec;
    }
    else
    {
        /* Then just store the previous timestamp */
        kd_prev_timebase = now;
    }

    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_nolog = 1;
        kdebug_flags |= KDBG_WRAPPED;
    }
    usimple_unlock(&kd_trace_lock);
    ml_set_interrupts_enabled(s);
}


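/*
 * kdbg_bootstrap: allocate the trace ring buffer (nkdbufs entries) from
 * kernel_map and initialize the buffer pointers and trace lock.
 * Returns 0 on success or EINVAL if the allocation fails.
 */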
kdbg_bootstrap()
{
    kd_bufsize = nkdbufs * sizeof(kd_buf);
    if (kmem_alloc(kernel_map, &kd_buftomem,
                   (vm_size_t)kd_bufsize) == KERN_SUCCESS)
        kd_buffer = (kd_buf *) kd_buftomem;
    else
        kd_buffer = (kd_buf *) 0;
    kdebug_flags &= ~KDBG_WRAPPED;
    if (kd_buffer) {
        simple_lock_init(&kd_trace_lock);
        kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
        kd_bufptr = kd_buffer;
        kd_buflast = &kd_bufptr[nkdbufs];
        kd_readlast = kd_bufptr;
        kd_prev_timebase = 0LL;
        return(0);
    } else {
        kd_bufsize = 0;
        kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
        return(EINVAL);
    }
}

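/*
 * kdbg_reinit: disable tracing, release any existing trace and thread map
 * buffers, and rebuild the trace buffer via kdbg_bootstrap().
 */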
kdbg_reinit()
{
    int x;
    int ret = 0;

    kdebug_enable = 0;
    kdebug_nolog = 1;

    if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
        kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    ret = kdbg_bootstrap();

    return(ret);
}

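/*
 * kdbg_trace_string: pack up to sizeof(dbg_parms) bytes of the process'
 * command name (p_comm) into four longs so the name can be emitted as the
 * four arguments of a trace event.
 */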
void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
    int i;
    char *dbg_nameptr;
    int dbg_namelen;
    long dbg_parms[4];

    if (!proc)
    {
        *arg1 = 0;
        *arg2 = 0;
        *arg3 = 0;
        *arg4 = 0;
        return;
    }

    /* Collect the command name (p_comm) for tracing */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = strlen(proc->p_comm);
    dbg_parms[0] = 0L;
    dbg_parms[1] = 0L;
    dbg_parms[2] = 0L;
    dbg_parms[3] = 0L;

    if (dbg_namelen > sizeof(dbg_parms))
        dbg_namelen = sizeof(dbg_parms);

    for (i = 0; dbg_namelen > 0; i++)
    {
        dbg_parms[i] = *(long*)dbg_nameptr;
        dbg_nameptr += sizeof(long);
        dbg_namelen -= sizeof(long);
    }

    *arg1 = dbg_parms[0];
    *arg2 = dbg_parms[1];
    *arg3 = dbg_parms[2];
    *arg4 = dbg_parms[3];
}

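/*
 * kdbg_resolve_map: per-activation callback used by kdbg_mapinit() to fill
 * in one kd_threadmap entry (thread id plus owning command name).
 */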
kdbg_resolve_map(thread_act_t th_act, krt_t *t)
{
    kd_threadmap *mapptr;

    if (t->count < t->maxcount)
    {
        mapptr = &t->map[t->count];
        mapptr->thread = (unsigned int)getshuttle_thread(th_act);
        mapptr->valid = 1;
        (void) strncpy (mapptr->command, t->p->p_comm,
                        sizeof(t->p->p_comm)-1);
        mapptr->command[sizeof(t->p->p_comm)-1] = '\0';
        t->count++;
    }
}

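/*
 * kdbg_mapinit: build the thread map. Walks allproc to count thread
 * activations, allocates kd_mapptr, and records a thread/command pair for
 * every activation via kdbg_resolve_map().
 */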
void kdbg_mapinit()
{
    struct proc *p;
    struct krt akrt;

    if (kdebug_flags & KDBG_MAPINIT)
        return;

    /* Calculate size of thread map buffer */
    for (p = allproc.lh_first, kd_mapcount = 0; p;
         p = p->p_list.le_next)
    {
        kd_mapcount += get_task_numacts((task_t)p->task);
    }

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
    if ((kmem_alloc(kernel_map, &kd_maptomem,
                    (vm_size_t)kd_mapsize) == KERN_SUCCESS))
        kd_mapptr = (kd_threadmap *) kd_maptomem;
    else
        kd_mapptr = (kd_threadmap *) 0;

    if (kd_mapptr)
    {
        kdebug_flags |= KDBG_MAPINIT;
        /* Initialize thread map data */
        akrt.map = kd_mapptr;
        akrt.count = 0;
        akrt.maxcount = kd_mapcount;

        for (p = allproc.lh_first; p; p = p->p_list.le_next)
        {
            akrt.p = p;
            task_act_iterate_wth_args((task_t)p->task, kdbg_resolve_map, &akrt);
        }
    }
}

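/*
 * kdbg_clear: disable tracing, reset all filter flags, and free both the
 * trace buffer and the thread map buffer.
 */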
kdbg_clear()
{
    int x;

    /* Clean up the trace buffer */
    global_state_pid = -1;
    kdebug_enable = 0;
    kdebug_nolog = 1;
    kdebug_flags &= ~KDBG_BUFINIT;
    kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
    kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);
    kd_buffer = (kd_buf *)0;
    kd_bufsize = 0;
    kd_prev_timebase = 0LL;

    /* Clean up the thread map buffer */
    kdebug_flags &= ~KDBG_MAPINIT;
    kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
    kd_mapptr = (kd_threadmap *) 0;
    kd_mapsize = 0;
    kd_mapcount = 0;
}

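/*
 * kdbg_setpid: enable (value2 == 1) or disable tracing for a single pid by
 * toggling P_KDEBUG on the target proc; enabling also switches the global
 * filter to pid-check mode.
 */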
kdbg_setpid(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)  /* turn on pid check for this and all pids */
            {
                kdebug_flags |= KDBG_PIDCHECK;
                kdebug_flags &= ~KDBG_PIDEXCLUDE;
                p->p_flag |= P_KDEBUG;
            }
            else  /* turn off pid check for this pid value */
            {
                /* Don't turn off all pid checking though */
                /* kdebug_flags &= ~KDBG_PIDCHECK;*/
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for pid exclusion in the trace buffer */
kdbg_setpidex(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)  /* turn on pid exclusion */
            {
                kdebug_flags |= KDBG_PIDEXCLUDE;
                kdebug_flags &= ~KDBG_PIDCHECK;
                p->p_flag |= P_KDEBUG;
            }
            else  /* turn off pid exclusion for this pid value */
            {
                /* Don't turn off all pid exclusion though */
                /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for setting a minimum decrementer value */
kdbg_setrtcdec(kd_regtype *kdr)
{
    int ret = 0;
    natural_t decval;

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)
        ret = EINVAL;
#ifdef ppc
    else
        rtclock_decrementer_min = decval;
#else
    else
        ret = EOPNOTSUPP;
#endif /* ppc */

    return(ret);
}

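/*
 * kdbg_setreg: program the debugid filter. KDBG_CLASSTYPE and
 * KDBG_SUBCLSTYPE set a range check derived from the class/subclass bytes,
 * KDBG_RANGETYPE sets an explicit [value1, value2] range, KDBG_VALCHECK
 * matches up to four exact debugids, and KDBG_TYPENONE clears the filter.
 */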
kdbg_setreg(kd_regtype * kdr)
{
    int i, j, ret = 0;
    unsigned int val_1, val_2, val;
    switch (kdr->type) {

    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_VALCHECK:
        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_RANGECHECK;     /* Turn off range check */
        kdebug_flags |= KDBG_VALCHECK;        /* Turn on specific value check */
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
    return(ret);
}

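/*
 * kdbg_getreg: reading the filter registers back is not implemented; the
 * original body is compiled out and the call always returns EINVAL.
 */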
kdbg_getreg(kd_regtype * kdr)
{
    int i, j, ret = 0;
    unsigned int val_1, val_2, val;
#if 0
    switch (kdr->type) {
    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = val_1 + 1;
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
#endif /* 0 */
    return(EINVAL);
}


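/*
 * kdbg_readmap: copy the thread map out to user space (the caller's buffer,
 * *number bytes long, must hold the entire map), then free the kernel copy.
 */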
kdbg_readmap(kd_threadmap *buffer, size_t *number)
{
    int avail = *number;
    int ret = 0;
    int count = 0;

    count = avail/sizeof (kd_threadmap);

    if (count && (count <= kd_mapcount))
    {
        if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
        {
            if (*number < kd_mapsize)
                ret = EINVAL;
            else
            {
                if (copyout(kd_mapptr, buffer, kd_mapsize))
                    ret = EINVAL;
            }
        }
        else
            ret = EINVAL;
    }
    else
        ret = EINVAL;

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    return(ret);
}

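/*
 * kdbg_control: sysctl backend for the KERN_KDxxx trace commands. The
 * first caller becomes the owner (global_state_pid); other processes get
 * EBUSY until that owner exits or the buffers are removed.
 */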
kdbg_control(name, namelen, where, sizep)
int *name;
u_int namelen;
char *where;
size_t *sizep;
{
    int ret = 0;
    int size = *sizep;
    int max_entries;
    unsigned int value = name[1];
    kd_regtype kd_Reg;
    kbufinfo_t kd_bufinfo;

    pid_t curpid;
    struct proc *p, *curproc;

    if (curproc = current_proc())
        curpid = curproc->p_pid;
    else
        return (ESRCH);

    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid)
    {
        if ((p = pfind(global_state_pid)) == NULL)
        {
            /* The global pid no longer exists */
            global_state_pid = curpid;
        }
        else
        {
            /* The global pid exists, deny this request */
            return(EBUSY);
        }
    }

    switch(name[0]) {
    case KERN_KDEFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags |= value;
        break;
    case KERN_KDDFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags &= ~value;
        break;
    case KERN_KDENABLE:    /* used to enable or disable */
        if (value)
        {
            /* enable only if buffer is initialized */
            if (!(kdebug_flags & KDBG_BUFINIT))
            {
                ret = EINVAL;
                break;
            }
        }
        kdebug_enable = (value) ? 1 : 0;
        kdebug_nolog = (value) ? 0 : 1;
        if (kdebug_enable)
            kdbg_mapinit();
        break;
    case KERN_KDSETBUF:
        /* We allow a maximum buffer size of 25% of memory */
        /* 'value' is the desired number of trace entries */
        max_entries = (mem_size/4) / sizeof(kd_buf);
        if (value <= max_entries)
            nkdbufs = value;
        else
            nkdbufs = max_entries;
        break;
    case KERN_KDGETBUF:
        if (size < sizeof(kbufinfo_t)) {
            ret = EINVAL;
            break;
        }
        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
        kd_bufinfo.nolog = kdebug_nolog;
        kd_bufinfo.flags = kdebug_flags;
        if (copyout (&kd_bufinfo, where, sizeof(kbufinfo_t))) {
            ret = EINVAL;
        }
        break;
    case KERN_KDSETUP:
        ret = kdbg_reinit();
        break;
    case KERN_KDREMOVE:
        kdbg_clear();
        break;
    case KERN_KDSETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setreg(&kd_Reg);
        break;
    case KERN_KDGETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
            ret = EINVAL;
        }
        break;
    case KERN_KDREADTR:
        ret = kdbg_read(where, sizep);
        break;
    case KERN_KDPIDTR:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpid(&kd_Reg);
        break;
    case KERN_KDPIDEX:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpidex(&kd_Reg);
        break;
    case KERN_KDTHRMAP:
        ret = kdbg_readmap((kd_threadmap *)where, sizep);
        break;
    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setrtcdec(&kd_Reg);
        break;

    default:
        ret = EINVAL;
    }
    return(ret);
}

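/*
 * kdbg_read: copy trace entries from kd_readlast up to the writer's current
 * position out to user space, handling the wrapped case with two copyouts.
 * *number is the caller's buffer size in bytes on entry and the count of
 * kd_buf entries returned on exit.
 */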
kdbg_read(kd_buf * buffer, size_t *number)
{
    int avail = *number;
    int count = 0;
    int copycount = 0;
    int totalcount = 0;
    int s;
    unsigned int my_kdebug_flags;
    kd_buf * my_kd_bufptr;

    s = ml_set_interrupts_enabled(FALSE);
    usimple_lock(&kd_trace_lock);
    my_kdebug_flags = kdebug_flags;
    my_kd_bufptr = kd_bufptr;
    usimple_unlock(&kd_trace_lock);
    ml_set_interrupts_enabled(s);

    count = avail/sizeof(kd_buf);
    if (count) {
        if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
            if (count > nkdbufs)
                count = nkdbufs;
            if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
            {
                copycount = my_kd_bufptr - kd_readlast;
                if (copycount > count)
                    copycount = count;

                if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
                {
                    *number = 0;
                    return(EINVAL);
                }
                kd_readlast += copycount;
                *number = copycount;
                return(0);
            }
            else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
            {
                *number = 0;
                return(0);
            }
            else
            {
                if (my_kdebug_flags & KDBG_WRAPPED)
                {
                    kd_readlast = my_kd_bufptr;
                    kdebug_flags &= ~KDBG_WRAPPED;
                }

                /* Note that by setting kd_readlast equal to my_kd_bufptr,
                   we now treat the kd_buffer read the same as if we weren't
                   wrapped and my_kd_bufptr was less than kd_readlast.
                */

                /* first copyout from readlast to end of kd_buffer */
                copycount = kd_buflast - kd_readlast;
                if (copycount > count)
                    copycount = count;
                if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
                {
                    *number = 0;
                    return(EINVAL);
                }
                buffer += copycount;
                count -= copycount;
                totalcount = copycount;
                kd_readlast += copycount;
                if (kd_readlast == kd_buflast)
                    kd_readlast = kd_buffer;
                if (count == 0)
                {
                    *number = totalcount;
                    return(0);
                }

                /* second copyout from top of kd_buffer to bufptr */
                copycount = my_kd_bufptr - kd_readlast;
                if (copycount > count)
                    copycount = count;
                if (copycount == 0)
                {
                    *number = totalcount;
                    return(0);
                }
                if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
                {
                    return(EINVAL);
                }
                kd_readlast += copycount;
                totalcount += copycount;
                *number = totalcount;
                return(0);
            }
        } /* end if KDBG_BUFINIT */
    } /* end if count */
    return (EINVAL);
}
976 | } |