]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kdebug.c
82c594c8bc649e8985566bc73a6e345cd89c87c4
[apple/xnu.git] / bsd / kern / kdebug.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <machine/spl.h>
24
25 #define HZ 100
26 #include <mach/clock_types.h>
27 #include <mach/mach_types.h>
28 #include <machine/machine_routines.h>
29
30 #include <sys/kdebug.h>
31 #include <sys/errno.h>
32 #include <sys/param.h>
33 #include <sys/proc.h>
34 #include <sys/vm.h>
35 #include <sys/sysctl.h>
36
37 #include <kern/thread.h>
38 #include <kern/task.h>
39 #include <vm/vm_kern.h>
40 #include <sys/lock.h>
41
42 /* trace enable status */
43 unsigned int kdebug_enable = 0;
44
45 /* track timestamps for security server's entropy needs */
46 mach_timespec_t * kd_entropy_buffer = 0;
47 unsigned int kd_entropy_bufsize = 0;
48 unsigned int kd_entropy_count = 0;
49 unsigned int kd_entropy_indx = 0;
50 unsigned int kd_entropy_buftomem = 0;
51
52 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
53 kd_buf * kd_bufptr;
54 unsigned int kd_buftomem=0;
55 kd_buf * kd_buffer=0;
56 kd_buf * kd_buflast;
57 kd_buf * kd_readlast;
58 unsigned int nkdbufs = 8192;
59 unsigned int kd_bufsize = 0;
60 unsigned int kdebug_flags = 0;
61 unsigned int kdebug_nolog=1;
62 unsigned int kdlog_beg=0;
63 unsigned int kdlog_end=0;
64 unsigned int kdlog_value1=0;
65 unsigned int kdlog_value2=0;
66 unsigned int kdlog_value3=0;
67 unsigned int kdlog_value4=0;
68
69 unsigned long long kd_prev_timebase = 0LL;
70 decl_simple_lock_data(,kd_trace_lock);
71
72 kd_threadmap *kd_mapptr = 0;
73 unsigned int kd_mapsize = 0;
74 unsigned int kd_mapcount = 0;
75 unsigned int kd_maptomem = 0;
76
77 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
78
79 #define DBG_FUNC_MASK 0xfffffffc
80
81 #ifdef ppc
82 extern natural_t rtclock_decrementer_min;
83 #endif /* ppc */
84
/* Argument block for the SYS_kdebug_trace syscall (see kdebug_trace()). */
struct kdebug_args {
	int code;	/* trace debugid */
	int arg1;
	int arg2;
	int arg3;
	int arg4;
	int arg5;
};

/* task to string structure */
struct tts
{
	task_t   *task;		/* task referenced while the thread map is built */
	char      task_comm[20];	/* from procs p_comm */
};

typedef struct tts tts_t;

/* Walk context handed to kdbg_resolve_map() while filling the thread map. */
struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;		/* entries filled so far */
	int maxcount;		/* capacity of the map buffer */
	struct tts *atts;	/* task/command-name pair currently being resolved */
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
119
/* Support syscall SYS_kdebug_trace */
/*
 * User-level entry point for emitting one trace record.
 * Returns EINVAL while logging is disabled; otherwise forwards the four
 * user-supplied arguments to kernel_debug().  arg5 is passed as 0 so
 * that kernel_debug() substitutes the current thread id (uap->arg5 is
 * deliberately ignored).
 */
kdebug_trace(p, uap, retval)
struct proc *p;
struct kdebug_args *uap;
register_t *retval;
{
	if (kdebug_nolog)
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
	return(0);
}
132
133
/*
 * Record one trace event in the global circular trace buffer.
 *
 * Ordering is significant throughout: the CHUD hook runs first (and may
 * be the only consumer), entropy timestamps are harvested with
 * interrupts disabled but before taking the trace lock, and all buffer
 * manipulation happens with interrupts off AND kd_trace_lock held.
 * arg5 of the stored record is overwritten with the current thread id
 * (plus KDBG_CPU_MASK on non-boot CPUs) — compare kernel_debug1(),
 * which stores the caller's arg5 verbatim.
 */
void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	int s;			/* saved interrupt-enable state */
	unsigned long long now;
	mach_timespec_t *tsp;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		/* Hand the raw event to the CHUD toolkit first, if hooked. */
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		/* If neither entropy sampling nor tracing is on, CHUD was
		 * the only consumer — nothing more to do. */
		if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
		      (kdebug_enable & KDEBUG_ENABLE_TRACE)))
			return;
	}

	s = ml_set_interrupts_enabled(FALSE);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
	{
		/* Harvest one timebase sample into the entropy buffer. */
		if (kd_entropy_indx < kd_entropy_count)
		{
			ml_get_timebase((unsigned long long *) &kd_entropy_buffer [ kd_entropy_indx]);
			kd_entropy_indx++;
		}

		if (kd_entropy_indx == kd_entropy_count)
		{
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
		}
	}

	if (kdebug_nolog)
	{
		ml_set_interrupts_enabled(s);
		return;
	}

	usimple_lock(&kd_trace_lock);
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		/* (scheduler events are always admitted so context switches
		 * remain visible regardless of the pid filter) */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/* Drop events outside [kdlog_beg, kdlog_end], except the
		 * DBG_TRACE class which always passes. */
		if ((debugid < kdlog_beg) || (debugid > kdlog_end)
		    && (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		/* Admit only the four registered debugids (function bits
		 * masked off), plus the DBG_TRACE class. */
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = (int)current_thread();	/* thread id replaces caller's arg5 */
	if (cpu_number())
		kd->arg5 |= KDBG_CPU_MASK;	/* tag events from non-boot CPUs */

	ml_get_timebase((unsigned long long *)&kd->timestamp);

	/* Watch for out of order timestamps */
	now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
	      (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

	if (now < kd_prev_timebase)
	{
		/* timestamps are out of order -- adjust */
		/* (bump the previous timebase by one tick and reuse it so
		 * the record stream stays monotonic) */
		kd_prev_timebase++;
		tsp = (mach_timespec_t *)&kd_prev_timebase;
		kd->timestamp.tv_sec = tsp->tv_sec;
		kd->timestamp.tv_nsec = tsp->tv_nsec;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}


	kd_bufptr++;

	/* Circular buffer: wrap the write pointer, and note (or stop, if
	 * KDBG_NOWRAP) when we catch up with the read pointer. */
	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_nolog = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
268
/*
 * Variant of kernel_debug() used where the caller supplies all five
 * arguments verbatim: arg5 is stored as given (no thread-id
 * substitution) and no entropy harvesting is performed.  Filtering,
 * locking, and timestamp handling are otherwise identical to
 * kernel_debug().
 */
void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	int s;			/* saved interrupt-enable state */
	unsigned long long now;
	mach_timespec_t *tsp;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		/* Hand the raw event to the CHUD toolkit first, if hooked. */
		if (kdebug_chudhook)
			(void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		/* If neither entropy sampling nor tracing is on, CHUD was
		 * the only consumer — nothing more to do. */
		if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
		      (kdebug_enable & KDEBUG_ENABLE_TRACE)))
			return;
	}

	s = ml_set_interrupts_enabled(FALSE);

	if (kdebug_nolog)
	{
		ml_set_interrupts_enabled(s);
		return;
	}

	usimple_lock(&kd_trace_lock);
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		/* (scheduler events always pass the pid filter) */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/* Drop events outside [kdlog_beg, kdlog_end], except the
		 * DBG_TRACE class which always passes. */
		if ((debugid < kdlog_beg) || (debugid > kdlog_end)
		    && (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		/* Admit only the four registered debugids (function bits
		 * masked off), plus the DBG_TRACE class. */
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;	/* stored verbatim, unlike kernel_debug() */
	ml_get_timebase((unsigned long long *)&kd->timestamp);

	/* Watch for out of order timestamps */
	now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
	      (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

	if (now < kd_prev_timebase)
	{
		/* timestamps are out of order -- adjust */
		kd_prev_timebase++;
		tsp = (mach_timespec_t *)&kd_prev_timebase;
		kd->timestamp.tv_sec = tsp->tv_sec;
		kd->timestamp.tv_nsec = tsp->tv_nsec;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}

	kd_bufptr++;

	/* Circular buffer: wrap the write pointer, and note (or stop, if
	 * KDBG_NOWRAP) when we catch up with the read pointer. */
	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_nolog = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
385
386
/*
 * Allocate and initialize the circular trace buffer (nkdbufs entries)
 * and reset the buffer pointers, lock, and timestamp state.
 * Returns 0 on success; EINVAL if the kernel allocation fails, in
 * which case KDBG_INIT/KDBG_BUFINIT are cleared and kd_bufsize is 0.
 */
kdbg_bootstrap()
{
	kd_bufsize = nkdbufs * sizeof(kd_buf);
	if (kmem_alloc(kernel_map, &kd_buftomem,
		       (vm_size_t)kd_bufsize) == KERN_SUCCESS)
		kd_buffer = (kd_buf *) kd_buftomem;
	else kd_buffer= (kd_buf *) 0;
	kdebug_flags &= ~KDBG_WRAPPED;
	if (kd_buffer) {
		simple_lock_init(&kd_trace_lock);
		kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
		kd_bufptr = kd_buffer;
		kd_buflast = &kd_bufptr[nkdbufs];	/* one past the final entry */
		kd_readlast = kd_bufptr;
		kd_prev_timebase = 0LL;
		return(0);
	} else {
		kd_bufsize=0;
		kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
		return(EINVAL);
	}

}
410
411 kdbg_reinit()
412 {
413 int x;
414 int ret=0;
415
416 /* Disable trace collecting */
417 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
418 kdebug_nolog = 1;
419
420 if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
421 kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);
422
423 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
424 {
425 kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
426 kdebug_flags &= ~KDBG_MAPINIT;
427 kd_mapsize = 0;
428 kd_mapptr = (kd_threadmap *) 0;
429 kd_mapcount = 0;
430 }
431
432 ret= kdbg_bootstrap();
433
434 return(ret);
435 }
436
437 void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
438 {
439 int i;
440 char *dbg_nameptr;
441 int dbg_namelen;
442 long dbg_parms[4];
443
444 if (!proc)
445 {
446 *arg1 = 0;
447 *arg2 = 0;
448 *arg3 = 0;
449 *arg4 = 0;
450 return;
451 }
452
453 /* Collect the pathname for tracing */
454 dbg_nameptr = proc->p_comm;
455 dbg_namelen = strlen(proc->p_comm);
456 dbg_parms[0]=0L;
457 dbg_parms[1]=0L;
458 dbg_parms[2]=0L;
459 dbg_parms[3]=0L;
460
461 if(dbg_namelen > sizeof(dbg_parms))
462 dbg_namelen = sizeof(dbg_parms);
463
464 for(i=0;dbg_namelen > 0; i++)
465 {
466 dbg_parms[i]=*(long*)dbg_nameptr;
467 dbg_nameptr += sizeof(long);
468 dbg_namelen -= sizeof(long);
469 }
470
471 *arg1=dbg_parms[0];
472 *arg2=dbg_parms[1];
473 *arg3=dbg_parms[2];
474 *arg4=dbg_parms[3];
475 }
476
/*
 * Per-thread callback used by kdbg_mapinit() (via
 * task_act_iterate_wth_args): append one thread-to-command-name entry
 * to the map in *t, provided capacity remains.  The command name is
 * copied from the task's tts entry and always NUL-terminated.
 */
kdbg_resolve_map(thread_act_t th_act, krt_t *t)
{
	kd_threadmap *mapptr;

	if(t->count < t->maxcount)
	{
		mapptr=&t->map[t->count];
		mapptr->thread = (unsigned int)getshuttle_thread(th_act);
		mapptr->valid = 1;
		(void) strncpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm)-1);
		mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
		t->count++;
	}
}
492
/*
 * Build the thread-to-command-name map consumed by kdbg_readmap().
 * Two passes over allproc: the first sizes the buffers (padded 10% to
 * tolerate process churn), the second snapshots command names while
 * holding a task reference so each task survives until its threads are
 * iterated.  No-op if the map is already initialized (KDBG_MAPINIT).
 */
void kdbg_mapinit()
{
	struct proc *p;
	struct krt akrt;
	int tts_count;    /* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	unsigned int tts_maptomem=0;
	int i;


	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* Calculate the sizes of map buffers*/
	for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
	     p = p->p_list.le_next)
	{
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making into the tables.  Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount/10;
	tts_count += tts_count/10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if((kmem_alloc(kernel_map, & kd_maptomem,
		       (vm_size_t)kd_mapsize) == KERN_SUCCESS))
		kd_mapptr = (kd_threadmap *) kd_maptomem;
	else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if((kmem_alloc(kernel_map, & tts_maptomem,
		       (vm_size_t)tts_mapsize) == KERN_SUCCESS))
		tts_mapptr = (struct tts *) tts_maptomem;
	else
		tts_mapptr = (struct tts *) 0;


	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */

	if (tts_mapptr) {
		/* Skip exiting procs; only count tasks we could reference. */
		for (p = allproc.lh_first, i=0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_flag & P_WEXIT)
				continue;

			if (task_reference_try(p->task)) {
				tts_mapptr[i].task = p->task;
				(void)strncpy(&tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
				i++;
			}
		}
		tts_count = i;	/* actual number of referenced tasks */
	}


	if (kd_mapptr && tts_mapptr)
	{
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		/* Resolve each referenced task's threads, then drop the
		 * reference taken above. */
		for (i=0; i < tts_count; i++)
		{
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate(tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (char *)tts_mapptr, tts_mapsize);
	}
}
578
579 kdbg_clear()
580 {
581 int x;
582
583 /* Clean up the trace buffer */
584 global_state_pid = -1;
585 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
586 kdebug_nolog = 1;
587 kdebug_flags &= ~KDBG_BUFINIT;
588 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
589 kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
590 kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
591 kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);
592 kd_buffer = (kd_buf *)0;
593 kd_bufsize = 0;
594 kd_prev_timebase = 0LL;
595
596 /* Clean up the thread map buffer */
597 kdebug_flags &= ~KDBG_MAPINIT;
598 kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
599 kd_mapptr = (kd_threadmap *) 0;
600 kd_mapsize = 0;
601 kd_mapcount = 0;
602 }
603
604 kdbg_setpid(kd_regtype *kdr)
605 {
606 pid_t pid;
607 int flag, ret=0;
608 struct proc *p;
609
610 pid = (pid_t)kdr->value1;
611 flag = (int)kdr->value2;
612
613 if (pid > 0)
614 {
615 if ((p = pfind(pid)) == NULL)
616 ret = ESRCH;
617 else
618 {
619 if (flag == 1) /* turn on pid check for this and all pids */
620 {
621 kdebug_flags |= KDBG_PIDCHECK;
622 kdebug_flags &= ~KDBG_PIDEXCLUDE;
623 p->p_flag |= P_KDEBUG;
624 }
625 else /* turn off pid check for this pid value */
626 {
627 /* Don't turn off all pid checking though */
628 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
629 p->p_flag &= ~P_KDEBUG;
630 }
631 }
632 }
633 else
634 ret = EINVAL;
635 return(ret);
636 }
637
638 /* This is for pid exclusion in the trace buffer */
639 kdbg_setpidex(kd_regtype *kdr)
640 {
641 pid_t pid;
642 int flag, ret=0;
643 struct proc *p;
644
645 pid = (pid_t)kdr->value1;
646 flag = (int)kdr->value2;
647
648 if (pid > 0)
649 {
650 if ((p = pfind(pid)) == NULL)
651 ret = ESRCH;
652 else
653 {
654 if (flag == 1) /* turn on pid exclusion */
655 {
656 kdebug_flags |= KDBG_PIDEXCLUDE;
657 kdebug_flags &= ~KDBG_PIDCHECK;
658 p->p_flag |= P_KDEBUG;
659 }
660 else /* turn off pid exclusion for this pid value */
661 {
662 /* Don't turn off all pid exclusion though */
663 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
664 p->p_flag &= ~P_KDEBUG;
665 }
666 }
667 }
668 else
669 ret = EINVAL;
670 return(ret);
671 }
672
673 /* This is for setting a minimum decrementer value */
674 kdbg_setrtcdec(kd_regtype *kdr)
675 {
676 int ret=0;
677 natural_t decval;
678
679 decval = (natural_t)kdr->value1;
680
681 if (decval && decval < KDBG_MINRTCDEC)
682 ret = EINVAL;
683 #ifdef ppc
684 else
685 rtclock_decrementer_min = decval;
686 #else
687 else
688 ret = EOPNOTSUPP;
689 #endif /* ppc */
690
691 return(ret);
692 }
693
694 kdbg_setreg(kd_regtype * kdr)
695 {
696 int i,j, ret=0;
697 unsigned int val_1, val_2, val;
698 switch (kdr->type) {
699
700 case KDBG_CLASSTYPE :
701 val_1 = (kdr->value1 & 0xff);
702 val_2 = (kdr->value2 & 0xff);
703 kdlog_beg = (val_1<<24);
704 kdlog_end = (val_2<<24);
705 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
706 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
707 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
708 break;
709 case KDBG_SUBCLSTYPE :
710 val_1 = (kdr->value1 & 0xff);
711 val_2 = (kdr->value2 & 0xff);
712 val = val_2 + 1;
713 kdlog_beg = ((val_1<<24) | (val_2 << 16));
714 kdlog_end = ((val_1<<24) | (val << 16));
715 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
716 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
717 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
718 break;
719 case KDBG_RANGETYPE :
720 kdlog_beg = (kdr->value1);
721 kdlog_end = (kdr->value2);
722 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
723 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
724 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
725 break;
726 case KDBG_VALCHECK:
727 kdlog_value1 = (kdr->value1);
728 kdlog_value2 = (kdr->value2);
729 kdlog_value3 = (kdr->value3);
730 kdlog_value4 = (kdr->value4);
731 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
732 kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
733 kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
734 break;
735 case KDBG_TYPENONE :
736 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
737 kdlog_beg = 0;
738 kdlog_end = 0;
739 break;
740 default :
741 ret = EINVAL;
742 break;
743 }
744 return(ret);
745 }
746
/*
 * Read back the trace filter registers.  NOT IMPLEMENTED: the body is
 * compiled out (#if 0) and the function unconditionally returns EINVAL.
 * The locals are retained for the disabled code should it be revived.
 */
kdbg_getreg(kd_regtype * kdr)
{
	int i,j, ret=0;
	unsigned int val_1, val_2, val;
#if 0
	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}
788
789
790
/*
 * Copy the thread-to-command-name map out to user space.
 * *number is the caller's buffer size in bytes; it must hold the whole
 * map (kd_mapsize) or EINVAL is returned.  The map is freed and
 * KDBG_MAPINIT cleared on EVERY call, success or failure, so the map
 * can be read at most once per kdbg_mapinit().
 */
kdbg_readmap(kd_threadmap *buffer, size_t *number)
{
	int avail = *number;	/* caller buffer size, bytes */
	int ret = 0;
	int count = 0;		/* whole map entries that fit */

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret=EINVAL;
			else
			{
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret=EINVAL;
			}
		}
		else
			ret=EINVAL;
	}
	else
		ret=EINVAL;

	/* Unconditional cleanup: the map is single-use. */
	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	return(ret);
}
828
/*
 * Collect timebase samples as an entropy source for the security
 * server.  Allocates an entropy buffer sized to the caller's request,
 * enables entropy harvesting (performed by kernel_debug() on each
 * trace event), sleeps for at least ms_timeout milliseconds, then
 * copies whatever samples accumulated out to user space.  On return
 * *number holds the sample count (0 on failure).  Only one collection
 * may be in flight at a time (EBUSY otherwise).
 */
kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout)
{
	int avail = *number;	/* caller buffer size, bytes */
	int ret = 0;
	int count = 0;	/* The number of timestamp entries that will fill buffer */

	if (kd_entropy_buffer)
		return(EBUSY);	/* a collection is already in progress */

	kd_entropy_count = avail/sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
	{
		kd_entropy_buffer = (mach_timespec_t *)kd_entropy_buftomem;
	}
	else
	{
		kd_entropy_buffer = (mach_timespec_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	/* clamp the sleep to at least 10ms so some samples can arrive */
	if (ms_timeout < 10)
		ms_timeout = 10;

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;

	/* sleep (interruptibly) while kernel_debug() fills the buffer */
	ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;

	*number = 0;
	ret = 0;	/* a tsleep timeout/interrupt is not an error */

	if (kd_entropy_indx > 0)
	{
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}

	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (char *)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (mach_timespec_t *) 0;
	return(ret);
}
889
890
891 /*
892 * This function is provided for the CHUD toolkit only.
893 * int val:
894 * zero disables kdebug_chudhook function call
895 * non-zero enables kdebug_chudhook function call
896 * char *fn:
897 * address of the enabled kdebug_chudhook function
898 */
899
900 void kdbg_control_chud(int val, void *fn)
901 {
902 if (val) {
903 /* enable chudhook */
904 kdebug_enable |= KDEBUG_ENABLE_CHUD;
905 kdebug_chudhook = fn;
906 }
907 else {
908 /* disable chudhook */
909 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
910 kdebug_chudhook = 0;
911 }
912 }
913
914
/*
 * sysctl-style control entry point for the kdebug facility.
 * name[0] selects the operation, name[1] its value argument; `where`
 * and `sizep` describe the user buffer.  KERN_KDGETBUF and
 * KERN_KDGETENTROPY are passive and allowed for any caller; every
 * other operation requires exclusive ownership of the trace buffer,
 * arbitrated through global_state_pid (first caller claims it; others
 * get EBUSY until the owner exits).
 */
kdbg_control(name, namelen, where, sizep)
int *name;
u_int namelen;
char *where;
size_t *sizep;
{
	int ret=0;
	int size=*sizep;
	int max_entries;
	unsigned int value = name[1];
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;

	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETBUF) {
		/* 
		   Does not alter the global_state_pid
		   This is a passive request.
		*/
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/* 
			   There is not enough room to return even
			   the first element of the info structure.
			*/
			return(EINVAL);
		}

		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
		kd_bufinfo.nolog = kdebug_nolog;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if(size >= sizeof(kbufinfo_t)) {
			/* Provide all the info we have */
			if(copyout (&kd_bufinfo, where, sizeof(kbufinfo_t)))
				return(EINVAL);
		}
		else {
			/* 
		 	   For backwards compatibility, only provide
			   as much info as there is room for.
			*/
			if(copyout (&kd_bufinfo, where, size))
				return(EINVAL);
		}
		return(0);
	}
	else if (name[0] == KERN_KDGETENTROPY) {
		/* also passive: value is the timeout in milliseconds */
		if (kd_entropy_buffer)
			return(EBUSY);
		else
			ret = kdbg_getentropy((mach_timespec_t *)where, sizep, value);
		return (ret);
	}

	/* deliberate assignment: grab the current proc, bail if none */
	if(curproc = current_proc())
		curpid = curproc->p_pid;
	else
		return (ESRCH);

	/* Exclusive-ownership arbitration via global_state_pid. */
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid)
	{
		if((p = pfind(global_state_pid)) == NULL)
		{
			/* The global pid no longer exists */
			global_state_pid = curpid;
		}
		else
		{
			/* The global pid exists, deny this request */
			return(EBUSY);
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:	/* set user-settable flag bits */
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:	/* clear user-settable flag bits */
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:    /* used to enable or disable */
		if (value)
		{
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT))
			{
				ret=EINVAL;
				break;
			}
		}

		if (value)
			kdebug_enable |= KDEBUG_ENABLE_TRACE;
		else
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;

		kdebug_nolog = (value)?0:1;

		/* build the thread-name map the first time tracing starts */
		if (kdebug_enable & KDEBUG_ENABLE_TRACE)
			kdbg_mapinit();
		break;
	case KERN_KDSETBUF:
		/* We allow a maximum buffer size of 25% of memory */
		/* 'value' is the desired number of trace entries */
		max_entries = (mem_size/4) / sizeof(kd_buf);
		if (value <= max_entries)
			nkdbufs = value;
		else
			nkdbufs = max_entries;
		break;
	case KERN_KDSETUP:	/* (re)allocate the trace buffer */
		ret=kdbg_reinit();
		break;
	case KERN_KDREMOVE:	/* tear everything down, release ownership */
		kdbg_clear();
		break;
	case KERN_KDSETREG:	/* program the trace filter */
		if(size < sizeof(kd_regtype)) {
			ret=EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:	/* read back the filter (currently EINVAL) */
		if(size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
			ret=EINVAL;
		}
		break;
	case KERN_KDREADTR:	/* drain trace records to user space */
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:	/* pid-inclusion filter */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:	/* pid-exclusion filter */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:	/* read (and consume) the thread-name map */
		ret = kdbg_readmap((kd_threadmap *)where, sizep);
		break;
	case KERN_KDSETRTCDEC:	/* set rtclock minimum decrementer (ppc) */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	       
	default:
		ret= EINVAL;
	}
	return(ret);
}
1105
/*
 * Copy unread trace records out to user space, advancing kd_readlast.
 * A consistent snapshot of the flags and write pointer is taken under
 * the trace lock with interrupts off; the copyout itself runs unlocked.
 * Three cases:
 *   - not wrapped, writer ahead of reader: one contiguous copy;
 *   - not wrapped, buffer empty: return 0 records;
 *   - wrapped (or reader ahead): copy reader..end, then start..writer.
 * On success *number is the record count copied; on copyout failure
 * EINVAL is returned (with *number zeroed for the first segment only).
 */
kdbg_read(kd_buf * buffer, size_t *number)
{
	int avail=*number;	/* caller buffer size, bytes */
	int count=0;
	int copycount=0;
	int totalcount=0;
	int s;
	unsigned int my_kdebug_flags;	/* snapshot of kdebug_flags */
	kd_buf * my_kd_bufptr;		/* snapshot of the write pointer */

	s = ml_set_interrupts_enabled(FALSE);
	usimple_lock(&kd_trace_lock);
	my_kdebug_flags = kdebug_flags;
	my_kd_bufptr = kd_bufptr;
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);

	count = avail/sizeof(kd_buf);
	if (count) {
		if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
			if (count > nkdbufs)
				count = nkdbufs;
			if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
			{
				/* simple case: unread records are contiguous */
				copycount = my_kd_bufptr-kd_readlast;
				if (copycount > count)
					copycount = count;

				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					*number = 0;
					return(EINVAL);
				}
				kd_readlast += copycount;
				*number = copycount;
				return(0);
			}
			else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
			{
				/* nothing new to read */
				*number = 0;
				return(0);
			}
			else
			{
				if (my_kdebug_flags & KDBG_WRAPPED)
				{
					/* writer lapped the reader: oldest
					 * surviving record is at the write
					 * pointer */
					kd_readlast = my_kd_bufptr;
					kdebug_flags &= ~KDBG_WRAPPED;
				}

				/* Note that by setting kd_readlast equal to my_kd_bufptr,
				   we now treat the kd_buffer read the same as if we weren't
				   wrapped and my_kd_bufptr was less than kd_readlast.
				*/

				/* first copyout from readlast to end of kd_buffer */
				copycount = kd_buflast - kd_readlast;
				if (copycount > count)
					copycount = count;
				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					*number = 0;
					return(EINVAL);
				}
				buffer += copycount;
				count -= copycount;
				totalcount = copycount;
				kd_readlast += copycount;
				if (kd_readlast == kd_buflast)
					kd_readlast = kd_buffer;
				if (count == 0)
				{
					*number = totalcount;
					return(0);
				}

				/* second copyout from top of kd_buffer to bufptr */
				copycount = my_kd_bufptr - kd_readlast;
				if (copycount > count)
					copycount = count;
				if (copycount == 0)
				{
					*number = totalcount;
					return(0);
				}
				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					return(EINVAL);
				}
				kd_readlast += copycount;
				totalcount += copycount;
				*number = totalcount;
				return(0);
			}
		} /* end if KDBG_BUFINIT */		
	} /* end if count */
	return (EINVAL);
}