/*
 * Source: git.saurik.com mirror of apple/xnu.git (release xnu-344.21.73)
 * File:   bsd/kern/kdebug.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26 #include <machine/spl.h>
27
28 #define HZ 100
29 #include <mach/clock_types.h>
30 #include <mach/mach_types.h>
31 #include <machine/machine_routines.h>
32
33 #include <sys/kdebug.h>
34 #include <sys/errno.h>
35 #include <sys/param.h>
36 #include <sys/proc.h>
37 #include <sys/vm.h>
38 #include <sys/sysctl.h>
39
40 #include <kern/thread.h>
41 #include <kern/task.h>
42 #include <vm/vm_kern.h>
43 #include <sys/lock.h>
44
/* trace enable status: bitmask of KDEBUG_ENABLE_* bits (trace / entropy / CHUD) */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
mach_timespec_t * kd_entropy_buffer = 0;	/* sample buffer; non-zero while a collection is in progress */
unsigned int kd_entropy_bufsize = 0;		/* buffer size in bytes */
unsigned int kd_entropy_count = 0;		/* capacity, in samples */
unsigned int kd_entropy_indx = 0;		/* index of next free sample slot */
unsigned int kd_entropy_buftomem = 0;		/* raw kmem_alloc address backing the buffer */

/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
kd_buf * kd_bufptr;			/* write cursor: next slot to fill */
unsigned int kd_buftomem=0;		/* raw kmem_alloc address backing the trace buffer */
kd_buf * kd_buffer=0;			/* start of the circular trace buffer */
kd_buf * kd_buflast;			/* one past the last slot */
kd_buf * kd_readlast;			/* read cursor: next slot to copy out */
unsigned int nkdbufs = 8192;		/* number of trace entries to allocate */
unsigned int kd_bufsize = 0;		/* trace buffer size in bytes */
unsigned int kdebug_flags = 0;		/* KDBG_* state and filter flags */
unsigned int kdebug_nolog=1;		/* 1 = event logging suppressed */
unsigned int kdlog_beg=0;		/* range filter: lowest accepted debugid */
unsigned int kdlog_end=0;		/* range filter: highest accepted debugid */
unsigned int kdlog_value1=0;		/* value filter: up to four exact debugids */
unsigned int kdlog_value2=0;
unsigned int kdlog_value3=0;
unsigned int kdlog_value4=0;

/* last timestamp written; used to detect and repair out-of-order timestamps */
unsigned long long kd_prev_timebase = 0LL;
/* spin lock serializing trace-buffer writers and cursor updates */
decl_simple_lock_data(,kd_trace_lock);

kd_threadmap *kd_mapptr = 0;		/* thread -> command-name map buffer */
unsigned int kd_mapsize = 0;		/* map buffer size in bytes */
unsigned int kd_mapcount = 0;		/* number of map entries */
unsigned int kd_maptomem = 0;		/* raw kmem_alloc address backing the map */

pid_t global_state_pid = -1;    /* Used to control exclusive use of kd_buffer */

/* strips the DBG_FUNC_START/DBG_FUNC_END bits from a debugid */
#define DBG_FUNC_MASK 0xfffffffc

#ifdef ppc
extern natural_t rtclock_decrementer_min;
#endif /* ppc */

/* argument block for the SYS_kdebug_trace syscall */
struct kdebug_args {
	int code;
	int arg1;
	int arg2;
	int arg3;
	int arg4;
	int arg5;
};

/* task to string structure */
struct tts
{
	task_t *task;		/* task reference held while the thread map is built */
	char task_comm[20];	/* from procs p_comm */
};

typedef struct tts tts_t;

/* cursor handed to kdbg_resolve_map while iterating a task's threads */
struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;		/* entries filled so far */
	int maxcount;		/* capacity of the map buffer */
	struct tts *atts;	/* name/reference of the task being walked */
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
122
123 /* Support syscall SYS_kdebug_trace */
124 kdebug_trace(p, uap, retval)
125 struct proc *p;
126 struct kdebug_args *uap;
127 register_t *retval;
128 {
129 if (kdebug_nolog)
130 return(EINVAL);
131
132 kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
133 return(0);
134 }
135
136
/*
 * Record one trace event in the circular kernel trace buffer.
 * arg5 is ignored and replaced with the current thread pointer (tagged
 * with KDBG_CPU_MASK on non-boot CPUs); see kernel_debug1() for the
 * variant that preserves arg5.  Runs with interrupts disabled and
 * kd_trace_lock held while touching the buffer.
 */
void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	int s;				/* saved interrupt state */
	unsigned long long now;
	mach_timespec_t *tsp;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		/* hand the event to the CHUD toolkit hook, if one is registered */
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		/* if neither entropy nor trace collection is active, nothing left to do */
		if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
		      (kdebug_enable & KDEBUG_ENABLE_TRACE)))
			return;
	}

	/* disable interrupts so the timestamp/buffer update is atomic on this CPU */
	s = ml_set_interrupts_enabled(FALSE);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
	{
		/* entropy mode: store a raw timebase sample for each event */
		if (kd_entropy_indx < kd_entropy_count)
		{
			ml_get_timebase((unsigned long long *) &kd_entropy_buffer [ kd_entropy_indx]);
			kd_entropy_indx++;
		}

		if (kd_entropy_indx == kd_entropy_count)
		{
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
		}
	}

	if (kdebug_nolog)
	{
		ml_set_interrupts_enabled(s);
		return;
	}

	/* spin lock serializes buffer writers across CPUs */
	usimple_lock(&kd_trace_lock);
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		/* (scheduler events are always admitted) */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		/* (scheduler events are always admitted) */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/*
		 * Drop events outside [kdlog_beg, kdlog_end], except DBG_TRACE
		 * class events.  NOTE(review): '&&' binds tighter than '||'
		 * here, so a DBG_TRACE event below kdlog_beg is still dropped;
		 * the exemption only protects events above kdlog_end — confirm
		 * whether that is intended.
		 */
		if ((debugid < kdlog_beg) || (debugid > kdlog_end)
		    && (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		/* exact-value filter: admit only the four configured debugids
		   (function qualifiers masked off) plus DBG_TRACE class events */
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	/* claim the next slot and fill it in */
	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = (int)current_thread();
	if (cpu_number())
		kd->arg5 |= KDBG_CPU_MASK;	/* tag events from non-boot CPUs */

	ml_get_timebase((unsigned long long *)&kd->timestamp);

	/* Watch for out of order timestamps: compose sec/nsec into one 64-bit value */
	now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
	      (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

	if (now < kd_prev_timebase)
	{
		/* timestamps are out of order -- adjust: nudge the previous
		   timestamp forward by one and reuse it for this event */
		kd_prev_timebase++;
		tsp = (mach_timespec_t *)&kd_prev_timebase;
		kd->timestamp.tv_sec = tsp->tv_sec;
		kd->timestamp.tv_nsec = tsp->tv_nsec;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}


	/* advance the write cursor, wrapping circularly */
	kd_bufptr++;

	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		/* write cursor caught the read cursor: the buffer has wrapped */
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_nolog = 1;	/* stop logging instead of overwriting */
		kdebug_flags |= KDBG_WRAPPED;
	}
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
271
/*
 * Variant of kernel_debug(): records arg5 verbatim (no thread-pointer
 * substitution, no CPU tagging) and performs no entropy sampling.
 * Filtering, timestamping, and buffer handling are otherwise identical.
 */
void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	int s;				/* saved interrupt state */
	unsigned long long now;
	mach_timespec_t *tsp;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		/* hand the event to the CHUD toolkit hook, if one is registered */
		if (kdebug_chudhook)
			(void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		/* if neither entropy nor trace collection is active, nothing left to do */
		if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
		      (kdebug_enable & KDEBUG_ENABLE_TRACE)))
			return;
	}

	/* disable interrupts so the timestamp/buffer update is atomic on this CPU */
	s = ml_set_interrupts_enabled(FALSE);

	if (kdebug_nolog)
	{
		ml_set_interrupts_enabled(s);
		return;
	}

	/* spin lock serializes buffer writers across CPUs */
	usimple_lock(&kd_trace_lock);
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		/* (scheduler events are always admitted) */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		/* (scheduler events are always admitted) */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/*
		 * Drop events outside [kdlog_beg, kdlog_end], except DBG_TRACE
		 * class events.  NOTE(review): same operator-precedence caveat
		 * as in kernel_debug() — '&&' binds tighter than '||', so
		 * DBG_TRACE events below kdlog_beg are still dropped.
		 */
		if ((debugid < kdlog_beg) || (debugid > kdlog_end)
		    && (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		/* exact-value filter: admit only the four configured debugids
		   (function qualifiers masked off) plus DBG_TRACE class events */
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
		{
			usimple_unlock(&kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

	/* claim the next slot and fill it in; arg5 is stored as given */
	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;
	ml_get_timebase((unsigned long long *)&kd->timestamp);

	/* Watch for out of order timestamps: compose sec/nsec into one 64-bit value */
	now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
	      (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));

	if (now < kd_prev_timebase)
	{
		/* timestamps are out of order -- adjust: nudge the previous
		   timestamp forward by one and reuse it for this event */
		kd_prev_timebase++;
		tsp = (mach_timespec_t *)&kd_prev_timebase;
		kd->timestamp.tv_sec = tsp->tv_sec;
		kd->timestamp.tv_nsec = tsp->tv_nsec;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}

	/* advance the write cursor, wrapping circularly */
	kd_bufptr++;

	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		/* write cursor caught the read cursor: the buffer has wrapped */
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_nolog = 1;	/* stop logging instead of overwriting */
		kdebug_flags |= KDBG_WRAPPED;
	}
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
388
389
390 kdbg_bootstrap()
391 {
392 kd_bufsize = nkdbufs * sizeof(kd_buf);
393 if (kmem_alloc(kernel_map, &kd_buftomem,
394 (vm_size_t)kd_bufsize) == KERN_SUCCESS)
395 kd_buffer = (kd_buf *) kd_buftomem;
396 else kd_buffer= (kd_buf *) 0;
397 kdebug_flags &= ~KDBG_WRAPPED;
398 if (kd_buffer) {
399 simple_lock_init(&kd_trace_lock);
400 kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
401 kd_bufptr = kd_buffer;
402 kd_buflast = &kd_bufptr[nkdbufs];
403 kd_readlast = kd_bufptr;
404 kd_prev_timebase = 0LL;
405 return(0);
406 } else {
407 kd_bufsize=0;
408 kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
409 return(EINVAL);
410 }
411
412 }
413
414 kdbg_reinit()
415 {
416 int x;
417 int ret=0;
418
419 /* Disable trace collecting */
420 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
421 kdebug_nolog = 1;
422
423 if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
424 kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);
425
426 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
427 {
428 kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
429 kdebug_flags &= ~KDBG_MAPINIT;
430 kd_mapsize = 0;
431 kd_mapptr = (kd_threadmap *) 0;
432 kd_mapcount = 0;
433 }
434
435 ret= kdbg_bootstrap();
436
437 return(ret);
438 }
439
440 void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
441 {
442 int i;
443 char *dbg_nameptr;
444 int dbg_namelen;
445 long dbg_parms[4];
446
447 if (!proc)
448 {
449 *arg1 = 0;
450 *arg2 = 0;
451 *arg3 = 0;
452 *arg4 = 0;
453 return;
454 }
455
456 /* Collect the pathname for tracing */
457 dbg_nameptr = proc->p_comm;
458 dbg_namelen = strlen(proc->p_comm);
459 dbg_parms[0]=0L;
460 dbg_parms[1]=0L;
461 dbg_parms[2]=0L;
462 dbg_parms[3]=0L;
463
464 if(dbg_namelen > sizeof(dbg_parms))
465 dbg_namelen = sizeof(dbg_parms);
466
467 for(i=0;dbg_namelen > 0; i++)
468 {
469 dbg_parms[i]=*(long*)dbg_nameptr;
470 dbg_nameptr += sizeof(long);
471 dbg_namelen -= sizeof(long);
472 }
473
474 *arg1=dbg_parms[0];
475 *arg2=dbg_parms[1];
476 *arg3=dbg_parms[2];
477 *arg4=dbg_parms[3];
478 }
479
480 kdbg_resolve_map(thread_act_t th_act, krt_t *t)
481 {
482 kd_threadmap *mapptr;
483
484 if(t->count < t->maxcount)
485 {
486 mapptr=&t->map[t->count];
487 mapptr->thread = (unsigned int)getshuttle_thread(th_act);
488 mapptr->valid = 1;
489 (void) strncpy (mapptr->command, t->atts->task_comm,
490 sizeof(t->atts->task_comm)-1);
491 mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
492 t->count++;
493 }
494 }
495
496 void kdbg_mapinit()
497 {
498 struct proc *p;
499 struct krt akrt;
500 int tts_count; /* number of task-to-string structures */
501 struct tts *tts_mapptr;
502 unsigned int tts_mapsize = 0;
503 unsigned int tts_maptomem=0;
504 int i;
505
506
507 if (kdebug_flags & KDBG_MAPINIT)
508 return;
509
510 /* Calculate the sizes of map buffers*/
511 for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
512 p = p->p_list.le_next)
513 {
514 kd_mapcount += get_task_numacts((task_t)p->task);
515 tts_count++;
516 }
517
518 /*
519 * The proc count could change during buffer allocation,
520 * so introduce a small fudge factor to bump up the
521 * buffer sizes. This gives new tasks some chance of
522 * making into the tables. Bump up by 10%.
523 */
524 kd_mapcount += kd_mapcount/10;
525 tts_count += tts_count/10;
526
527 kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
528 if((kmem_alloc(kernel_map, & kd_maptomem,
529 (vm_size_t)kd_mapsize) == KERN_SUCCESS))
530 kd_mapptr = (kd_threadmap *) kd_maptomem;
531 else
532 kd_mapptr = (kd_threadmap *) 0;
533
534 tts_mapsize = tts_count * sizeof(struct tts);
535 if((kmem_alloc(kernel_map, & tts_maptomem,
536 (vm_size_t)tts_mapsize) == KERN_SUCCESS))
537 tts_mapptr = (struct tts *) tts_maptomem;
538 else
539 tts_mapptr = (struct tts *) 0;
540
541
542 /*
543 * We need to save the procs command string
544 * and take a reference for each task associated
545 * with a valid process
546 */
547
548 if (tts_mapptr) {
549 for (p = allproc.lh_first, i=0; p && i < tts_count;
550 p = p->p_list.le_next) {
551 if (p->p_flag & P_WEXIT)
552 continue;
553
554 if (task_reference_try(p->task)) {
555 tts_mapptr[i].task = p->task;
556 (void)strncpy(&tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
557 i++;
558 }
559 }
560 tts_count = i;
561 }
562
563
564 if (kd_mapptr && tts_mapptr)
565 {
566 kdebug_flags |= KDBG_MAPINIT;
567 /* Initialize thread map data */
568 akrt.map = kd_mapptr;
569 akrt.count = 0;
570 akrt.maxcount = kd_mapcount;
571
572 for (i=0; i < tts_count; i++)
573 {
574 akrt.atts = &tts_mapptr[i];
575 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
576 task_deallocate(tts_mapptr[i].task);
577 }
578 kmem_free(kernel_map, (char *)tts_mapptr, tts_mapsize);
579 }
580 }
581
582 kdbg_clear()
583 {
584 int x;
585
586 /* Clean up the trace buffer */
587 global_state_pid = -1;
588 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
589 kdebug_nolog = 1;
590 kdebug_flags &= ~KDBG_BUFINIT;
591 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
592 kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
593 kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
594 kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize);
595 kd_buffer = (kd_buf *)0;
596 kd_bufsize = 0;
597 kd_prev_timebase = 0LL;
598
599 /* Clean up the thread map buffer */
600 kdebug_flags &= ~KDBG_MAPINIT;
601 kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
602 kd_mapptr = (kd_threadmap *) 0;
603 kd_mapsize = 0;
604 kd_mapcount = 0;
605 }
606
607 kdbg_setpid(kd_regtype *kdr)
608 {
609 pid_t pid;
610 int flag, ret=0;
611 struct proc *p;
612
613 pid = (pid_t)kdr->value1;
614 flag = (int)kdr->value2;
615
616 if (pid > 0)
617 {
618 if ((p = pfind(pid)) == NULL)
619 ret = ESRCH;
620 else
621 {
622 if (flag == 1) /* turn on pid check for this and all pids */
623 {
624 kdebug_flags |= KDBG_PIDCHECK;
625 kdebug_flags &= ~KDBG_PIDEXCLUDE;
626 p->p_flag |= P_KDEBUG;
627 }
628 else /* turn off pid check for this pid value */
629 {
630 /* Don't turn off all pid checking though */
631 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
632 p->p_flag &= ~P_KDEBUG;
633 }
634 }
635 }
636 else
637 ret = EINVAL;
638 return(ret);
639 }
640
641 /* This is for pid exclusion in the trace buffer */
642 kdbg_setpidex(kd_regtype *kdr)
643 {
644 pid_t pid;
645 int flag, ret=0;
646 struct proc *p;
647
648 pid = (pid_t)kdr->value1;
649 flag = (int)kdr->value2;
650
651 if (pid > 0)
652 {
653 if ((p = pfind(pid)) == NULL)
654 ret = ESRCH;
655 else
656 {
657 if (flag == 1) /* turn on pid exclusion */
658 {
659 kdebug_flags |= KDBG_PIDEXCLUDE;
660 kdebug_flags &= ~KDBG_PIDCHECK;
661 p->p_flag |= P_KDEBUG;
662 }
663 else /* turn off pid exclusion for this pid value */
664 {
665 /* Don't turn off all pid exclusion though */
666 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
667 p->p_flag &= ~P_KDEBUG;
668 }
669 }
670 }
671 else
672 ret = EINVAL;
673 return(ret);
674 }
675
676 /* This is for setting a minimum decrementer value */
677 kdbg_setrtcdec(kd_regtype *kdr)
678 {
679 int ret=0;
680 natural_t decval;
681
682 decval = (natural_t)kdr->value1;
683
684 if (decval && decval < KDBG_MINRTCDEC)
685 ret = EINVAL;
686 #ifdef ppc
687 else
688 rtclock_decrementer_min = decval;
689 #else
690 else
691 ret = EOPNOTSUPP;
692 #endif /* ppc */
693
694 return(ret);
695 }
696
697 kdbg_setreg(kd_regtype * kdr)
698 {
699 int i,j, ret=0;
700 unsigned int val_1, val_2, val;
701 switch (kdr->type) {
702
703 case KDBG_CLASSTYPE :
704 val_1 = (kdr->value1 & 0xff);
705 val_2 = (kdr->value2 & 0xff);
706 kdlog_beg = (val_1<<24);
707 kdlog_end = (val_2<<24);
708 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
709 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
710 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
711 break;
712 case KDBG_SUBCLSTYPE :
713 val_1 = (kdr->value1 & 0xff);
714 val_2 = (kdr->value2 & 0xff);
715 val = val_2 + 1;
716 kdlog_beg = ((val_1<<24) | (val_2 << 16));
717 kdlog_end = ((val_1<<24) | (val << 16));
718 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
719 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
720 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
721 break;
722 case KDBG_RANGETYPE :
723 kdlog_beg = (kdr->value1);
724 kdlog_end = (kdr->value2);
725 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
726 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
727 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
728 break;
729 case KDBG_VALCHECK:
730 kdlog_value1 = (kdr->value1);
731 kdlog_value2 = (kdr->value2);
732 kdlog_value3 = (kdr->value3);
733 kdlog_value4 = (kdr->value4);
734 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
735 kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
736 kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
737 break;
738 case KDBG_TYPENONE :
739 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
740 kdlog_beg = 0;
741 kdlog_end = 0;
742 break;
743 default :
744 ret = EINVAL;
745 break;
746 }
747 return(ret);
748 }
749
750 kdbg_getreg(kd_regtype * kdr)
751 {
752 int i,j, ret=0;
753 unsigned int val_1, val_2, val;
754 #if 0
755 switch (kdr->type) {
756 case KDBG_CLASSTYPE :
757 val_1 = (kdr->value1 & 0xff);
758 val_2 = val_1 + 1;
759 kdlog_beg = (val_1<<24);
760 kdlog_end = (val_2<<24);
761 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
762 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
763 break;
764 case KDBG_SUBCLSTYPE :
765 val_1 = (kdr->value1 & 0xff);
766 val_2 = (kdr->value2 & 0xff);
767 val = val_2 + 1;
768 kdlog_beg = ((val_1<<24) | (val_2 << 16));
769 kdlog_end = ((val_1<<24) | (val << 16));
770 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
771 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
772 break;
773 case KDBG_RANGETYPE :
774 kdlog_beg = (kdr->value1);
775 kdlog_end = (kdr->value2);
776 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
777 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
778 break;
779 case KDBG_TYPENONE :
780 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
781 kdlog_beg = 0;
782 kdlog_end = 0;
783 break;
784 default :
785 ret = EINVAL;
786 break;
787 }
788 #endif /* 0 */
789 return(EINVAL);
790 }
791
792
793
794 kdbg_readmap(kd_threadmap *buffer, size_t *number)
795 {
796 int avail = *number;
797 int ret = 0;
798 int count = 0;
799
800 count = avail/sizeof (kd_threadmap);
801
802 if (count && (count <= kd_mapcount))
803 {
804 if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
805 {
806 if (*number < kd_mapsize)
807 ret=EINVAL;
808 else
809 {
810 if (copyout(kd_mapptr, buffer, kd_mapsize))
811 ret=EINVAL;
812 }
813 }
814 else
815 ret=EINVAL;
816 }
817 else
818 ret=EINVAL;
819
820 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
821 {
822 kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize);
823 kdebug_flags &= ~KDBG_MAPINIT;
824 kd_mapsize = 0;
825 kd_mapptr = (kd_threadmap *) 0;
826 kd_mapcount = 0;
827 }
828
829 return(ret);
830 }
831
832 kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout)
833 {
834 int avail = *number;
835 int ret = 0;
836 int count = 0; /* The number of timestamp entries that will fill buffer */
837
838 if (kd_entropy_buffer)
839 return(EBUSY);
840
841 kd_entropy_count = avail/sizeof(mach_timespec_t);
842 kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
843 kd_entropy_indx = 0;
844
845 /* Enforce maximum entropy entries here if needed */
846
847 /* allocate entropy buffer */
848 if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
849 (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
850 {
851 kd_entropy_buffer = (mach_timespec_t *)kd_entropy_buftomem;
852 }
853 else
854 {
855 kd_entropy_buffer = (mach_timespec_t *) 0;
856 kd_entropy_count = 0;
857 kd_entropy_indx = 0;
858 return (EINVAL);
859 }
860
861 if (ms_timeout < 10)
862 ms_timeout = 10;
863
864 /* Enable entropy sampling */
865 kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
866
867 ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
868
869 /* Disable entropy sampling */
870 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
871
872 *number = 0;
873 ret = 0;
874
875 if (kd_entropy_indx > 0)
876 {
877 /* copyout the buffer */
878 if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
879 ret = EINVAL;
880 else
881 *number = kd_entropy_indx;
882 }
883
884 /* Always cleanup */
885 kd_entropy_count = 0;
886 kd_entropy_indx = 0;
887 kd_entropy_buftomem = 0;
888 kmem_free(kernel_map, (char *)kd_entropy_buffer, kd_entropy_bufsize);
889 kd_entropy_buffer = (mach_timespec_t *) 0;
890 return(ret);
891 }
892
893
894 /*
895 * This function is provided for the CHUD toolkit only.
896 * int val:
897 * zero disables kdebug_chudhook function call
898 * non-zero enables kdebug_chudhook function call
899 * char *fn:
900 * address of the enabled kdebug_chudhook function
901 */
902
903 void kdbg_control_chud(int val, void *fn)
904 {
905 if (val) {
906 /* enable chudhook */
907 kdebug_enable |= KDEBUG_ENABLE_CHUD;
908 kdebug_chudhook = fn;
909 }
910 else {
911 /* disable chudhook */
912 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
913 kdebug_chudhook = 0;
914 }
915 }
916
917
/*
 * sysctl-style control entry point for the kdebug facility.
 * name[0] selects the operation (KERN_KD*), name[1] is its value
 * argument; 'where'/'sizep' describe the caller's user buffer.
 *
 * KERN_KDGETBUF and KERN_KDGETENTROPY are passive and handled first;
 * every other operation requires exclusive ownership of the trace
 * buffer, arbitrated through global_state_pid.
 */
kdbg_control(name, namelen, where, sizep)
int *name;
u_int namelen;
char *where;
size_t *sizep;
{
	int ret=0;
	int size=*sizep;
	int max_entries;
	unsigned int value = name[1];	/* per-operation argument */
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;

	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETBUF) {
		/*
		   Does not alter the global_state_pid
		   This is a passive request.
		*/
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			   There is not enough room to return even
			   the first element of the info structure.
			*/
			return(EINVAL);
		}

		/* snapshot the current buffer configuration */
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
		kd_bufinfo.nolog = kdebug_nolog;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if(size >= sizeof(kbufinfo_t)) {
			/* Provide all the info we have */
			if(copyout (&kd_bufinfo, where, sizeof(kbufinfo_t)))
				return(EINVAL);
		}
		else {
			/*
			   For backwards compatibility, only provide
			   as much info as there is room for.
			*/
			if(copyout (&kd_bufinfo, where, size))
				return(EINVAL);
		}
		return(0);
	}
	else if (name[0] == KERN_KDGETENTROPY) {
		/* passive: one entropy collection at a time */
		if (kd_entropy_buffer)
			return(EBUSY);
		else
			ret = kdbg_getentropy((mach_timespec_t *)where, sizep, value);
		return (ret);
	}

	/* intentional assignment: resolve the calling process */
	if(curproc = current_proc())
		curpid = curproc->p_pid;
	else
		return (ESRCH);

	/* arbitrate exclusive ownership of the trace buffer */
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid)
	{
		if((p = pfind(global_state_pid)) == NULL)
		{
			/* The global pid no longer exists */
			global_state_pid = curpid;
		}
		else
		{
			/* The global pid exists, deny this request */
			return(EBUSY);
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:	/* set user-settable flag bits */
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:	/* clear user-settable flag bits */
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:    /* used to enable or disable */
		if (value)
		{
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT))
			{
				ret=EINVAL;
				break;
			}
		}

		if (value)
			kdebug_enable |= KDEBUG_ENABLE_TRACE;
		else
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;

		kdebug_nolog = (value)?0:1;

		/* build the thread map when tracing starts */
		if (kdebug_enable & KDEBUG_ENABLE_TRACE)
			kdbg_mapinit();
		break;
	case KERN_KDSETBUF:
		/* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
		/* 'value' is the desired number of trace entries */
		max_entries = (sane_size/4) / sizeof(kd_buf);
		if (value <= max_entries)
			nkdbufs = value;
		else
			nkdbufs = max_entries;
		break;
	case KERN_KDSETUP:	/* (re)allocate the trace buffer */
		ret=kdbg_reinit();
		break;
	case KERN_KDREMOVE:	/* tear down all kdebug state */
		kdbg_clear();
		break;
	case KERN_KDSETREG:	/* program the trace filter */
		if(size < sizeof(kd_regtype)) {
			ret=EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:	/* read back the filter (currently unsupported) */
		if(size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
			ret=EINVAL;
		}
		break;
	case KERN_KDREADTR:	/* copy trace entries out to the caller */
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:	/* per-pid inclusion filtering */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:	/* per-pid exclusion filtering */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:	/* copy out (and consume) the thread map */
		ret = kdbg_readmap((kd_threadmap *)where, sizep);
		break;
	case KERN_KDSETRTCDEC:	/* set the minimum rtclock decrementer */
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret= EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	default:
		ret= EINVAL;
	}
	return(ret);
}
1108
/*
 * Copy trace entries from the circular kernel buffer to the caller's
 * buffer, advancing the read cursor.  Snapshots the write cursor and
 * flags under the trace lock, then copies without it.  When the buffer
 * has wrapped, the read restarts at the write cursor and may require two
 * copyouts: read cursor -> end of buffer, then start of buffer -> write
 * cursor.  On success *number is set to the entries copied and 0 is
 * returned; EINVAL on a bad size, uninitialized buffer, or copyout error.
 */
kdbg_read(kd_buf * buffer, size_t *number)
{
	int avail=*number;
	int count=0;
	int copycount=0;
	int totalcount=0;
	int s;
	unsigned int my_kdebug_flags;	/* snapshot of kdebug_flags */
	kd_buf * my_kd_bufptr;		/* snapshot of the write cursor */

	/* take a consistent snapshot of cursor and flags */
	s = ml_set_interrupts_enabled(FALSE);
	usimple_lock(&kd_trace_lock);
	my_kdebug_flags = kdebug_flags;
	my_kd_bufptr = kd_bufptr;
	usimple_unlock(&kd_trace_lock);
	ml_set_interrupts_enabled(s);

	count = avail/sizeof(kd_buf);
	if (count) {
		if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
			if (count > nkdbufs)
				count = nkdbufs;
			if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
			{
				/* no wrap: one contiguous copy from read cursor to write cursor */
				copycount = my_kd_bufptr-kd_readlast;
				if (copycount > count)
					copycount = count;

				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					*number = 0;
					return(EINVAL);
				}
				kd_readlast += copycount;
				*number = copycount;
				return(0);
			}
			else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
			{
				/* no wrap and nothing new to read */
				*number = 0;
				return(0);
			}
			else
			{
				if (my_kdebug_flags & KDBG_WRAPPED)
				{
					/* oldest surviving entry is at the write cursor */
					kd_readlast = my_kd_bufptr;
					kdebug_flags &= ~KDBG_WRAPPED;
				}

				/* Note that by setting kd_readlast equal to my_kd_bufptr,
				   we now treat the kd_buffer read the same as if we weren't
				   wrapped and my_kd_bufptr was less than kd_readlast.
				*/

				/* first copyout from readlast to end of kd_buffer */
				copycount = kd_buflast - kd_readlast;
				if (copycount > count)
					copycount = count;
				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					*number = 0;
					return(EINVAL);
				}
				buffer += copycount;
				count -= copycount;
				totalcount = copycount;
				kd_readlast += copycount;
				if (kd_readlast == kd_buflast)
					kd_readlast = kd_buffer;	/* wrap the read cursor */
				if (count == 0)
				{
					*number = totalcount;
					return(0);
				}

				/* second copyout from top of kd_buffer to bufptr */
				copycount = my_kd_bufptr - kd_readlast;
				if (copycount > count)
					copycount = count;
				if (copycount == 0)
				{
					*number = totalcount;
					return(0);
				}
				if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				{
					/* NOTE(review): unlike the earlier failure paths,
					   *number is not zeroed here -- confirm intended */
					return(EINVAL);
				}
				kd_readlast += copycount;
				totalcount += copycount;
				*number = totalcount;
				return(0);
			}
		} /* end if KDBG_BUFINIT */
	} /* end if count */
	return (EINVAL);
}
1207
1208 unsigned char *getProcName(struct proc *proc);
1209 unsigned char *getProcName(struct proc *proc) {
1210
1211 return &proc->p_comm; /* Return pointer to the proc name */
1212
1213 }