/* bsd/kern/kdebug.c */

/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *   kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;


#define SLOW_NOLOG   0x01
#define SLOW_CHECKS  0x02
#define SLOW_ENTROPY 0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
kd_buf *     kd_bufptr;
unsigned int kd_buftomem = 0;
kd_buf *     kd_buffer = 0;
kd_buf *     kd_buflast;
kd_buf *     kd_readlast;
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

unsigned long long kd_prev_timebase = 0LL;

static lck_mtx_t      *kd_trace_mtx;
static lck_grp_t      *kd_trace_mtx_grp;
static lck_attr_t     *kd_trace_mtx_attr;
static lck_grp_attr_t *kd_trace_mtx_grp_attr;

static lck_spin_t     *kd_trace_lock;
static lck_grp_t      *kd_trace_lock_grp;
static lck_attr_t     *kd_trace_lock_attr;
static lck_grp_attr_t *kd_trace_lock_grp_attr;

kd_threadmap *kd_mapptr = 0;
unsigned int  kd_mapsize = 0;
unsigned int  kd_mapcount = 0;
unsigned int  kd_maptomem = 0;

pid_t global_state_pid = -1;    /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

#ifdef ppc
extern natural_t rtclock_decrementer_min;
#endif /* ppc */
/* task to string structure */
struct tts
{
    task_t *task;           /* from proc's task */
    pid_t   pid;            /* from proc's p_pid */
    char    task_comm[20];  /* from proc's p_comm */
};

typedef struct tts tts_t;

struct krt
{
    kd_threadmap *map;      /* pointer to the map buffer */
    int count;
    int maxcount;
    struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
                                unsigned int arg2, unsigned int arg3,
                                unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(p, uap, retval)
    struct proc *p;
    struct kdebug_trace_args *uap;
    register_t *retval;
{
    if (kdebug_enable == 0)
        return(EINVAL);

    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
    return(0);
}
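
/*
 * Illustrative sketch (not compiled into the kernel): how a userspace
 * tool might drive this syscall. The debugid layout follows sys/kdebug.h;
 * the subclass and code values here are made up for the example.
 */
#if 0
#include <sys/kdebug.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
example_trace_point(void)
{
    /* DBG_APPS class, hypothetical subclass 42, code 1 */
    unsigned int code = KDBG_CODE(DBG_APPS, 42, 1);

    /* bracket a region of interest with START/END qualifiers */
    syscall(SYS_kdebug_trace, code | DBG_FUNC_START, 0, 0, 0, 0);
    /* ... work being measured ... */
    syscall(SYS_kdebug_trace, code | DBG_FUNC_END, 0, 0, 0, 0);
}
#endif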


void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
    unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf *kd;
    struct proc *curproc;
    int s;
    unsigned long long now;

    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
        if (kdebug_chudhook)
            kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

        if (!(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
            return;
    }
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    if (kdebug_slowcheck == 0)
        goto record_trace;

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
    {
        if (kd_entropy_indx < kd_entropy_count)
        {
            kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
            kd_entropy_indx++;
        }

        if (kd_entropy_indx == kd_entropy_count)
        {
            /* Disable entropy collection */
            kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
            kdebug_slowcheck &= ~SLOW_ENTROPY;
        }
    }

    if ( (kdebug_slowcheck & SLOW_NOLOG) )
    {
        lck_spin_unlock(kd_trace_lock);
        ml_set_interrupts_enabled(s);
        return;
    }

    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        /*
         * Drop events outside [kdlog_beg, kdlog_end), except for
         * DBG_TRACE class events, which always pass. The parentheses
         * matter here: '&&' binds tighter than '||'.
         */
        if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
            && (debugid >> 24 != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

record_trace:
    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = (int)current_thread();

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    /* Watch for out of order timestamps */

    if (now < kd_prev_timebase)
    {
        now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
    }
    else
    {
        /* Then just store the previous timestamp */
        kd_prev_timebase = now;
    }
    kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_slowcheck |= SLOW_NOLOG;
        kdebug_flags |= KDBG_WRAPPED;
    }
    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);
}
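
/*
 * Illustrative sketch (not compiled): unpacking the timestamp word built
 * above. The cpu number lives in the top bits (above KDBG_CPU_SHIFT) and
 * the masked mach_absolute_time() value fills the rest.
 */
#if 0
static void
example_decode_timestamp(const kd_buf *kd)
{
    uint64_t when = kd->timestamp & KDBG_TIMESTAMP_MASK;    /* time, in mach_absolute_time() units */
    int      cpu  = (int)(kd->timestamp >> KDBG_CPU_SHIFT); /* cpu that logged the event */
}
#endif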

void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
    unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf *kd;
    struct proc *curproc;
    int s;
    unsigned long long now;

    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
        if (kdebug_chudhook)
            (void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

        if (!(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
            return;
    }
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    if (kdebug_slowcheck == 0)
        goto record_trace1;

    if ( (kdebug_slowcheck & SLOW_NOLOG) )
    {
        lck_spin_unlock(kd_trace_lock);
        ml_set_interrupts_enabled(s);
        return;
    }

    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        /*
         * Same range filter as kernel_debug(): parenthesized so the
         * DBG_TRACE exemption applies to both range tests.
         */
        if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
            && (debugid >> 24 != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

record_trace1:
    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = arg5;

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    /* Watch for out of order timestamps */

    if (now < kd_prev_timebase)
    {
        now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
    }
    else
    {
        /* Then just store the previous timestamp */
        kd_prev_timebase = now;
    }
    kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_slowcheck |= SLOW_NOLOG;
        kdebug_flags |= KDBG_WRAPPED;
    }
    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);
}

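/*
 * Locking overview: kd_trace_lock is a spin lock taken with interrupts
 * disabled around every event insertion in kernel_debug() and
 * kernel_debug1(), and around the enable/disable flips that race with
 * them; kd_trace_mtx is a sleepable mutex that serializes the sysctl
 * control path in kdbg_control(). Both are set up once below.
 */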
static void
kdbg_lock_init()
{
    if (kdebug_flags & KDBG_LOCKINIT)
        return;
    /*
     * allocate lock group attribute and group
     */
    kd_trace_lock_grp_attr = lck_grp_attr_alloc_init();
    //lck_grp_attr_setstat(kd_trace_lock_grp_attr);
    kd_trace_lock_grp = lck_grp_alloc_init("kdebug", kd_trace_lock_grp_attr);

    kd_trace_mtx_grp_attr = lck_grp_attr_alloc_init();
    //lck_grp_attr_setstat(kd_trace_mtx_grp_attr);
    kd_trace_mtx_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_grp_attr);

    /*
     * allocate the lock attribute
     */
    kd_trace_lock_attr = lck_attr_alloc_init();
    //lck_attr_setdebug(kd_trace_lock_attr);

    kd_trace_mtx_attr = lck_attr_alloc_init();
    //lck_attr_setdebug(kd_trace_mtx_attr);

    /*
     * allocate and initialize spin lock and mutex
     */
    kd_trace_lock = lck_spin_alloc_init(kd_trace_lock_grp, kd_trace_lock_attr);
    kd_trace_mtx  = lck_mtx_alloc_init(kd_trace_mtx_grp, kd_trace_mtx_attr);

    kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap()
{
    kd_bufsize = nkdbufs * sizeof(kd_buf);

    if (kmem_alloc(kernel_map, &kd_buftomem,
                   (vm_size_t)kd_bufsize) == KERN_SUCCESS)
        kd_buffer = (kd_buf *) kd_buftomem;
    else
        kd_buffer = (kd_buf *) 0;
    kdebug_flags &= ~KDBG_WRAPPED;

    if (kd_buffer) {
        kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
        kd_bufptr = kd_buffer;
        kd_buflast = &kd_bufptr[nkdbufs];
        kd_readlast = kd_bufptr;
        kd_prev_timebase = 0LL;
        return(0);
    } else {
        kd_bufsize = 0;
        kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
        return(EINVAL);
    }
}

int
kdbg_reinit()
{
    int s;
    int ret = 0;

    /*
     * Disable trace collecting
     * First make sure we're not in
     * the middle of cutting a trace
     */
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck |= SLOW_NOLOG;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
        kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    ret = kdbg_bootstrap();

    return(ret);
}

void kdbg_trace_data(struct proc *proc, long *arg_pid)
{
    if (!proc)
        *arg_pid = 0;
    else
        *arg_pid = proc->p_pid;

    return;
}


void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
    int i;
    char *dbg_nameptr;
    int dbg_namelen;
    long dbg_parms[4];

    if (!proc)
    {
        *arg1 = 0;
        *arg2 = 0;
        *arg3 = 0;
        *arg4 = 0;
        return;
    }

    /* Collect the pathname for tracing */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = strlen(proc->p_comm);
    dbg_parms[0] = 0L;
    dbg_parms[1] = 0L;
    dbg_parms[2] = 0L;
    dbg_parms[3] = 0L;

    if (dbg_namelen > sizeof(dbg_parms))
        dbg_namelen = sizeof(dbg_parms);

    for (i = 0; dbg_namelen > 0; i++)
    {
        dbg_parms[i] = *(long *)dbg_nameptr;
        dbg_nameptr += sizeof(long);
        dbg_namelen -= sizeof(long);
    }

    *arg1 = dbg_parms[0];
    *arg2 = dbg_parms[1];
    *arg3 = dbg_parms[2];
    *arg4 = dbg_parms[3];
}
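
/*
 * Illustrative sketch (not compiled): a consumer reversing the packing
 * above. The four longs are simply the first sizeof(dbg_parms) bytes of
 * p_comm laid end to end; `name` is a hypothetical destination buffer.
 */
#if 0
#include <string.h>

static void
example_unpack_comm(long arg1, long arg2, long arg3, long arg4)
{
    long parms[4] = { arg1, arg2, arg3, arg4 };
    char name[sizeof(parms) + 1];

    memcpy(name, parms, sizeof(parms));  /* raw bytes of p_comm */
    name[sizeof(parms)] = '\0';          /* may be truncated, so terminate */
}
#endif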

static void
kdbg_resolve_map(thread_t th_act, krt_t *t)
{
    kd_threadmap *mapptr;

    if (t->count < t->maxcount)
    {
        mapptr = &t->map[t->count];
        mapptr->thread = (unsigned int)th_act;
        (void) strncpy (mapptr->command, t->atts->task_comm,
                        sizeof(t->atts->task_comm) - 1);
        mapptr->command[sizeof(t->atts->task_comm) - 1] = '\0';

        /*
         * Some kernel threads have no associated pid.
         * We still need to mark the entry as valid.
         */
        if (t->atts->pid)
            mapptr->valid = t->atts->pid;
        else
            mapptr->valid = 1;

        t->count++;
    }
}

void kdbg_mapinit()
{
    struct proc *p;
    struct krt akrt;
    int tts_count;      /* number of task-to-string structures */
    struct tts *tts_mapptr;
    unsigned int tts_mapsize = 0;
    unsigned int tts_maptomem = 0;
    int i;

    if (kdebug_flags & KDBG_MAPINIT)
        return;

    /* Calculate the sizes of map buffers */
    for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
         p = p->p_list.le_next)
    {
        kd_mapcount += get_task_numacts((task_t)p->task);
        tts_count++;
    }

    /*
     * The proc count could change during buffer allocation,
     * so introduce a small fudge factor to bump up the
     * buffer sizes. This gives new tasks some chance of
     * making it into the tables. Bump up by 10%.
     */
    kd_mapcount += kd_mapcount / 10;
    tts_count += tts_count / 10;

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
    if ((kmem_alloc(kernel_map, &kd_maptomem,
                    (vm_size_t)kd_mapsize) == KERN_SUCCESS))
    {
        kd_mapptr = (kd_threadmap *) kd_maptomem;
        bzero(kd_mapptr, kd_mapsize);
    }
    else
        kd_mapptr = (kd_threadmap *) 0;

    tts_mapsize = tts_count * sizeof(struct tts);
    if ((kmem_alloc(kernel_map, &tts_maptomem,
                    (vm_size_t)tts_mapsize) == KERN_SUCCESS))
    {
        tts_mapptr = (struct tts *) tts_maptomem;
        bzero(tts_mapptr, tts_mapsize);
    }
    else
        tts_mapptr = (struct tts *) 0;

    /*
     * We need to save the proc's command string
     * and take a reference for each task associated
     * with a valid process
     */
    if (tts_mapptr) {
        for (p = allproc.lh_first, i = 0; p && i < tts_count;
             p = p->p_list.le_next) {
            if (p->p_flag & P_WEXIT)
                continue;

            if (p->task) {
                task_reference(p->task);
                tts_mapptr[i].task = p->task;
                tts_mapptr[i].pid = p->p_pid;
                (void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
                i++;
            }
        }
        tts_count = i;
    }

    if (kd_mapptr && tts_mapptr)
    {
        kdebug_flags |= KDBG_MAPINIT;
        /* Initialize thread map data */
        akrt.map = kd_mapptr;
        akrt.count = 0;
        akrt.maxcount = kd_mapcount;

        for (i = 0; i < tts_count; i++)
        {
            akrt.atts = &tts_mapptr[i];
            task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
            task_deallocate((task_t) tts_mapptr[i].task);
        }
        kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
    }
}

static void
kdbg_clear(void)
{
    int s;

    /*
     * Clean up the trace buffer
     * First make sure we're not in
     * the middle of cutting a trace
     */
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck = SLOW_NOLOG;

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
        kdebug_slowcheck |= SLOW_ENTROPY;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    global_state_pid = -1;
    kdebug_flags &= ~KDBG_BUFINIT;
    kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
    kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
    kd_buffer = (kd_buf *)0;
    kd_bufsize = 0;
    kd_prev_timebase = 0LL;

    /* Clean up the thread map buffer */
    kdebug_flags &= ~KDBG_MAPINIT;
    kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
    kd_mapptr = (kd_threadmap *) 0;
    kd_mapsize = 0;
    kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)  /* turn on pid check for this and all pids */
            {
                kdebug_flags |= KDBG_PIDCHECK;
                kdebug_flags &= ~KDBG_PIDEXCLUDE;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_flag |= P_KDEBUG;
            }
            else  /* turn off pid check for this pid value */
            {
                /* Don't turn off all pid checking though */
                /* kdebug_flags &= ~KDBG_PIDCHECK; */
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)  /* turn on pid exclusion */
            {
                kdebug_flags |= KDBG_PIDEXCLUDE;
                kdebug_flags &= ~KDBG_PIDCHECK;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_flag |= P_KDEBUG;
            }
            else  /* turn off pid exclusion for this pid value */
            {
                /* Don't turn off all pid exclusion though */
                /* kdebug_flags &= ~KDBG_PIDEXCLUDE; */
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for setting a minimum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
    int ret = 0;
    natural_t decval;

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)
        ret = EINVAL;
#ifdef ppc
    else
        rtclock_decrementer_min = decval;
#else
    else
        ret = ENOTSUP;
#endif /* ppc */

    return(ret);
}

int
kdbg_setreg(kd_regtype *kdr)
{
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {

    case KDBG_CLASSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        kdlog_beg = (val_1 << 24);
        kdlog_end = (val_2 << 24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;  /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_SUBCLSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1 << 24) | (val_2 << 16));
        kdlog_end = ((val_1 << 24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;  /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_RANGETYPE:
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;  /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_VALCHECK:
        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_RANGECHECK;  /* Turn off range check */
        kdebug_flags |= KDBG_VALCHECK;     /* Turn on specific value check */
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_TYPENONE:
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

        if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
            kdebug_slowcheck |= SLOW_CHECKS;
        else
            kdebug_slowcheck &= ~SLOW_CHECKS;

        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default:
        ret = EINVAL;
        break;
    }
    return(ret);
}

int
kdbg_getreg(kd_regtype *kdr)
{
#if 0
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {
    case KDBG_CLASSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = val_1 + 1;
        kdlog_beg = (val_1 << 24);
        kdlog_end = (val_2 << 24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1 << 24) | (val_2 << 16));
        kdlog_end = ((val_1 << 24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE:
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_TYPENONE:
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default:
        ret = EINVAL;
        break;
    }
#endif /* 0 */
    return(EINVAL);
}

int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
    int avail = *number;
    int ret = 0;
    int count = 0;

    count = avail / sizeof(kd_threadmap);

    if (count && (count <= kd_mapcount))
    {
        if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
        {
            if (*number < kd_mapsize)
                ret = EINVAL;
            else
            {
                if (copyout(kd_mapptr, buffer, kd_mapsize))
                    ret = EINVAL;
            }
        }
        else
            ret = EINVAL;
    }
    else
        ret = EINVAL;

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    return(ret);
}
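
/*
 * Illustrative sketch (not compiled): fetching the thread map from
 * userspace. KERN_KDTHRMAP is dispatched here by kdbg_control(); the
 * sizing assumes the caller already learned the thread count (e.g. from
 * KERN_KDGETBUF's nkdthreads field). `nthreads` is hypothetical input.
 */
#if 0
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <stdlib.h>

static kd_threadmap *
example_read_threadmap(int nthreads)
{
    int mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDTHRMAP };
    size_t size = nthreads * sizeof(kd_threadmap);
    kd_threadmap *map = malloc(size);

    if (map && sysctl(mib, 3, map, &size, NULL, 0) < 0) {
        free(map);
        map = NULL;
    }
    return map;
}
#endif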

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
    int avail = *number;
    int ret = 0;

    if (kd_entropy_buffer)
        return(EBUSY);

    /* the number of timestamp entries that will fit in the buffer */
    kd_entropy_count = avail / sizeof(mach_timespec_t);
    kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
    kd_entropy_indx = 0;

    /* Enforce maximum entropy entries here if needed */

    /* allocate entropy buffer */
    if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
                   (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
    {
        kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
    }
    else
    {
        kd_entropy_buffer = (uint64_t *) 0;
        kd_entropy_count = 0;
        kd_entropy_indx = 0;
        return (EINVAL);
    }

    if (ms_timeout < 10)
        ms_timeout = 10;

    /* Enable entropy sampling */
    kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck |= SLOW_ENTROPY;

    ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout / (1000 / HZ)));

    /* Disable entropy sampling */
    kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck &= ~SLOW_ENTROPY;

    /* a tsleep timeout is the expected wakeup, so it is not an error */
    *number = 0;
    ret = 0;

    if (kd_entropy_indx > 0)
    {
        /* copyout the buffer */
        if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
            ret = EINVAL;
        else
            *number = kd_entropy_indx;
    }

    /* Always cleanup */
    kd_entropy_count = 0;
    kd_entropy_indx = 0;
    kd_entropy_buftomem = 0;
    kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
    kd_entropy_buffer = (uint64_t *) 0;
    return(ret);
}
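
/*
 * Illustrative sketch (not compiled): the consumer side of the entropy
 * interface. KERN_KDGETENTROPY blocks in kdbg_getentropy() for roughly
 * `ms` milliseconds while the event path deposits mach_absolute_time()
 * samples. The mib layout is assumed to mirror what kdbg_control()
 * expects, with the name[1] slot carrying the timeout.
 */
#if 0
#include <sys/sysctl.h>
#include <sys/kdebug.h>

static int
example_collect_entropy(uint64_t *samples, size_t *size, int ms)
{
    int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETENTROPY, ms };

    /* on success *size is rewritten with the number of samples gathered */
    return sysctl(mib, 4, samples, size, NULL, 0);
}
#endif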


/*
 * This function is provided for the CHUD toolkit only.
 * int val:
 *   zero disables kdebug_chudhook function call
 *   non-zero enables kdebug_chudhook function call
 * char *fn:
 *   address of the enabled kdebug_chudhook function
 */
void kdbg_control_chud(int val, void *fn)
{
    if (val) {
        /* enable chudhook */
        kdebug_chudhook = (kd_chudhook_fn)fn;
        kdebug_enable |= KDEBUG_ENABLE_CHUD;
    }
    else {
        /* disable chudhook */
        kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
        kdebug_chudhook = 0;
    }
}


int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    int ret = 0;
    int size = *sizep;
    int max_entries;
    unsigned int value = name[1];
    kd_regtype kd_Reg;
    kbufinfo_t kd_bufinfo;
    pid_t curpid;
    struct proc *p, *curproc;

    kdbg_lock_init();
    lck_mtx_lock(kd_trace_mtx);

    if (name[0] == KERN_KDGETBUF) {
        /*
         * Does not alter the global_state_pid
         * This is a passive request.
         */
        if (size < sizeof(kd_bufinfo.nkdbufs)) {
            /*
             * There is not enough room to return even
             * the first element of the info structure.
             */
            lck_mtx_unlock(kd_trace_mtx);

            return(EINVAL);
        }
        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

        if ( (kdebug_slowcheck & SLOW_NOLOG) )
            kd_bufinfo.nolog = 1;
        else
            kd_bufinfo.nolog = 0;
        kd_bufinfo.flags = kdebug_flags;
        kd_bufinfo.bufid = global_state_pid;

        if (size >= sizeof(kd_bufinfo)) {
            /*
             * Provide all the info we have
             */
            if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
                lck_mtx_unlock(kd_trace_mtx);

                return(EINVAL);
            }
        }
        else {
            /*
             * For backwards compatibility, only provide
             * as much info as there is room for.
             */
            if (copyout(&kd_bufinfo, where, size)) {
                lck_mtx_unlock(kd_trace_mtx);

                return(EINVAL);
            }
        }
        lck_mtx_unlock(kd_trace_mtx);

        return(0);
    } else if (name[0] == KERN_KDGETENTROPY) {
        if (kd_entropy_buffer)
            ret = EBUSY;
        else
            ret = kdbg_getentropy(where, sizep, value);
        lck_mtx_unlock(kd_trace_mtx);

        return (ret);
    }

    if ((curproc = current_proc()) != NULL)
        curpid = curproc->p_pid;
    else {
        lck_mtx_unlock(kd_trace_mtx);

        return (ESRCH);
    }
    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid) {
        if ((p = pfind(global_state_pid)) == NULL) {
            /*
             * The global pid no longer exists
             */
            global_state_pid = curpid;
        } else {
            /*
             * The global pid exists, deny this request
             */
            lck_mtx_unlock(kd_trace_mtx);

            return(EBUSY);
        }
    }

    switch (name[0]) {
    case KERN_KDEFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags |= value;
        break;
    case KERN_KDDFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags &= ~value;
        break;
    case KERN_KDENABLE:  /* used to enable or disable */
        if (value)
        {
            /* enable only if buffer is initialized */
            if (!(kdebug_flags & KDBG_BUFINIT))
            {
                ret = EINVAL;
                break;
            }
            kdebug_enable |= KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck &= ~SLOW_NOLOG;
        }
        else
        {
            kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck |= SLOW_NOLOG;
        }
        kdbg_mapinit();
        break;
    case KERN_KDSETBUF:
        /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
        /* 'value' is the desired number of trace entries */
        max_entries = (sane_size / 4) / sizeof(kd_buf);
        if (value <= max_entries)
            nkdbufs = value;
        else
            nkdbufs = max_entries;
        break;
    case KERN_KDSETUP:
        ret = kdbg_reinit();
        break;
    case KERN_KDREMOVE:
        kdbg_clear();
        break;
    case KERN_KDSETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setreg(&kd_Reg);
        break;
    case KERN_KDGETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
            ret = EINVAL;
        }
        break;
    case KERN_KDREADTR:
        ret = kdbg_read(where, sizep);
        break;
    case KERN_KDPIDTR:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpid(&kd_Reg);
        break;
    case KERN_KDPIDEX:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpidex(&kd_Reg);
        break;
    case KERN_KDTHRMAP:
        ret = kdbg_readmap(where, sizep);
        break;
    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setrtcdec(&kd_Reg);
        break;

    default:
        ret = EINVAL;
    }
    lck_mtx_unlock(kd_trace_mtx);

    return(ret);
}
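
/*
 * Illustrative sketch (not compiled): the control sequence a tracing tool
 * typically drives through kdbg_control(). Error handling is elided and
 * the entry count is an arbitrary example value.
 */
#if 0
#include <sys/sysctl.h>
#include <sys/kdebug.h>

static void
example_trace_session(void)
{
    int mib[4] = { CTL_KERN, KERN_KDEBUG, 0, 0 };
    size_t needed = 0;

    mib[2] = KERN_KDSETBUF; mib[3] = 8192;  /* request 8192 kd_buf entries */
    sysctl(mib, 4, NULL, &needed, NULL, 0);

    mib[2] = KERN_KDSETUP;  mib[3] = 0;     /* allocate the trace buffer */
    sysctl(mib, 3, NULL, &needed, NULL, 0);

    mib[2] = KERN_KDENABLE; mib[3] = 1;     /* start collection */
    sysctl(mib, 4, NULL, &needed, NULL, 0);

    /*
     * ... later: drain with KERN_KDREADTR into a kd_buf array, then
     * KERN_KDENABLE with 0 and KERN_KDREMOVE to tear everything down.
     */
}
#endif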

int
kdbg_read(user_addr_t buffer, size_t *number)
{
    int avail = *number;
    int count = 0;
    int copycount = 0;
    int totalcount = 0;
    int s;
    unsigned int my_kdebug_flags;
    kd_buf *my_kd_bufptr;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    my_kdebug_flags = kdebug_flags;
    my_kd_bufptr = kd_bufptr;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    count = avail / sizeof(kd_buf);

    if (count) {
        if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
            if (count > nkdbufs)
                count = nkdbufs;

            if (!(my_kdebug_flags & KDBG_WRAPPED)) {
                if (my_kd_bufptr == kd_readlast) {
                    *number = 0;
                    return(0);
                }
                if (my_kd_bufptr > kd_readlast) {
                    copycount = my_kd_bufptr - kd_readlast;
                    if (copycount > count)
                        copycount = count;

                    if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
                        *number = 0;
                        return(EINVAL);
                    }
                    kd_readlast += copycount;
                    *number = copycount;
                    return(0);
                }
            }
            if ( (my_kdebug_flags & KDBG_WRAPPED) ) {
                /*
                 * Note that by setting kd_readlast equal to my_kd_bufptr,
                 * we now treat the kd_buffer read the same as if we weren't
                 * wrapped and my_kd_bufptr was less than kd_readlast.
                 */
                kd_readlast = my_kd_bufptr;
                kdebug_flags &= ~KDBG_WRAPPED;
            }
            /*
             * first copyout from readlast to end of kd_buffer
             */
            copycount = kd_buflast - kd_readlast;
            if (copycount > count)
                copycount = count;
            if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
                *number = 0;
                return(EINVAL);
            }
            buffer += (copycount * sizeof(kd_buf));
            count -= copycount;
            totalcount = copycount;
            kd_readlast += copycount;

            if (kd_readlast == kd_buflast)
                kd_readlast = kd_buffer;
            if (count == 0) {
                *number = totalcount;
                return(0);
            }
            /* second copyout from top of kd_buffer to bufptr */
            copycount = my_kd_bufptr - kd_readlast;
            if (copycount > count)
                copycount = count;
            if (copycount == 0) {
                *number = totalcount;
                return(0);
            }
            if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
                return(EINVAL);

            kd_readlast += copycount;
            totalcount += copycount;
            *number = totalcount;
            return(0);

        } /* end if KDBG_BUFINIT */
    } /* end if count */
    return (EINVAL);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc)
{
    /* Return pointer to the proc name */
    return (unsigned char *) &proc->p_comm;
}