/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t * kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;

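/*
 * kdebug_slowcheck gates the trace hooks: while it is zero, kernel_debug()
 * takes the fast path and records the event immediately.  SLOW_NOLOG means
 * tracing is off and events are dropped, SLOW_CHECKS means the pid/range/
 * value filters must be consulted, and SLOW_ENTROPY means timestamps are
 * also being sampled into the entropy buffer.
 */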
#define SLOW_NOLOG      0x01
#define SLOW_CHECKS     0x02
#define SLOW_ENTROPY    0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
kd_buf * kd_bufptr;
unsigned int kd_buftomem = 0;
kd_buf * kd_buffer = 0;
kd_buf * kd_buflast;
kd_buf * kd_readlast;
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

unsigned long long kd_prev_timebase = 0LL;

static lck_mtx_t      * kd_trace_mtx;
static lck_grp_t      * kd_trace_mtx_grp;
static lck_attr_t     * kd_trace_mtx_attr;
static lck_grp_attr_t * kd_trace_mtx_grp_attr;

static lck_spin_t     * kd_trace_lock;
static lck_grp_t      * kd_trace_lock_grp;
static lck_attr_t     * kd_trace_lock_attr;
static lck_grp_attr_t * kd_trace_lock_grp_attr;

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;    /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

/* task to string structure */
struct tts
{
    task_t *task;           /* from procs task */
    pid_t   pid;            /* from procs p_pid */
    char    task_comm[20];  /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
    kd_threadmap *map;    /* pointer to the map buffer */
    int count;
    int maxcount;
    struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
                                unsigned int arg2, unsigned int arg3,
                                unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;    /* pointer to CHUD toolkit function */


/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(p, uap, retval)
    struct proc *p;
    struct kdebug_trace_args *uap;
    register_t *retval;
{
    if (kdebug_enable == 0)
        return(EINVAL);

    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
    return(0);
}

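/*
 * kernel_debug: record one trace event.  Interrupts are disabled and
 * kd_trace_lock is held across the buffer update, so this is usable from
 * interrupt context.  If a CHUD hook is registered it is called first.
 * Note that arg5 is overwritten with the current thread; callers that
 * need arg5 preserved use kernel_debug1 below.
 */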
void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
    unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf * kd;
    struct proc *curproc;
    int s;
    unsigned long long now;


    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
        if (kdebug_chudhook)
            kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

        if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
            return;
    }
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    if (kdebug_slowcheck == 0)
        goto record_trace;

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
    {
        if (kd_entropy_indx < kd_entropy_count)
        {
            kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
            kd_entropy_indx++;
        }

        if (kd_entropy_indx == kd_entropy_count)
        {
            /* Disable entropy collection */
            kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
            kdebug_slowcheck &= ~SLOW_ENTROPY;
        }
    }

    if ( (kdebug_slowcheck & SLOW_NOLOG) )
    {
        lck_spin_unlock(kd_trace_lock);
        ml_set_interrupts_enabled(s);
        return;
    }

    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        /* Events in the DBG_TRACE class always pass the range filter */
        if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
            && ((debugid >> 24) != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            ((debugid >> 24) != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

record_trace:
    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = (int)current_thread();

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    /* Watch for out of order timestamps */

    if (now < kd_prev_timebase)
    {
        /* Clock went backwards: nudge just past the last timestamp recorded */
        now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
    }
    else
    {
        /* Remember this timestamp as the most recent one recorded */
        kd_prev_timebase = now;
    }
    kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_slowcheck |= SLOW_NOLOG;
        kdebug_flags |= KDBG_WRAPPED;
    }
    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);
}

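/*
 * kernel_debug1: same as kernel_debug, except that arg5 is recorded as
 * passed in (instead of being replaced with the current thread) and no
 * entropy sampling is done.
 */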
void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
    unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
    kd_buf * kd;
    struct proc *curproc;
    int s;
    unsigned long long now;

    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
        if (kdebug_chudhook)
            (void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

        if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
            return;
    }
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    if (kdebug_slowcheck == 0)
        goto record_trace1;

    if ( (kdebug_slowcheck & SLOW_NOLOG) )
    {
        lck_spin_unlock(kd_trace_lock);
        ml_set_interrupts_enabled(s);
        return;
    }

    if (kdebug_flags & KDBG_PIDCHECK)
    {
        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE)
    {
        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

    if (kdebug_flags & KDBG_RANGECHECK)
    {
        /* Events in the DBG_TRACE class always pass the range filter */
        if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
            && ((debugid >> 24) != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }
    else if (kdebug_flags & KDBG_VALCHECK)
    {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            ((debugid >> 24) != DBG_TRACE))
        {
            lck_spin_unlock(kd_trace_lock);
            ml_set_interrupts_enabled(s);
            return;
        }
    }

record_trace1:
    kd = kd_bufptr;
    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = arg5;

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    /* Watch for out of order timestamps */

    if (now < kd_prev_timebase)
    {
        /* Clock went backwards: nudge just past the last timestamp recorded */
        now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
    }
    else
    {
        /* Remember this timestamp as the most recent one recorded */
        kd_prev_timebase = now;
    }
    kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

    kd_bufptr++;

    if (kd_bufptr >= kd_buflast)
        kd_bufptr = kd_buffer;
    if (kd_bufptr == kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_slowcheck |= SLOW_NOLOG;
        kdebug_flags |= KDBG_WRAPPED;
    }
    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);
}

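/*
 * One-time allocation (guarded by KDBG_LOCKINIT) of the spin lock that
 * protects event recording and the mutex that serializes the control
 * path in kdbg_control().
 */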
static void
kdbg_lock_init()
{

    if (kdebug_flags & KDBG_LOCKINIT)
        return;
    /*
     * allocate lock group attribute and group
     */
    kd_trace_lock_grp_attr = lck_grp_attr_alloc_init();
    //lck_grp_attr_setstat(kd_trace_lock_grp_attr);
    kd_trace_lock_grp = lck_grp_alloc_init("kdebug", kd_trace_lock_grp_attr);

    kd_trace_mtx_grp_attr = lck_grp_attr_alloc_init();
    //lck_grp_attr_setstat(kd_trace_mtx_grp_attr);
    kd_trace_mtx_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_grp_attr);

    /*
     * allocate the lock attribute
     */
    kd_trace_lock_attr = lck_attr_alloc_init();
    //lck_attr_setdebug(kd_trace_lock_attr);

    kd_trace_mtx_attr = lck_attr_alloc_init();
    //lck_attr_setdebug(kd_trace_mtx_attr);


    /*
     * allocate and initialize spin lock and mutex
     */
    kd_trace_lock = lck_spin_alloc_init(kd_trace_lock_grp, kd_trace_lock_attr);
    kd_trace_mtx  = lck_mtx_alloc_init(kd_trace_mtx_grp, kd_trace_mtx_attr);

    kdebug_flags |= KDBG_LOCKINIT;
}

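/*
 * Allocate the trace buffer (nkdbufs entries) and reset the read/write
 * pointers.  Returns 0 on success, EINVAL if the allocation fails.
 */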
int
kdbg_bootstrap()
{

    kd_bufsize = nkdbufs * sizeof(kd_buf);

    if (kmem_alloc(kernel_map, &kd_buftomem,
                   (vm_size_t)kd_bufsize) == KERN_SUCCESS)
        kd_buffer = (kd_buf *) kd_buftomem;
    else
        kd_buffer = (kd_buf *) 0;
    kdebug_flags &= ~KDBG_WRAPPED;

    if (kd_buffer) {
        kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
        kd_bufptr = kd_buffer;
        kd_buflast = &kd_bufptr[nkdbufs];
        kd_readlast = kd_bufptr;
        kd_prev_timebase = 0LL;
        return(0);
    } else {
        kd_bufsize = 0;
        kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
        return(EINVAL);
    }

}

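/*
 * Quiesce the trace hooks, release any existing trace and thread-map
 * buffers, and allocate a fresh trace buffer via kdbg_bootstrap().
 */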
int
kdbg_reinit()
{
    int s;
    int ret = 0;

    /*
     * Disable trace collecting
     * First make sure we're not in
     * the middle of cutting a trace
     */
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck |= SLOW_NOLOG;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
        kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    ret = kdbg_bootstrap();

    return(ret);
}

void kdbg_trace_data(struct proc *proc, long *arg_pid)
{
    if (!proc)
        *arg_pid = 0;
    else
        *arg_pid = proc->p_pid;

    return;
}


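/*
 * Pack the process command name (p_comm, truncated to sizeof(dbg_parms)
 * bytes) into four longs, so the name can be emitted as the arguments
 * of a trace event.
 */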
void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
    int i;
    char *dbg_nameptr;
    int dbg_namelen;
    long dbg_parms[4];

    if (!proc)
    {
        *arg1 = 0;
        *arg2 = 0;
        *arg3 = 0;
        *arg4 = 0;
        return;
    }

    /* Collect the command name for tracing */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = strlen(proc->p_comm);
    dbg_parms[0] = 0L;
    dbg_parms[1] = 0L;
    dbg_parms[2] = 0L;
    dbg_parms[3] = 0L;

    if (dbg_namelen > sizeof(dbg_parms))
        dbg_namelen = sizeof(dbg_parms);

    for (i = 0; dbg_namelen > 0; i++)
    {
        dbg_parms[i] = *(long *)dbg_nameptr;
        dbg_nameptr += sizeof(long);
        dbg_namelen -= sizeof(long);
    }

    *arg1 = dbg_parms[0];
    *arg2 = dbg_parms[1];
    *arg3 = dbg_parms[2];
    *arg4 = dbg_parms[3];
}

static void
kdbg_resolve_map(thread_t th_act, krt_t *t)
{
    kd_threadmap *mapptr;

    if (t->count < t->maxcount)
    {
        mapptr = &t->map[t->count];
        mapptr->thread = (unsigned int)th_act;
        (void) strncpy (mapptr->command, t->atts->task_comm,
                        sizeof(t->atts->task_comm) - 1);
        mapptr->command[sizeof(t->atts->task_comm) - 1] = '\0';

        /*
         * Some kernel threads have no associated pid.
         * We still need to mark the entry as valid.
         */
        if (t->atts->pid)
            mapptr->valid = t->atts->pid;
        else
            mapptr->valid = 1;

        t->count++;
    }
}

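/*
 * Build the thread-to-command-name map.  The first pass over allproc
 * sizes the buffers (plus 10% slack for procs created meanwhile), the
 * second snapshots each proc's task, pid and name; kdbg_resolve_map()
 * then fills in one entry per thread.
 */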
void kdbg_mapinit()
{
    struct proc *p;
    struct krt akrt;
    int tts_count;    /* number of task-to-string structures */
    struct tts *tts_mapptr;
    unsigned int tts_mapsize = 0;
    unsigned int tts_maptomem = 0;
    int i;


    if (kdebug_flags & KDBG_MAPINIT)
        return;

    /* Calculate the sizes of map buffers */
    for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
         p = p->p_list.le_next)
    {
        kd_mapcount += get_task_numacts((task_t)p->task);
        tts_count++;
    }

    /*
     * The proc count could change during buffer allocation,
     * so introduce a small fudge factor to bump up the
     * buffer sizes. This gives new tasks some chance of
     * making it into the tables. Bump up by 10%.
     */
    kd_mapcount += kd_mapcount/10;
    tts_count += tts_count/10;

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
    if ((kmem_alloc(kernel_map, &kd_maptomem,
                    (vm_size_t)kd_mapsize) == KERN_SUCCESS))
    {
        kd_mapptr = (kd_threadmap *) kd_maptomem;
        bzero(kd_mapptr, kd_mapsize);
    }
    else
        kd_mapptr = (kd_threadmap *) 0;

    tts_mapsize = tts_count * sizeof(struct tts);
    if ((kmem_alloc(kernel_map, &tts_maptomem,
                    (vm_size_t)tts_mapsize) == KERN_SUCCESS))
    {
        tts_mapptr = (struct tts *) tts_maptomem;
        bzero(tts_mapptr, tts_mapsize);
    }
    else
        tts_mapptr = (struct tts *) 0;


    /*
     * We need to save the procs command string
     * and take a reference for each task associated
     * with a valid process
     */

    if (tts_mapptr) {
        for (p = allproc.lh_first, i = 0; p && i < tts_count;
             p = p->p_list.le_next) {
            if (p->p_flag & P_WEXIT)
                continue;

            if (p->task) {
                task_reference(p->task);
                tts_mapptr[i].task = p->task;
                tts_mapptr[i].pid = p->p_pid;
                (void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
                i++;
            }
        }
        tts_count = i;
    }


    if (kd_mapptr && tts_mapptr)
    {
        kdebug_flags |= KDBG_MAPINIT;
        /* Initialize thread map data */
        akrt.map = kd_mapptr;
        akrt.count = 0;
        akrt.maxcount = kd_mapcount;

        for (i = 0; i < tts_count; i++)
        {
            akrt.atts = &tts_mapptr[i];
            task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
            task_deallocate((task_t) tts_mapptr[i].task);
        }
        kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
    }
}

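/*
 * Stop tracing, release ownership (global_state_pid), clear the filter
 * flags, and free both the trace buffer and the thread map.
 */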
static void
kdbg_clear(void)
{
    int s;

    /*
     * Clean up the trace buffer
     * First make sure we're not in
     * the middle of cutting a trace
     */
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck = SLOW_NOLOG;

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
        kdebug_slowcheck |= SLOW_ENTROPY;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    global_state_pid = -1;
    kdebug_flags &= ~KDBG_BUFINIT;
    kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
    kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
    kd_buffer = (kd_buf *)0;
    kd_bufsize = 0;
    kd_prev_timebase = 0LL;

    /* Clean up the thread map buffer */
    kdebug_flags &= ~KDBG_MAPINIT;
    kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
    kd_mapptr = (kd_threadmap *) 0;
    kd_mapsize = 0;
    kd_mapcount = 0;
}

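/*
 * Mark (or unmark) a single process for tracing via its P_KDEBUG flag.
 * While KDBG_PIDCHECK is set, only marked processes are traced.
 */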
int
kdbg_setpid(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)    /* turn on pid check for this and all pids */
            {
                kdebug_flags |= KDBG_PIDCHECK;
                kdebug_flags &= ~KDBG_PIDEXCLUDE;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_flag |= P_KDEBUG;
            }
            else    /* turn off pid check for this pid value */
            {
                /* Don't turn off all pid checking though */
                /* kdebug_flags &= ~KDBG_PIDCHECK;*/
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0)
    {
        if ((p = pfind(pid)) == NULL)
            ret = ESRCH;
        else
        {
            if (flag == 1)    /* turn on pid exclusion */
            {
                kdebug_flags |= KDBG_PIDEXCLUDE;
                kdebug_flags &= ~KDBG_PIDCHECK;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_flag |= P_KDEBUG;
            }
            else    /* turn off pid exclusion for this pid value */
            {
                /* Don't turn off all pid exclusion though */
                /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
                p->p_flag &= ~P_KDEBUG;
            }
        }
    }
    else
        ret = EINVAL;
    return(ret);
}

/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
    int ret = 0;
    natural_t decval;

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)
        ret = EINVAL;
#ifdef ppc
    else {

        extern uint32_t maxDec;

        maxDec = decval ? decval : 0x7FFFFFFF;    /* Set or reset the max decrementer */
    }
#else
    else
        ret = ENOTSUP;
#endif /* ppc */

    return(ret);
}

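/*
 * Install an event filter.  CLASSTYPE, SUBCLSTYPE and RANGETYPE set up a
 * debugid range check; VALCHECK matches up to four specific debugids;
 * TYPENONE clears the range/value filters.
 */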
int
kdbg_setreg(kd_regtype * kdr)
{
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {

    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;    /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;    /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;    /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_VALCHECK:
        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_RANGECHECK;  /* Turn off range check */
        kdebug_flags |= KDBG_VALCHECK;     /* Turn on specific value check */
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

        if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
            kdebug_slowcheck |= SLOW_CHECKS;
        else
            kdebug_slowcheck &= ~SLOW_CHECKS;

        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
    return(ret);
}

int
kdbg_getreg(kd_regtype * kdr)
{
#if 0
    /* Reading the filter settings back out is not implemented */
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {
    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = val_1 + 1;
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
#endif /* 0 */
    return(EINVAL);
}


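/*
 * Copy the thread map out to user space and then free it; the map can
 * only be read back once per trace session.
 */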
int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
    int avail = *number;
    int ret = 0;
    int count = 0;

    count = avail/sizeof (kd_threadmap);

    if (count && (count <= kd_mapcount))
    {
        if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
        {
            if (*number < kd_mapsize)
                ret = EINVAL;
            else
            {
                if (copyout(kd_mapptr, buffer, kd_mapsize))
                    ret = EINVAL;
            }
        }
        else
            ret = EINVAL;
    }
    else
        ret = EINVAL;

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    return(ret);
}

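/*
 * Collect raw timestamps for the security server: allocate the entropy
 * buffer, enable sampling in kernel_debug(), sleep for ms_timeout (or
 * until interrupted), then copy whatever was gathered out to user space.
 */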
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
    int avail = *number;
    int ret = 0;
    int count = 0;    /* The number of timestamp entries that will fill buffer */

    if (kd_entropy_buffer)
        return(EBUSY);

    kd_entropy_count = avail/sizeof(mach_timespec_t);
    kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
    kd_entropy_indx = 0;

    /* Enforce maximum entropy entries here if needed */

    /* allocate entropy buffer */
    if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
                   (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
    {
        kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
    }
    else
    {
        kd_entropy_buffer = (uint64_t *) 0;
        kd_entropy_count = 0;
        kd_entropy_indx = 0;
        return (EINVAL);
    }

    if (ms_timeout < 10)
        ms_timeout = 10;

    /* Enable entropy sampling */
    kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck |= SLOW_ENTROPY;

    ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

    /* Disable entropy sampling */
    kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck &= ~SLOW_ENTROPY;

    *number = 0;
    ret = 0;

    if (kd_entropy_indx > 0)
    {
        /* copyout the buffer */
        if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
            ret = EINVAL;
        else
            *number = kd_entropy_indx;
    }

    /* Always cleanup */
    kd_entropy_count = 0;
    kd_entropy_indx = 0;
    kd_entropy_buftomem = 0;
    kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
    kd_entropy_buffer = (uint64_t *) 0;
    return(ret);
}


/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */

void kdbg_control_chud(int val, void *fn)
{
    if (val) {
        /* enable chudhook */
        kdebug_chudhook = (kd_chudhook_fn)fn;
        kdebug_enable |= KDEBUG_ENABLE_CHUD;
    }
    else {
        /* disable chudhook */
        kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
        kdebug_chudhook = 0;
    }
}


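/*
 * Back end for the kdebug sysctl: dispatches the KERN_KD* selectors.
 * Apart from the passive KERN_KDGETBUF and KERN_KDGETENTROPY requests,
 * the first caller becomes the owner (global_state_pid) and other
 * processes get EBUSY until the trace is removed.
 */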
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    int ret = 0;
    int size = *sizep;
    int max_entries;
    unsigned int value = name[1];
    kd_regtype kd_Reg;
    kbufinfo_t kd_bufinfo;
    pid_t curpid;
    struct proc *p, *curproc;


    kdbg_lock_init();
    lck_mtx_lock(kd_trace_mtx);

    if (name[0] == KERN_KDGETBUF) {
        /*
         * Does not alter the global_state_pid
         * This is a passive request.
         */
        if (size < sizeof(kd_bufinfo.nkdbufs)) {
            /*
             * There is not enough room to return even
             * the first element of the info structure.
             */
            lck_mtx_unlock(kd_trace_mtx);

            return(EINVAL);
        }
        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

        if ( (kdebug_slowcheck & SLOW_NOLOG) )
            kd_bufinfo.nolog = 1;
        else
            kd_bufinfo.nolog = 0;
        kd_bufinfo.flags = kdebug_flags;
        kd_bufinfo.bufid = global_state_pid;

        if (size >= sizeof(kd_bufinfo)) {
            /*
             * Provide all the info we have
             */
            if (copyout (&kd_bufinfo, where, sizeof(kd_bufinfo))) {
                lck_mtx_unlock(kd_trace_mtx);

                return(EINVAL);
            }
        }
        else {
            /*
             * For backwards compatibility, only provide
             * as much info as there is room for.
             */
            if (copyout (&kd_bufinfo, where, size)) {
                lck_mtx_unlock(kd_trace_mtx);

                return(EINVAL);
            }
        }
        lck_mtx_unlock(kd_trace_mtx);

        return(0);
    } else if (name[0] == KERN_KDGETENTROPY) {
        if (kd_entropy_buffer)
            ret = EBUSY;
        else
            ret = kdbg_getentropy(where, sizep, value);
        lck_mtx_unlock(kd_trace_mtx);

        return (ret);
    }

    if ((curproc = current_proc()))
        curpid = curproc->p_pid;
    else {
        lck_mtx_unlock(kd_trace_mtx);

        return (ESRCH);
    }
    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid) {
        if ((p = pfind(global_state_pid)) == NULL) {
            /*
             * The global pid no longer exists
             */
            global_state_pid = curpid;
        } else {
            /*
             * The global pid exists, deny this request
             */
            lck_mtx_unlock(kd_trace_mtx);

            return(EBUSY);
        }
    }

    switch(name[0]) {
    case KERN_KDEFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags |= value;
        break;
    case KERN_KDDFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags &= ~value;
        break;
    case KERN_KDENABLE:    /* used to enable or disable */
        if (value)
        {
            /* enable only if buffer is initialized */
            if (!(kdebug_flags & KDBG_BUFINIT))
            {
                ret = EINVAL;
                break;
            }
            kdebug_enable |= KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck &= ~SLOW_NOLOG;
        }
        else
        {
            kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck |= SLOW_NOLOG;
        }
        kdbg_mapinit();
        break;
    case KERN_KDSETBUF:
        /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
        /* 'value' is the desired number of trace entries */
        max_entries = (sane_size/4) / sizeof(kd_buf);
        if (value <= max_entries)
            nkdbufs = value;
        else
            nkdbufs = max_entries;
        break;
    case KERN_KDSETUP:
        ret = kdbg_reinit();
        break;
    case KERN_KDREMOVE:
        kdbg_clear();
        break;
    case KERN_KDSETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setreg(&kd_Reg);
        break;
    case KERN_KDGETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
            ret = EINVAL;
        }
        break;
    case KERN_KDREADTR:
        ret = kdbg_read(where, sizep);
        break;
    case KERN_KDPIDTR:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpid(&kd_Reg);
        break;
    case KERN_KDPIDEX:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpidex(&kd_Reg);
        break;
    case KERN_KDTHRMAP:
        ret = kdbg_readmap(where, sizep);
        break;
    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setrtcdec(&kd_Reg);
        break;

    default:
        ret = EINVAL;
    }
    lck_mtx_unlock(kd_trace_mtx);

    return(ret);
}

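/*
 * Copy trace entries out to user space, oldest first.  If the buffer has
 * wrapped, reading starts at the write pointer and the copyout is done in
 * two pieces: kd_readlast to the end of kd_buffer, then the top of the
 * buffer up to the write pointer.
 */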
int
kdbg_read(user_addr_t buffer, size_t *number)
{
    int avail = *number;
    int count = 0;
    int copycount = 0;
    int totalcount = 0;
    int s;
    unsigned int my_kdebug_flags;
    kd_buf * my_kd_bufptr;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kd_trace_lock);

    my_kdebug_flags = kdebug_flags;
    my_kd_bufptr = kd_bufptr;

    lck_spin_unlock(kd_trace_lock);
    ml_set_interrupts_enabled(s);

    count = avail/sizeof(kd_buf);

    if (count) {
        if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
            if (count > nkdbufs)
                count = nkdbufs;

            if (!(my_kdebug_flags & KDBG_WRAPPED)) {
                if (my_kd_bufptr == kd_readlast) {
                    *number = 0;
                    return(0);
                }
                if (my_kd_bufptr > kd_readlast) {
                    copycount = my_kd_bufptr - kd_readlast;
                    if (copycount > count)
                        copycount = count;

                    if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
                        *number = 0;
                        return(EINVAL);
                    }
                    kd_readlast += copycount;
                    *number = copycount;
                    return(0);
                }
            }
            if ( (my_kdebug_flags & KDBG_WRAPPED) ) {
                /*
                 * Note that by setting kd_readlast equal to my_kd_bufptr,
                 * we now treat the kd_buffer read the same as if we weren't
                 * wrapped and my_kd_bufptr was less than kd_readlast.
                 */
                kd_readlast = my_kd_bufptr;
                kdebug_flags &= ~KDBG_WRAPPED;
            }
            /*
             * first copyout from readlast to end of kd_buffer
             */
            copycount = kd_buflast - kd_readlast;
            if (copycount > count)
                copycount = count;
            if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
                *number = 0;
                return(EINVAL);
            }
            buffer += (copycount * sizeof(kd_buf));
            count -= copycount;
            totalcount = copycount;
            kd_readlast += copycount;

            if (kd_readlast == kd_buflast)
                kd_readlast = kd_buffer;
            if (count == 0) {
                *number = totalcount;
                return(0);
            }
            /* second copyout from top of kd_buffer to bufptr */
            copycount = my_kd_bufptr - kd_readlast;
            if (copycount > count)
                copycount = count;
            if (copycount == 0) {
                *number = totalcount;
                return(0);
            }
            if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
                return(EINVAL);

            kd_readlast += copycount;
            totalcount += copycount;
            *number = totalcount;
            return(0);

        } /* end if KDBG_BUFINIT */
    } /* end if count */
    return (EINVAL);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

    return (unsigned char *) &proc->p_comm;    /* Return pointer to the proc name */

}