]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kdebug.c
xnu-517.9.4.tar.gz
[apple/xnu.git] / bsd / kern / kdebug.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <machine/spl.h>
24
25#define HZ 100
26#include <mach/clock_types.h>
27#include <mach/mach_types.h>
55e303ae 28#include <mach/mach_time.h>
1c79356b
A
29#include <machine/machine_routines.h>
30
31#include <sys/kdebug.h>
32#include <sys/errno.h>
33#include <sys/param.h>
34#include <sys/proc.h>
35#include <sys/vm.h>
36#include <sys/sysctl.h>
37
38#include <kern/thread.h>
39#include <kern/task.h>
40#include <vm/vm_kern.h>
41#include <sys/lock.h>
42
9bccf70c
A
43/* trace enable status */
44unsigned int kdebug_enable = 0;
45
46/* track timestamps for security server's entropy needs */
55e303ae 47uint64_t * kd_entropy_buffer = 0;
9bccf70c
A
48unsigned int kd_entropy_bufsize = 0;
49unsigned int kd_entropy_count = 0;
50unsigned int kd_entropy_indx = 0;
51unsigned int kd_entropy_buftomem = 0;
52
1c79356b
A
53/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
54kd_buf * kd_bufptr;
55unsigned int kd_buftomem=0;
56kd_buf * kd_buffer=0;
57kd_buf * kd_buflast;
58kd_buf * kd_readlast;
59unsigned int nkdbufs = 8192;
60unsigned int kd_bufsize = 0;
61unsigned int kdebug_flags = 0;
1c79356b
A
62unsigned int kdebug_nolog=1;
63unsigned int kdlog_beg=0;
64unsigned int kdlog_end=0;
65unsigned int kdlog_value1=0;
66unsigned int kdlog_value2=0;
67unsigned int kdlog_value3=0;
68unsigned int kdlog_value4=0;
69
70unsigned long long kd_prev_timebase = 0LL;
71decl_simple_lock_data(,kd_trace_lock);
72
73kd_threadmap *kd_mapptr = 0;
74unsigned int kd_mapsize = 0;
75unsigned int kd_mapcount = 0;
76unsigned int kd_maptomem = 0;
77
78pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
79
80#define DBG_FUNC_MASK 0xfffffffc
81
82#ifdef ppc
83extern natural_t rtclock_decrementer_min;
84#endif /* ppc */
85
/* Argument block for the SYS_kdebug_trace syscall (see kdebug_trace()). */
struct kdebug_args {
	int code;	/* debugid for the trace entry */
	int arg1;
	int arg2;
	int arg3;
	int arg4;
	int arg5;	/* ignored: kernel_debug() substitutes the current thread */
};
94
9bccf70c
A
95/* task to string structure */
96struct tts
97{
55e303ae
A
98 task_t *task; /* from procs task */
99 pid_t pid; /* from procs p_pid */
9bccf70c
A
100 char task_comm[20]; /* from procs p_comm */
101};
102
103typedef struct tts tts_t;
104
1c79356b
A
105struct krt
106{
107 kd_threadmap *map; /* pointer to the map buffer */
108 int count;
109 int maxcount;
9bccf70c 110 struct tts *atts;
1c79356b
A
111};
112
113typedef struct krt krt_t;
114
9bccf70c
A
115/* This is for the CHUD toolkit call */
116typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
117 unsigned int arg2, unsigned int arg3,
118 unsigned int arg4, unsigned int arg5);
119
120kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
121
1c79356b
A
122/* Support syscall SYS_kdebug_trace */
123kdebug_trace(p, uap, retval)
124 struct proc *p;
125 struct kdebug_args *uap;
126 register_t *retval;
127{
128 if (kdebug_nolog)
129 return(EINVAL);
130
131 kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
132 return(0);
133}
134
135
136void
137kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
138unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
139{
140 kd_buf * kd;
141 struct proc *curproc;
142 int s;
143 unsigned long long now;
144 mach_timespec_t *tsp;
145
9bccf70c
A
146 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
147 if (kdebug_chudhook)
148 kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
149
150 if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
151 (kdebug_enable & KDEBUG_ENABLE_TRACE)))
152 return;
153 }
154
1c79356b
A
155 s = ml_set_interrupts_enabled(FALSE);
156
9bccf70c
A
157 if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
158 {
159 if (kd_entropy_indx < kd_entropy_count)
160 {
55e303ae 161 kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time();
9bccf70c
A
162 kd_entropy_indx++;
163 }
164
165 if (kd_entropy_indx == kd_entropy_count)
166 {
167 /* Disable entropy collection */
168 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
169 }
170 }
171
1c79356b
A
172 if (kdebug_nolog)
173 {
174 ml_set_interrupts_enabled(s);
175 return;
176 }
177
0b4e3aa0 178 usimple_lock(&kd_trace_lock);
1c79356b
A
179 if (kdebug_flags & KDBG_PIDCHECK)
180 {
181 /* If kdebug flag is not set for current proc, return */
182 curproc = current_proc();
183 if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
184 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
185 {
0b4e3aa0 186 usimple_unlock(&kd_trace_lock);
1c79356b
A
187 ml_set_interrupts_enabled(s);
188 return;
189 }
190 }
191 else if (kdebug_flags & KDBG_PIDEXCLUDE)
192 {
193 /* If kdebug flag is set for current proc, return */
194 curproc = current_proc();
195 if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
196 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
197 {
0b4e3aa0 198 usimple_unlock(&kd_trace_lock);
1c79356b
A
199 ml_set_interrupts_enabled(s);
200 return;
201 }
202 }
203
204 if (kdebug_flags & KDBG_RANGECHECK)
205 {
206 if ((debugid < kdlog_beg) || (debugid > kdlog_end)
207 && (debugid >> 24 != DBG_TRACE))
208 {
0b4e3aa0 209 usimple_unlock(&kd_trace_lock);
1c79356b
A
210 ml_set_interrupts_enabled(s);
211 return;
212 }
213 }
214 else if (kdebug_flags & KDBG_VALCHECK)
215 {
216 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
217 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
218 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
219 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
220 (debugid >> 24 != DBG_TRACE))
221 {
0b4e3aa0 222 usimple_unlock(&kd_trace_lock);
1c79356b
A
223 ml_set_interrupts_enabled(s);
224 return;
225 }
226 }
227 kd = kd_bufptr;
228 kd->debugid = debugid;
229 kd->arg1 = arg1;
230 kd->arg2 = arg2;
231 kd->arg3 = arg3;
232 kd->arg4 = arg4;
55e303ae 233 kd->arg5 = (int)current_act();
1c79356b
A
234 if (cpu_number())
235 kd->arg5 |= KDBG_CPU_MASK;
236
55e303ae 237 now = kd->timestamp = mach_absolute_time();
1c79356b
A
238
239 /* Watch for out of order timestamps */
1c79356b
A
240
241 if (now < kd_prev_timebase)
242 {
55e303ae 243 kd->timestamp = ++kd_prev_timebase;
1c79356b
A
244 }
245 else
246 {
247 /* Then just store the previous timestamp */
248 kd_prev_timebase = now;
249 }
250
251
252 kd_bufptr++;
253
254 if (kd_bufptr >= kd_buflast)
255 kd_bufptr = kd_buffer;
256 if (kd_bufptr == kd_readlast) {
257 if (kdebug_flags & KDBG_NOWRAP)
258 kdebug_nolog = 1;
259 kdebug_flags |= KDBG_WRAPPED;
260 }
0b4e3aa0 261 usimple_unlock(&kd_trace_lock);
1c79356b
A
262 ml_set_interrupts_enabled(s);
263}
264
265void
266kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
267unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
268{
269 kd_buf * kd;
270 struct proc *curproc;
271 int s;
272 unsigned long long now;
273 mach_timespec_t *tsp;
274
9bccf70c
A
275 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
276 if (kdebug_chudhook)
277 (void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
278
279 if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
280 (kdebug_enable & KDEBUG_ENABLE_TRACE)))
281 return;
282 }
283
1c79356b
A
284 s = ml_set_interrupts_enabled(FALSE);
285
286 if (kdebug_nolog)
287 {
288 ml_set_interrupts_enabled(s);
289 return;
290 }
291
0b4e3aa0 292 usimple_lock(&kd_trace_lock);
1c79356b
A
293 if (kdebug_flags & KDBG_PIDCHECK)
294 {
295 /* If kdebug flag is not set for current proc, return */
296 curproc = current_proc();
297 if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
298 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
299 {
0b4e3aa0 300 usimple_unlock(&kd_trace_lock);
1c79356b
A
301 ml_set_interrupts_enabled(s);
302 return;
303 }
304 }
305 else if (kdebug_flags & KDBG_PIDEXCLUDE)
306 {
307 /* If kdebug flag is set for current proc, return */
308 curproc = current_proc();
309 if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
310 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
311 {
0b4e3aa0 312 usimple_unlock(&kd_trace_lock);
1c79356b
A
313 ml_set_interrupts_enabled(s);
314 return;
315 }
316 }
317
318 if (kdebug_flags & KDBG_RANGECHECK)
319 {
320 if ((debugid < kdlog_beg) || (debugid > kdlog_end)
321 && (debugid >> 24 != DBG_TRACE))
322 {
0b4e3aa0 323 usimple_unlock(&kd_trace_lock);
1c79356b
A
324 ml_set_interrupts_enabled(s);
325 return;
326 }
327 }
328 else if (kdebug_flags & KDBG_VALCHECK)
329 {
330 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
331 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
332 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
333 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
334 (debugid >> 24 != DBG_TRACE))
335 {
0b4e3aa0 336 usimple_unlock(&kd_trace_lock);
1c79356b
A
337 ml_set_interrupts_enabled(s);
338 return;
339 }
340 }
341
342 kd = kd_bufptr;
343 kd->debugid = debugid;
344 kd->arg1 = arg1;
345 kd->arg2 = arg2;
346 kd->arg3 = arg3;
347 kd->arg4 = arg4;
348 kd->arg5 = arg5;
55e303ae 349 now = kd->timestamp = mach_absolute_time();
1c79356b
A
350
351 /* Watch for out of order timestamps */
1c79356b
A
352
353 if (now < kd_prev_timebase)
354 {
355 /* timestamps are out of order -- adjust */
55e303ae 356 kd->timestamp = ++kd_prev_timebase;
1c79356b
A
357 }
358 else
359 {
360 /* Then just store the previous timestamp */
361 kd_prev_timebase = now;
362 }
363
364 kd_bufptr++;
365
366 if (kd_bufptr >= kd_buflast)
367 kd_bufptr = kd_buffer;
368 if (kd_bufptr == kd_readlast) {
369 if (kdebug_flags & KDBG_NOWRAP)
370 kdebug_nolog = 1;
371 kdebug_flags |= KDBG_WRAPPED;
372 }
0b4e3aa0 373 usimple_unlock(&kd_trace_lock);
1c79356b
A
374 ml_set_interrupts_enabled(s);
375}
376
377
378kdbg_bootstrap()
379{
380 kd_bufsize = nkdbufs * sizeof(kd_buf);
381 if (kmem_alloc(kernel_map, &kd_buftomem,
382 (vm_size_t)kd_bufsize) == KERN_SUCCESS)
383 kd_buffer = (kd_buf *) kd_buftomem;
384 else kd_buffer= (kd_buf *) 0;
385 kdebug_flags &= ~KDBG_WRAPPED;
386 if (kd_buffer) {
387 simple_lock_init(&kd_trace_lock);
388 kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
389 kd_bufptr = kd_buffer;
390 kd_buflast = &kd_bufptr[nkdbufs];
391 kd_readlast = kd_bufptr;
392 kd_prev_timebase = 0LL;
393 return(0);
394 } else {
395 kd_bufsize=0;
396 kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
397 return(EINVAL);
398 }
399
400}
401
402kdbg_reinit()
403{
404 int x;
405 int ret=0;
406
9bccf70c
A
407 /* Disable trace collecting */
408 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
1c79356b
A
409 kdebug_nolog = 1;
410
411 if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
55e303ae 412 kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
1c79356b
A
413
414 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
415 {
55e303ae 416 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1c79356b
A
417 kdebug_flags &= ~KDBG_MAPINIT;
418 kd_mapsize = 0;
419 kd_mapptr = (kd_threadmap *) 0;
420 kd_mapcount = 0;
421 }
422
423 ret= kdbg_bootstrap();
424
425 return(ret);
426}
427
55e303ae
A
428void kdbg_trace_data(struct proc *proc, long *arg_pid)
429{
430 if (!proc)
431 *arg_pid = 0;
432 else
433 *arg_pid = proc->p_pid;
434
435 return;
436}
437
438
1c79356b
A
439void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
440{
441 int i;
442 char *dbg_nameptr;
443 int dbg_namelen;
444 long dbg_parms[4];
445
446 if (!proc)
447 {
448 *arg1 = 0;
449 *arg2 = 0;
450 *arg3 = 0;
451 *arg4 = 0;
452 return;
453 }
454
455 /* Collect the pathname for tracing */
456 dbg_nameptr = proc->p_comm;
457 dbg_namelen = strlen(proc->p_comm);
458 dbg_parms[0]=0L;
459 dbg_parms[1]=0L;
460 dbg_parms[2]=0L;
461 dbg_parms[3]=0L;
462
463 if(dbg_namelen > sizeof(dbg_parms))
464 dbg_namelen = sizeof(dbg_parms);
465
466 for(i=0;dbg_namelen > 0; i++)
467 {
468 dbg_parms[i]=*(long*)dbg_nameptr;
469 dbg_nameptr += sizeof(long);
470 dbg_namelen -= sizeof(long);
471 }
472
473 *arg1=dbg_parms[0];
474 *arg2=dbg_parms[1];
475 *arg3=dbg_parms[2];
476 *arg4=dbg_parms[3];
477}
478
479kdbg_resolve_map(thread_act_t th_act, krt_t *t)
480{
481 kd_threadmap *mapptr;
482
483 if(t->count < t->maxcount)
484 {
485 mapptr=&t->map[t->count];
55e303ae 486 mapptr->thread = (unsigned int)th_act;
9bccf70c
A
487 (void) strncpy (mapptr->command, t->atts->task_comm,
488 sizeof(t->atts->task_comm)-1);
489 mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
55e303ae
A
490
491 /*
492 Some kernel threads have no associated pid.
493 We still need to mark the entry as valid.
494 */
495 if (t->atts->pid)
496 mapptr->valid = t->atts->pid;
497 else
498 mapptr->valid = 1;
499
1c79356b
A
500 t->count++;
501 }
502}
503
504void kdbg_mapinit()
505{
506 struct proc *p;
507 struct krt akrt;
9bccf70c
A
508 int tts_count; /* number of task-to-string structures */
509 struct tts *tts_mapptr;
510 unsigned int tts_mapsize = 0;
511 unsigned int tts_maptomem=0;
512 int i;
513
1c79356b
A
514
515 if (kdebug_flags & KDBG_MAPINIT)
516 return;
517
9bccf70c
A
518 /* Calculate the sizes of map buffers*/
519 for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
1c79356b
A
520 p = p->p_list.le_next)
521 {
522 kd_mapcount += get_task_numacts((task_t)p->task);
9bccf70c 523 tts_count++;
1c79356b
A
524 }
525
9bccf70c
A
526 /*
527 * The proc count could change during buffer allocation,
528 * so introduce a small fudge factor to bump up the
529 * buffer sizes. This gives new tasks some chance of
530 * making into the tables. Bump up by 10%.
531 */
532 kd_mapcount += kd_mapcount/10;
533 tts_count += tts_count/10;
534
1c79356b
A
535 kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
536 if((kmem_alloc(kernel_map, & kd_maptomem,
537 (vm_size_t)kd_mapsize) == KERN_SUCCESS))
55e303ae 538 {
1c79356b 539 kd_mapptr = (kd_threadmap *) kd_maptomem;
55e303ae
A
540 bzero(kd_mapptr, kd_mapsize);
541 }
1c79356b
A
542 else
543 kd_mapptr = (kd_threadmap *) 0;
544
9bccf70c
A
545 tts_mapsize = tts_count * sizeof(struct tts);
546 if((kmem_alloc(kernel_map, & tts_maptomem,
547 (vm_size_t)tts_mapsize) == KERN_SUCCESS))
55e303ae 548 {
9bccf70c 549 tts_mapptr = (struct tts *) tts_maptomem;
55e303ae
A
550 bzero(tts_mapptr, tts_mapsize);
551 }
9bccf70c
A
552 else
553 tts_mapptr = (struct tts *) 0;
554
555
556 /*
557 * We need to save the procs command string
558 * and take a reference for each task associated
559 * with a valid process
560 */
561
562 if (tts_mapptr) {
563 for (p = allproc.lh_first, i=0; p && i < tts_count;
564 p = p->p_list.le_next) {
565 if (p->p_flag & P_WEXIT)
566 continue;
567
568 if (task_reference_try(p->task)) {
569 tts_mapptr[i].task = p->task;
55e303ae 570 tts_mapptr[i].pid = p->p_pid;
9bccf70c
A
571 (void)strncpy(&tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
572 i++;
573 }
574 }
575 tts_count = i;
576 }
577
578
579 if (kd_mapptr && tts_mapptr)
1c79356b
A
580 {
581 kdebug_flags |= KDBG_MAPINIT;
582 /* Initialize thread map data */
583 akrt.map = kd_mapptr;
584 akrt.count = 0;
585 akrt.maxcount = kd_mapcount;
586
9bccf70c 587 for (i=0; i < tts_count; i++)
1c79356b 588 {
9bccf70c
A
589 akrt.atts = &tts_mapptr[i];
590 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
55e303ae 591 task_deallocate((task_t) tts_mapptr[i].task);
9bccf70c 592 }
55e303ae 593 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
1c79356b
A
594 }
595}
596
597kdbg_clear()
598{
599int x;
600
601 /* Clean up the trace buffer */
602 global_state_pid = -1;
9bccf70c 603 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
1c79356b
A
604 kdebug_nolog = 1;
605 kdebug_flags &= ~KDBG_BUFINIT;
606 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
607 kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
608 kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
55e303ae 609 kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
1c79356b
A
610 kd_buffer = (kd_buf *)0;
611 kd_bufsize = 0;
612 kd_prev_timebase = 0LL;
613
614 /* Clean up the thread map buffer */
615 kdebug_flags &= ~KDBG_MAPINIT;
55e303ae 616 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1c79356b
A
617 kd_mapptr = (kd_threadmap *) 0;
618 kd_mapsize = 0;
619 kd_mapcount = 0;
620}
621
622kdbg_setpid(kd_regtype *kdr)
623{
624 pid_t pid;
625 int flag, ret=0;
626 struct proc *p;
627
628 pid = (pid_t)kdr->value1;
629 flag = (int)kdr->value2;
630
631 if (pid > 0)
632 {
633 if ((p = pfind(pid)) == NULL)
634 ret = ESRCH;
635 else
636 {
637 if (flag == 1) /* turn on pid check for this and all pids */
638 {
639 kdebug_flags |= KDBG_PIDCHECK;
640 kdebug_flags &= ~KDBG_PIDEXCLUDE;
641 p->p_flag |= P_KDEBUG;
642 }
643 else /* turn off pid check for this pid value */
644 {
645 /* Don't turn off all pid checking though */
646 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
647 p->p_flag &= ~P_KDEBUG;
648 }
649 }
650 }
651 else
652 ret = EINVAL;
653 return(ret);
654}
655
656/* This is for pid exclusion in the trace buffer */
657kdbg_setpidex(kd_regtype *kdr)
658{
659 pid_t pid;
660 int flag, ret=0;
661 struct proc *p;
662
663 pid = (pid_t)kdr->value1;
664 flag = (int)kdr->value2;
665
666 if (pid > 0)
667 {
668 if ((p = pfind(pid)) == NULL)
669 ret = ESRCH;
670 else
671 {
672 if (flag == 1) /* turn on pid exclusion */
673 {
674 kdebug_flags |= KDBG_PIDEXCLUDE;
675 kdebug_flags &= ~KDBG_PIDCHECK;
676 p->p_flag |= P_KDEBUG;
677 }
678 else /* turn off pid exclusion for this pid value */
679 {
680 /* Don't turn off all pid exclusion though */
681 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
682 p->p_flag &= ~P_KDEBUG;
683 }
684 }
685 }
686 else
687 ret = EINVAL;
688 return(ret);
689}
690
691/* This is for setting a minimum decrementer value */
692kdbg_setrtcdec(kd_regtype *kdr)
693{
694 int ret=0;
695 natural_t decval;
696
697 decval = (natural_t)kdr->value1;
698
699 if (decval && decval < KDBG_MINRTCDEC)
700 ret = EINVAL;
701#ifdef ppc
702 else
703 rtclock_decrementer_min = decval;
704#else
705 else
706 ret = EOPNOTSUPP;
707#endif /* ppc */
708
709 return(ret);
710}
711
712kdbg_setreg(kd_regtype * kdr)
713{
714 int i,j, ret=0;
715 unsigned int val_1, val_2, val;
716 switch (kdr->type) {
717
718 case KDBG_CLASSTYPE :
719 val_1 = (kdr->value1 & 0xff);
720 val_2 = (kdr->value2 & 0xff);
721 kdlog_beg = (val_1<<24);
722 kdlog_end = (val_2<<24);
723 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
724 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
725 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
726 break;
727 case KDBG_SUBCLSTYPE :
728 val_1 = (kdr->value1 & 0xff);
729 val_2 = (kdr->value2 & 0xff);
730 val = val_2 + 1;
731 kdlog_beg = ((val_1<<24) | (val_2 << 16));
732 kdlog_end = ((val_1<<24) | (val << 16));
733 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
734 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
735 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
736 break;
737 case KDBG_RANGETYPE :
738 kdlog_beg = (kdr->value1);
739 kdlog_end = (kdr->value2);
740 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
741 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
742 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
743 break;
744 case KDBG_VALCHECK:
745 kdlog_value1 = (kdr->value1);
746 kdlog_value2 = (kdr->value2);
747 kdlog_value3 = (kdr->value3);
748 kdlog_value4 = (kdr->value4);
749 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
750 kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
751 kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
752 break;
753 case KDBG_TYPENONE :
754 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
755 kdlog_beg = 0;
756 kdlog_end = 0;
757 break;
758 default :
759 ret = EINVAL;
760 break;
761 }
762 return(ret);
763}
764
765kdbg_getreg(kd_regtype * kdr)
766{
767 int i,j, ret=0;
768 unsigned int val_1, val_2, val;
769#if 0
770 switch (kdr->type) {
771 case KDBG_CLASSTYPE :
772 val_1 = (kdr->value1 & 0xff);
773 val_2 = val_1 + 1;
774 kdlog_beg = (val_1<<24);
775 kdlog_end = (val_2<<24);
776 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
777 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
778 break;
779 case KDBG_SUBCLSTYPE :
780 val_1 = (kdr->value1 & 0xff);
781 val_2 = (kdr->value2 & 0xff);
782 val = val_2 + 1;
783 kdlog_beg = ((val_1<<24) | (val_2 << 16));
784 kdlog_end = ((val_1<<24) | (val << 16));
785 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
786 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
787 break;
788 case KDBG_RANGETYPE :
789 kdlog_beg = (kdr->value1);
790 kdlog_end = (kdr->value2);
791 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
792 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
793 break;
794 case KDBG_TYPENONE :
795 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
796 kdlog_beg = 0;
797 kdlog_end = 0;
798 break;
799 default :
800 ret = EINVAL;
801 break;
802 }
803#endif /* 0 */
804 return(EINVAL);
805}
806
807
808
809kdbg_readmap(kd_threadmap *buffer, size_t *number)
810{
811 int avail = *number;
812 int ret = 0;
813 int count = 0;
814
815 count = avail/sizeof (kd_threadmap);
816
817 if (count && (count <= kd_mapcount))
818 {
819 if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
820 {
821 if (*number < kd_mapsize)
822 ret=EINVAL;
823 else
824 {
825 if (copyout(kd_mapptr, buffer, kd_mapsize))
826 ret=EINVAL;
827 }
828 }
829 else
830 ret=EINVAL;
831 }
832 else
833 ret=EINVAL;
834
835 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
836 {
55e303ae 837 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1c79356b
A
838 kdebug_flags &= ~KDBG_MAPINIT;
839 kd_mapsize = 0;
840 kd_mapptr = (kd_threadmap *) 0;
841 kd_mapcount = 0;
842 }
843
844 return(ret);
845}
846
9bccf70c
A
847kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout)
848{
849 int avail = *number;
850 int ret = 0;
851 int count = 0; /* The number of timestamp entries that will fill buffer */
852
853 if (kd_entropy_buffer)
854 return(EBUSY);
855
856 kd_entropy_count = avail/sizeof(mach_timespec_t);
857 kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
858 kd_entropy_indx = 0;
859
860 /* Enforce maximum entropy entries here if needed */
861
862 /* allocate entropy buffer */
863 if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
864 (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
865 {
55e303ae 866 kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
9bccf70c
A
867 }
868 else
869 {
55e303ae 870 kd_entropy_buffer = (uint64_t *) 0;
9bccf70c
A
871 kd_entropy_count = 0;
872 kd_entropy_indx = 0;
873 return (EINVAL);
874 }
875
876 if (ms_timeout < 10)
877 ms_timeout = 10;
878
879 /* Enable entropy sampling */
880 kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
881
882 ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
883
884 /* Disable entropy sampling */
885 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
886
887 *number = 0;
888 ret = 0;
889
890 if (kd_entropy_indx > 0)
891 {
892 /* copyout the buffer */
893 if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
894 ret = EINVAL;
895 else
896 *number = kd_entropy_indx;
897 }
898
899 /* Always cleanup */
900 kd_entropy_count = 0;
901 kd_entropy_indx = 0;
902 kd_entropy_buftomem = 0;
55e303ae
A
903 kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
904 kd_entropy_buffer = (uint64_t *) 0;
9bccf70c
A
905 return(ret);
906}
907
908
909/*
910 * This function is provided for the CHUD toolkit only.
911 * int val:
912 * zero disables kdebug_chudhook function call
913 * non-zero enables kdebug_chudhook function call
914 * char *fn:
915 * address of the enabled kdebug_chudhook function
916*/
917
918void kdbg_control_chud(int val, void *fn)
919{
920 if (val) {
921 /* enable chudhook */
922 kdebug_enable |= KDEBUG_ENABLE_CHUD;
923 kdebug_chudhook = fn;
924 }
925 else {
926 /* disable chudhook */
927 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
928 kdebug_chudhook = 0;
929 }
930}
1c79356b 931
9bccf70c 932
1c79356b
A
933kdbg_control(name, namelen, where, sizep)
934int *name;
935u_int namelen;
936char *where;
937size_t *sizep;
938{
939int ret=0;
940int size=*sizep;
941int max_entries;
942unsigned int value = name[1];
943kd_regtype kd_Reg;
944kbufinfo_t kd_bufinfo;
945
946pid_t curpid;
947struct proc *p, *curproc;
948
9bccf70c
A
949 if (name[0] == KERN_KDGETBUF) {
950 /*
951 Does not alter the global_state_pid
952 This is a passive request.
953 */
954 if (size < sizeof(kd_bufinfo.nkdbufs)) {
955 /*
956 There is not enough room to return even
957 the first element of the info structure.
958 */
959 return(EINVAL);
960 }
961
962 kd_bufinfo.nkdbufs = nkdbufs;
963 kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
964 kd_bufinfo.nolog = kdebug_nolog;
965 kd_bufinfo.flags = kdebug_flags;
966 kd_bufinfo.bufid = global_state_pid;
967
968 if(size >= sizeof(kbufinfo_t)) {
969 /* Provide all the info we have */
970 if(copyout (&kd_bufinfo, where, sizeof(kbufinfo_t)))
971 return(EINVAL);
972 }
973 else {
974 /*
975 For backwards compatibility, only provide
976 as much info as there is room for.
977 */
978 if(copyout (&kd_bufinfo, where, size))
979 return(EINVAL);
980 }
981 return(0);
982 }
983 else if (name[0] == KERN_KDGETENTROPY) {
984 if (kd_entropy_buffer)
985 return(EBUSY);
986 else
987 ret = kdbg_getentropy((mach_timespec_t *)where, sizep, value);
988 return (ret);
989 }
990
1c79356b
A
991 if(curproc = current_proc())
992 curpid = curproc->p_pid;
993 else
994 return (ESRCH);
995
996 if (global_state_pid == -1)
997 global_state_pid = curpid;
998 else if (global_state_pid != curpid)
999 {
1000 if((p = pfind(global_state_pid)) == NULL)
1001 {
1002 /* The global pid no longer exists */
1003 global_state_pid = curpid;
1004 }
1005 else
1006 {
1007 /* The global pid exists, deny this request */
1008 return(EBUSY);
1009 }
1010 }
1011
1012 switch(name[0]) {
1013 case KERN_KDEFLAGS:
1014 value &= KDBG_USERFLAGS;
1015 kdebug_flags |= value;
1016 break;
1017 case KERN_KDDFLAGS:
1018 value &= KDBG_USERFLAGS;
1019 kdebug_flags &= ~value;
1020 break;
1021 case KERN_KDENABLE: /* used to enable or disable */
1022 if (value)
1023 {
1024 /* enable only if buffer is initialized */
1025 if (!(kdebug_flags & KDBG_BUFINIT))
1026 {
1027 ret=EINVAL;
1028 break;
1029 }
1030 }
9bccf70c
A
1031
1032 if (value)
1033 kdebug_enable |= KDEBUG_ENABLE_TRACE;
1034 else
1035 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
1036
1c79356b 1037 kdebug_nolog = (value)?0:1;
9bccf70c
A
1038
1039 if (kdebug_enable & KDEBUG_ENABLE_TRACE)
1c79356b
A
1040 kdbg_mapinit();
1041 break;
1042 case KERN_KDSETBUF:
55e303ae 1043 /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
1c79356b 1044 /* 'value' is the desired number of trace entries */
55e303ae 1045 max_entries = (sane_size/4) / sizeof(kd_buf);
1c79356b
A
1046 if (value <= max_entries)
1047 nkdbufs = value;
1048 else
1049 nkdbufs = max_entries;
1050 break;
1c79356b
A
1051 case KERN_KDSETUP:
1052 ret=kdbg_reinit();
1053 break;
1054 case KERN_KDREMOVE:
1055 kdbg_clear();
1056 break;
1057 case KERN_KDSETREG:
1058 if(size < sizeof(kd_regtype)) {
1059 ret=EINVAL;
1060 break;
1061 }
1062 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1063 ret= EINVAL;
1064 break;
1065 }
1066 ret = kdbg_setreg(&kd_Reg);
1067 break;
1068 case KERN_KDGETREG:
1069 if(size < sizeof(kd_regtype)) {
1070 ret = EINVAL;
1071 break;
1072 }
1073 ret = kdbg_getreg(&kd_Reg);
1074 if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
1075 ret=EINVAL;
1076 }
1077 break;
1078 case KERN_KDREADTR:
1079 ret = kdbg_read(where, sizep);
1080 break;
1081 case KERN_KDPIDTR:
1082 if (size < sizeof(kd_regtype)) {
1083 ret = EINVAL;
1084 break;
1085 }
1086 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1087 ret= EINVAL;
1088 break;
1089 }
1090 ret = kdbg_setpid(&kd_Reg);
1091 break;
1092 case KERN_KDPIDEX:
1093 if (size < sizeof(kd_regtype)) {
1094 ret = EINVAL;
1095 break;
1096 }
1097 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1098 ret= EINVAL;
1099 break;
1100 }
1101 ret = kdbg_setpidex(&kd_Reg);
1102 break;
1103 case KERN_KDTHRMAP:
1104 ret = kdbg_readmap((kd_threadmap *)where, sizep);
1105 break;
1106 case KERN_KDSETRTCDEC:
1107 if (size < sizeof(kd_regtype)) {
1108 ret = EINVAL;
1109 break;
1110 }
1111 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1112 ret= EINVAL;
1113 break;
1114 }
1115 ret = kdbg_setrtcdec(&kd_Reg);
1116 break;
1117
1118 default:
1119 ret= EINVAL;
1120 }
1121 return(ret);
1122}
1123
1124kdbg_read(kd_buf * buffer, size_t *number)
1125{
1126int avail=*number;
1127int count=0;
1128int copycount=0;
1129int totalcount=0;
1130int s;
1131unsigned int my_kdebug_flags;
1132kd_buf * my_kd_bufptr;
1133
1134 s = ml_set_interrupts_enabled(FALSE);
0b4e3aa0 1135 usimple_lock(&kd_trace_lock);
1c79356b
A
1136 my_kdebug_flags = kdebug_flags;
1137 my_kd_bufptr = kd_bufptr;
0b4e3aa0 1138 usimple_unlock(&kd_trace_lock);
1c79356b
A
1139 ml_set_interrupts_enabled(s);
1140
1141 count = avail/sizeof(kd_buf);
1142 if (count) {
1143 if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
1144 if (count > nkdbufs)
1145 count = nkdbufs;
1146 if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
1147 {
1148 copycount = my_kd_bufptr-kd_readlast;
1149 if (copycount > count)
1150 copycount = count;
1151
1152 if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
1153 {
1154 *number = 0;
1155 return(EINVAL);
1156 }
1157 kd_readlast += copycount;
1158 *number = copycount;
1159 return(0);
1160 }
1161 else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
1162 {
1163 *number = 0;
1164 return(0);
1165 }
1166 else
1167 {
1168 if (my_kdebug_flags & KDBG_WRAPPED)
1169 {
1170 kd_readlast = my_kd_bufptr;
1171 kdebug_flags &= ~KDBG_WRAPPED;
1172 }
1173
1174 /* Note that by setting kd_readlast equal to my_kd_bufptr,
1175 we now treat the kd_buffer read the same as if we weren't
1176 wrapped and my_kd_bufptr was less than kd_readlast.
1177 */
1178
1179 /* first copyout from readlast to end of kd_buffer */
1180 copycount = kd_buflast - kd_readlast;
1181 if (copycount > count)
1182 copycount = count;
1183 if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
1184 {
1185 *number = 0;
1186 return(EINVAL);
1187 }
1188 buffer += copycount;
1189 count -= copycount;
1190 totalcount = copycount;
1191 kd_readlast += copycount;
1192 if (kd_readlast == kd_buflast)
1193 kd_readlast = kd_buffer;
1194 if (count == 0)
1195 {
1196 *number = totalcount;
1197 return(0);
1198 }
1199
1200 /* second copyout from top of kd_buffer to bufptr */
1201 copycount = my_kd_bufptr - kd_readlast;
1202 if (copycount > count)
1203 copycount = count;
1204 if (copycount == 0)
1205 {
1206 *number = totalcount;
1207 return(0);
1208 }
1209 if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
1210 {
1211 return(EINVAL);
1212 }
1213 kd_readlast += copycount;
1214 totalcount += copycount;
1215 *number = totalcount;
1216 return(0);
1217 }
1218 } /* end if KDBG_BUFINIT */
1219 } /* end if count */
1220 return (EINVAL);
1221}
55e303ae
A
1222
1223unsigned char *getProcName(struct proc *proc);
1224unsigned char *getProcName(struct proc *proc) {
1225
1226 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
1227
1228}