/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
26 #include <machine/spl.h>
29 #include <mach/clock_types.h>
30 #include <mach/mach_types.h>
31 #include <machine/machine_routines.h>
33 #include <sys/kdebug.h>
34 #include <sys/errno.h>
35 #include <sys/param.h>
38 #include <sys/sysctl.h>
40 #include <kern/thread.h>
41 #include <kern/task.h>
42 #include <vm/vm_kern.h>
45 /* trace enable status */
46 unsigned int kdebug_enable
= 0;
48 /* track timestamps for security server's entropy needs */
49 mach_timespec_t
* kd_entropy_buffer
= 0;
50 unsigned int kd_entropy_bufsize
= 0;
51 unsigned int kd_entropy_count
= 0;
52 unsigned int kd_entropy_indx
= 0;
53 unsigned int kd_entropy_buftomem
= 0;
55 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
57 unsigned int kd_buftomem
=0;
61 unsigned int nkdbufs
= 8192;
62 unsigned int kd_bufsize
= 0;
63 unsigned int kdebug_flags
= 0;
64 unsigned int kdebug_nolog
=1;
65 unsigned int kdlog_beg
=0;
66 unsigned int kdlog_end
=0;
67 unsigned int kdlog_value1
=0;
68 unsigned int kdlog_value2
=0;
69 unsigned int kdlog_value3
=0;
70 unsigned int kdlog_value4
=0;
72 unsigned long long kd_prev_timebase
= 0LL;
73 decl_simple_lock_data(,kd_trace_lock
);
75 kd_threadmap
*kd_mapptr
= 0;
76 unsigned int kd_mapsize
= 0;
77 unsigned int kd_mapcount
= 0;
78 unsigned int kd_maptomem
= 0;
80 pid_t global_state_pid
= -1; /* Used to control exclusive use of kd_buffer */
82 #define DBG_FUNC_MASK 0xfffffffc
85 extern natural_t rtclock_decrementer_min
;
97 /* task to string structure */
101 char task_comm
[20]; /* from procs p_comm */
104 typedef struct tts tts_t
;
108 kd_threadmap
*map
; /* pointer to the map buffer */
114 typedef struct krt krt_t
;
116 /* This is for the CHUD toolkit call */
117 typedef void (*kd_chudhook_fn
) (unsigned int debugid
, unsigned int arg1
,
118 unsigned int arg2
, unsigned int arg3
,
119 unsigned int arg4
, unsigned int arg5
);
121 kd_chudhook_fn kdebug_chudhook
= 0; /* pointer to CHUD toolkit function */
123 /* Support syscall SYS_kdebug_trace */
124 kdebug_trace(p
, uap
, retval
)
126 struct kdebug_args
*uap
;
132 kernel_debug(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, 0);
138 kernel_debug(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
139 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
142 struct proc
*curproc
;
144 unsigned long long now
;
145 mach_timespec_t
*tsp
;
147 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
149 kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
151 if (!((kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) ||
152 (kdebug_enable
& KDEBUG_ENABLE_TRACE
)))
156 s
= ml_set_interrupts_enabled(FALSE
);
158 if (kdebug_enable
& KDEBUG_ENABLE_ENTROPY
)
160 if (kd_entropy_indx
< kd_entropy_count
)
162 ml_get_timebase((unsigned long long *) &kd_entropy_buffer
[ kd_entropy_indx
]);
166 if (kd_entropy_indx
== kd_entropy_count
)
168 /* Disable entropy collection */
169 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
175 ml_set_interrupts_enabled(s
);
179 usimple_lock(&kd_trace_lock
);
180 if (kdebug_flags
& KDBG_PIDCHECK
)
182 /* If kdebug flag is not set for current proc, return */
183 curproc
= current_proc();
184 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
185 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
187 usimple_unlock(&kd_trace_lock
);
188 ml_set_interrupts_enabled(s
);
192 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
194 /* If kdebug flag is set for current proc, return */
195 curproc
= current_proc();
196 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
197 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
199 usimple_unlock(&kd_trace_lock
);
200 ml_set_interrupts_enabled(s
);
205 if (kdebug_flags
& KDBG_RANGECHECK
)
207 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
208 && (debugid
>> 24 != DBG_TRACE
))
210 usimple_unlock(&kd_trace_lock
);
211 ml_set_interrupts_enabled(s
);
215 else if (kdebug_flags
& KDBG_VALCHECK
)
217 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
218 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
219 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
220 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
221 (debugid
>> 24 != DBG_TRACE
))
223 usimple_unlock(&kd_trace_lock
);
224 ml_set_interrupts_enabled(s
);
229 kd
->debugid
= debugid
;
234 kd
->arg5
= (int)current_thread();
236 kd
->arg5
|= KDBG_CPU_MASK
;
238 ml_get_timebase((unsigned long long *)&kd
->timestamp
);
240 /* Watch for out of order timestamps */
241 now
= (((unsigned long long)kd
->timestamp
.tv_sec
) << 32) |
242 (unsigned long long)((unsigned int)(kd
->timestamp
.tv_nsec
));
244 if (now
< kd_prev_timebase
)
246 /* timestamps are out of order -- adjust */
248 tsp
= (mach_timespec_t
*)&kd_prev_timebase
;
249 kd
->timestamp
.tv_sec
= tsp
->tv_sec
;
250 kd
->timestamp
.tv_nsec
= tsp
->tv_nsec
;
254 /* Then just store the previous timestamp */
255 kd_prev_timebase
= now
;
261 if (kd_bufptr
>= kd_buflast
)
262 kd_bufptr
= kd_buffer
;
263 if (kd_bufptr
== kd_readlast
) {
264 if (kdebug_flags
& KDBG_NOWRAP
)
266 kdebug_flags
|= KDBG_WRAPPED
;
268 usimple_unlock(&kd_trace_lock
);
269 ml_set_interrupts_enabled(s
);
273 kernel_debug1(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
274 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
277 struct proc
*curproc
;
279 unsigned long long now
;
280 mach_timespec_t
*tsp
;
282 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
284 (void)kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
286 if (!((kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) ||
287 (kdebug_enable
& KDEBUG_ENABLE_TRACE
)))
291 s
= ml_set_interrupts_enabled(FALSE
);
295 ml_set_interrupts_enabled(s
);
299 usimple_lock(&kd_trace_lock
);
300 if (kdebug_flags
& KDBG_PIDCHECK
)
302 /* If kdebug flag is not set for current proc, return */
303 curproc
= current_proc();
304 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
305 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
307 usimple_unlock(&kd_trace_lock
);
308 ml_set_interrupts_enabled(s
);
312 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
314 /* If kdebug flag is set for current proc, return */
315 curproc
= current_proc();
316 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
317 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
319 usimple_unlock(&kd_trace_lock
);
320 ml_set_interrupts_enabled(s
);
325 if (kdebug_flags
& KDBG_RANGECHECK
)
327 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
328 && (debugid
>> 24 != DBG_TRACE
))
330 usimple_unlock(&kd_trace_lock
);
331 ml_set_interrupts_enabled(s
);
335 else if (kdebug_flags
& KDBG_VALCHECK
)
337 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
338 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
339 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
340 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
341 (debugid
>> 24 != DBG_TRACE
))
343 usimple_unlock(&kd_trace_lock
);
344 ml_set_interrupts_enabled(s
);
350 kd
->debugid
= debugid
;
356 ml_get_timebase((unsigned long long *)&kd
->timestamp
);
358 /* Watch for out of order timestamps */
359 now
= (((unsigned long long)kd
->timestamp
.tv_sec
) << 32) |
360 (unsigned long long)((unsigned int)(kd
->timestamp
.tv_nsec
));
362 if (now
< kd_prev_timebase
)
364 /* timestamps are out of order -- adjust */
366 tsp
= (mach_timespec_t
*)&kd_prev_timebase
;
367 kd
->timestamp
.tv_sec
= tsp
->tv_sec
;
368 kd
->timestamp
.tv_nsec
= tsp
->tv_nsec
;
372 /* Then just store the previous timestamp */
373 kd_prev_timebase
= now
;
378 if (kd_bufptr
>= kd_buflast
)
379 kd_bufptr
= kd_buffer
;
380 if (kd_bufptr
== kd_readlast
) {
381 if (kdebug_flags
& KDBG_NOWRAP
)
383 kdebug_flags
|= KDBG_WRAPPED
;
385 usimple_unlock(&kd_trace_lock
);
386 ml_set_interrupts_enabled(s
);
392 kd_bufsize
= nkdbufs
* sizeof(kd_buf
);
393 if (kmem_alloc(kernel_map
, &kd_buftomem
,
394 (vm_size_t
)kd_bufsize
) == KERN_SUCCESS
)
395 kd_buffer
= (kd_buf
*) kd_buftomem
;
396 else kd_buffer
= (kd_buf
*) 0;
397 kdebug_flags
&= ~KDBG_WRAPPED
;
399 simple_lock_init(&kd_trace_lock
);
400 kdebug_flags
|= (KDBG_INIT
| KDBG_BUFINIT
);
401 kd_bufptr
= kd_buffer
;
402 kd_buflast
= &kd_bufptr
[nkdbufs
];
403 kd_readlast
= kd_bufptr
;
404 kd_prev_timebase
= 0LL;
408 kdebug_flags
&= ~(KDBG_INIT
| KDBG_BUFINIT
);
419 /* Disable trace collecting */
420 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
423 if ((kdebug_flags
& KDBG_INIT
) && (kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
)
424 kmem_free(kernel_map
, (char *)kd_buffer
, kd_bufsize
);
426 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
428 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
429 kdebug_flags
&= ~KDBG_MAPINIT
;
431 kd_mapptr
= (kd_threadmap
*) 0;
435 ret
= kdbg_bootstrap();
440 void kdbg_trace_string(struct proc
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
)
456 /* Collect the pathname for tracing */
457 dbg_nameptr
= proc
->p_comm
;
458 dbg_namelen
= strlen(proc
->p_comm
);
464 if(dbg_namelen
> sizeof(dbg_parms
))
465 dbg_namelen
= sizeof(dbg_parms
);
467 for(i
=0;dbg_namelen
> 0; i
++)
469 dbg_parms
[i
]=*(long*)dbg_nameptr
;
470 dbg_nameptr
+= sizeof(long);
471 dbg_namelen
-= sizeof(long);
480 kdbg_resolve_map(thread_act_t th_act
, krt_t
*t
)
482 kd_threadmap
*mapptr
;
484 if(t
->count
< t
->maxcount
)
486 mapptr
=&t
->map
[t
->count
];
487 mapptr
->thread
= (unsigned int)getshuttle_thread(th_act
);
489 (void) strncpy (mapptr
->command
, t
->atts
->task_comm
,
490 sizeof(t
->atts
->task_comm
)-1);
491 mapptr
->command
[sizeof(t
->atts
->task_comm
)-1] = '\0';
500 int tts_count
; /* number of task-to-string structures */
501 struct tts
*tts_mapptr
;
502 unsigned int tts_mapsize
= 0;
503 unsigned int tts_maptomem
=0;
507 if (kdebug_flags
& KDBG_MAPINIT
)
510 /* Calculate the sizes of map buffers*/
511 for (p
= allproc
.lh_first
, kd_mapcount
=0, tts_count
=0; p
;
512 p
= p
->p_list
.le_next
)
514 kd_mapcount
+= get_task_numacts((task_t
)p
->task
);
519 * The proc count could change during buffer allocation,
520 * so introduce a small fudge factor to bump up the
521 * buffer sizes. This gives new tasks some chance of
522 * making into the tables. Bump up by 10%.
524 kd_mapcount
+= kd_mapcount
/10;
525 tts_count
+= tts_count
/10;
527 kd_mapsize
= kd_mapcount
* sizeof(kd_threadmap
);
528 if((kmem_alloc(kernel_map
, & kd_maptomem
,
529 (vm_size_t
)kd_mapsize
) == KERN_SUCCESS
))
530 kd_mapptr
= (kd_threadmap
*) kd_maptomem
;
532 kd_mapptr
= (kd_threadmap
*) 0;
534 tts_mapsize
= tts_count
* sizeof(struct tts
);
535 if((kmem_alloc(kernel_map
, & tts_maptomem
,
536 (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
))
537 tts_mapptr
= (struct tts
*) tts_maptomem
;
539 tts_mapptr
= (struct tts
*) 0;
543 * We need to save the procs command string
544 * and take a reference for each task associated
545 * with a valid process
549 for (p
= allproc
.lh_first
, i
=0; p
&& i
< tts_count
;
550 p
= p
->p_list
.le_next
) {
551 if (p
->p_flag
& P_WEXIT
)
554 if (task_reference_try(p
->task
)) {
555 tts_mapptr
[i
].task
= p
->task
;
556 (void)strncpy(&tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
) - 1);
564 if (kd_mapptr
&& tts_mapptr
)
566 kdebug_flags
|= KDBG_MAPINIT
;
567 /* Initialize thread map data */
568 akrt
.map
= kd_mapptr
;
570 akrt
.maxcount
= kd_mapcount
;
572 for (i
=0; i
< tts_count
; i
++)
574 akrt
.atts
= &tts_mapptr
[i
];
575 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
);
576 task_deallocate(tts_mapptr
[i
].task
);
578 kmem_free(kernel_map
, (char *)tts_mapptr
, tts_mapsize
);
586 /* Clean up the trace buffer */
587 global_state_pid
= -1;
588 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
590 kdebug_flags
&= ~KDBG_BUFINIT
;
591 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
592 kdebug_flags
&= ~(KDBG_NOWRAP
| KDBG_RANGECHECK
| KDBG_VALCHECK
);
593 kdebug_flags
&= ~(KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
);
594 kmem_free(kernel_map
, (char *)kd_buffer
, kd_bufsize
);
595 kd_buffer
= (kd_buf
*)0;
597 kd_prev_timebase
= 0LL;
599 /* Clean up the thread map buffer */
600 kdebug_flags
&= ~KDBG_MAPINIT
;
601 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
602 kd_mapptr
= (kd_threadmap
*) 0;
607 kdbg_setpid(kd_regtype
*kdr
)
613 pid
= (pid_t
)kdr
->value1
;
614 flag
= (int)kdr
->value2
;
618 if ((p
= pfind(pid
)) == NULL
)
622 if (flag
== 1) /* turn on pid check for this and all pids */
624 kdebug_flags
|= KDBG_PIDCHECK
;
625 kdebug_flags
&= ~KDBG_PIDEXCLUDE
;
626 p
->p_flag
|= P_KDEBUG
;
628 else /* turn off pid check for this pid value */
630 /* Don't turn off all pid checking though */
631 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
632 p
->p_flag
&= ~P_KDEBUG
;
641 /* This is for pid exclusion in the trace buffer */
642 kdbg_setpidex(kd_regtype
*kdr
)
648 pid
= (pid_t
)kdr
->value1
;
649 flag
= (int)kdr
->value2
;
653 if ((p
= pfind(pid
)) == NULL
)
657 if (flag
== 1) /* turn on pid exclusion */
659 kdebug_flags
|= KDBG_PIDEXCLUDE
;
660 kdebug_flags
&= ~KDBG_PIDCHECK
;
661 p
->p_flag
|= P_KDEBUG
;
663 else /* turn off pid exclusion for this pid value */
665 /* Don't turn off all pid exclusion though */
666 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
667 p
->p_flag
&= ~P_KDEBUG
;
676 /* This is for setting a minimum decrementer value */
677 kdbg_setrtcdec(kd_regtype
*kdr
)
682 decval
= (natural_t
)kdr
->value1
;
684 if (decval
&& decval
< KDBG_MINRTCDEC
)
688 rtclock_decrementer_min
= decval
;
697 kdbg_setreg(kd_regtype
* kdr
)
700 unsigned int val_1
, val_2
, val
;
703 case KDBG_CLASSTYPE
:
704 val_1
= (kdr
->value1
& 0xff);
705 val_2
= (kdr
->value2
& 0xff);
706 kdlog_beg
= (val_1
<<24);
707 kdlog_end
= (val_2
<<24);
708 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
709 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
710 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
712 case KDBG_SUBCLSTYPE
:
713 val_1
= (kdr
->value1
& 0xff);
714 val_2
= (kdr
->value2
& 0xff);
716 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
717 kdlog_end
= ((val_1
<<24) | (val
<< 16));
718 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
719 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
720 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
722 case KDBG_RANGETYPE
:
723 kdlog_beg
= (kdr
->value1
);
724 kdlog_end
= (kdr
->value2
);
725 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
726 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
727 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
730 kdlog_value1
= (kdr
->value1
);
731 kdlog_value2
= (kdr
->value2
);
732 kdlog_value3
= (kdr
->value3
);
733 kdlog_value4
= (kdr
->value4
);
734 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
735 kdebug_flags
&= ~KDBG_RANGECHECK
; /* Turn off range check */
736 kdebug_flags
|= KDBG_VALCHECK
; /* Turn on specific value check */
739 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
750 kdbg_getreg(kd_regtype
* kdr
)
753 unsigned int val_1
, val_2
, val
;
756 case KDBG_CLASSTYPE
:
757 val_1
= (kdr
->value1
& 0xff);
759 kdlog_beg
= (val_1
<<24);
760 kdlog_end
= (val_2
<<24);
761 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
762 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
764 case KDBG_SUBCLSTYPE
:
765 val_1
= (kdr
->value1
& 0xff);
766 val_2
= (kdr
->value2
& 0xff);
768 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
769 kdlog_end
= ((val_1
<<24) | (val
<< 16));
770 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
771 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
773 case KDBG_RANGETYPE
:
774 kdlog_beg
= (kdr
->value1
);
775 kdlog_end
= (kdr
->value2
);
776 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
777 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
780 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
794 kdbg_readmap(kd_threadmap
*buffer
, size_t *number
)
800 count
= avail
/sizeof (kd_threadmap
);
802 if (count
&& (count
<= kd_mapcount
))
804 if((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
806 if (*number
< kd_mapsize
)
810 if (copyout(kd_mapptr
, buffer
, kd_mapsize
))
820 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
822 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
823 kdebug_flags
&= ~KDBG_MAPINIT
;
825 kd_mapptr
= (kd_threadmap
*) 0;
832 kdbg_getentropy (mach_timespec_t
* buffer
, size_t *number
, int ms_timeout
)
836 int count
= 0; /* The number of timestamp entries that will fill buffer */
838 if (kd_entropy_buffer
)
841 kd_entropy_count
= avail
/sizeof(mach_timespec_t
);
842 kd_entropy_bufsize
= kd_entropy_count
* sizeof(mach_timespec_t
);
845 /* Enforce maximum entropy entries here if needed */
847 /* allocate entropy buffer */
848 if (kmem_alloc(kernel_map
, &kd_entropy_buftomem
,
849 (vm_size_t
)kd_entropy_bufsize
) == KERN_SUCCESS
)
851 kd_entropy_buffer
= (mach_timespec_t
*)kd_entropy_buftomem
;
855 kd_entropy_buffer
= (mach_timespec_t
*) 0;
856 kd_entropy_count
= 0;
864 /* Enable entropy sampling */
865 kdebug_enable
|= KDEBUG_ENABLE_ENTROPY
;
867 ret
= tsleep (kdbg_getentropy
, PRIBIO
| PCATCH
, "kd_entropy", (ms_timeout
/(1000/HZ
)));
869 /* Disable entropy sampling */
870 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
875 if (kd_entropy_indx
> 0)
877 /* copyout the buffer */
878 if (copyout(kd_entropy_buffer
, buffer
, kd_entropy_indx
* sizeof(mach_timespec_t
)))
881 *number
= kd_entropy_indx
;
885 kd_entropy_count
= 0;
887 kd_entropy_buftomem
= 0;
888 kmem_free(kernel_map
, (char *)kd_entropy_buffer
, kd_entropy_bufsize
);
889 kd_entropy_buffer
= (mach_timespec_t
*) 0;
895 * This function is provided for the CHUD toolkit only.
897 * zero disables kdebug_chudhook function call
898 * non-zero enables kdebug_chudhook function call
900 * address of the enabled kdebug_chudhook function
903 void kdbg_control_chud(int val
, void *fn
)
906 /* enable chudhook */
907 kdebug_enable
|= KDEBUG_ENABLE_CHUD
;
908 kdebug_chudhook
= fn
;
911 /* disable chudhook */
912 kdebug_enable
&= ~KDEBUG_ENABLE_CHUD
;
918 kdbg_control(name
, namelen
, where
, sizep
)
927 unsigned int value
= name
[1];
929 kbufinfo_t kd_bufinfo
;
932 struct proc
*p
, *curproc
;
934 if (name
[0] == KERN_KDGETBUF
) {
936 Does not alter the global_state_pid
937 This is a passive request.
939 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
941 There is not enough room to return even
942 the first element of the info structure.
947 kd_bufinfo
.nkdbufs
= nkdbufs
;
948 kd_bufinfo
.nkdthreads
= kd_mapsize
/ sizeof(kd_threadmap
);
949 kd_bufinfo
.nolog
= kdebug_nolog
;
950 kd_bufinfo
.flags
= kdebug_flags
;
951 kd_bufinfo
.bufid
= global_state_pid
;
953 if(size
>= sizeof(kbufinfo_t
)) {
954 /* Provide all the info we have */
955 if(copyout (&kd_bufinfo
, where
, sizeof(kbufinfo_t
)))
960 For backwards compatibility, only provide
961 as much info as there is room for.
963 if(copyout (&kd_bufinfo
, where
, size
))
968 else if (name
[0] == KERN_KDGETENTROPY
) {
969 if (kd_entropy_buffer
)
972 ret
= kdbg_getentropy((mach_timespec_t
*)where
, sizep
, value
);
976 if(curproc
= current_proc())
977 curpid
= curproc
->p_pid
;
981 if (global_state_pid
== -1)
982 global_state_pid
= curpid
;
983 else if (global_state_pid
!= curpid
)
985 if((p
= pfind(global_state_pid
)) == NULL
)
987 /* The global pid no longer exists */
988 global_state_pid
= curpid
;
992 /* The global pid exists, deny this request */
999 value
&= KDBG_USERFLAGS
;
1000 kdebug_flags
|= value
;
1003 value
&= KDBG_USERFLAGS
;
1004 kdebug_flags
&= ~value
;
1006 case KERN_KDENABLE
: /* used to enable or disable */
1009 /* enable only if buffer is initialized */
1010 if (!(kdebug_flags
& KDBG_BUFINIT
))
1018 kdebug_enable
|= KDEBUG_ENABLE_TRACE
;
1020 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
1022 kdebug_nolog
= (value
)?0:1;
1024 if (kdebug_enable
& KDEBUG_ENABLE_TRACE
)
1028 /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
1029 /* 'value' is the desired number of trace entries */
1030 max_entries
= (sane_size
/4) / sizeof(kd_buf
);
1031 if (value
<= max_entries
)
1034 nkdbufs
= max_entries
;
1043 if(size
< sizeof(kd_regtype
)) {
1047 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1051 ret
= kdbg_setreg(&kd_Reg
);
1054 if(size
< sizeof(kd_regtype
)) {
1058 ret
= kdbg_getreg(&kd_Reg
);
1059 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))){
1064 ret
= kdbg_read(where
, sizep
);
1067 if (size
< sizeof(kd_regtype
)) {
1071 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1075 ret
= kdbg_setpid(&kd_Reg
);
1078 if (size
< sizeof(kd_regtype
)) {
1082 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1086 ret
= kdbg_setpidex(&kd_Reg
);
1089 ret
= kdbg_readmap((kd_threadmap
*)where
, sizep
);
1091 case KERN_KDSETRTCDEC
:
1092 if (size
< sizeof(kd_regtype
)) {
1096 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1100 ret
= kdbg_setrtcdec(&kd_Reg
);
1109 kdbg_read(kd_buf
* buffer
, size_t *number
)
1116 unsigned int my_kdebug_flags
;
1117 kd_buf
* my_kd_bufptr
;
1119 s
= ml_set_interrupts_enabled(FALSE
);
1120 usimple_lock(&kd_trace_lock
);
1121 my_kdebug_flags
= kdebug_flags
;
1122 my_kd_bufptr
= kd_bufptr
;
1123 usimple_unlock(&kd_trace_lock
);
1124 ml_set_interrupts_enabled(s
);
1126 count
= avail
/sizeof(kd_buf
);
1128 if ((my_kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
) {
1129 if (count
> nkdbufs
)
1131 if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
> kd_readlast
))
1133 copycount
= my_kd_bufptr
-kd_readlast
;
1134 if (copycount
> count
)
1137 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1142 kd_readlast
+= copycount
;
1143 *number
= copycount
;
1146 else if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
== kd_readlast
))
1153 if (my_kdebug_flags
& KDBG_WRAPPED
)
1155 kd_readlast
= my_kd_bufptr
;
1156 kdebug_flags
&= ~KDBG_WRAPPED
;
1159 /* Note that by setting kd_readlast equal to my_kd_bufptr,
1160 we now treat the kd_buffer read the same as if we weren't
1161 wrapped and my_kd_bufptr was less than kd_readlast.
1164 /* first copyout from readlast to end of kd_buffer */
1165 copycount
= kd_buflast
- kd_readlast
;
1166 if (copycount
> count
)
1168 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1173 buffer
+= copycount
;
1175 totalcount
= copycount
;
1176 kd_readlast
+= copycount
;
1177 if (kd_readlast
== kd_buflast
)
1178 kd_readlast
= kd_buffer
;
1181 *number
= totalcount
;
1185 /* second copyout from top of kd_buffer to bufptr */
1186 copycount
= my_kd_bufptr
- kd_readlast
;
1187 if (copycount
> count
)
1191 *number
= totalcount
;
1194 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1198 kd_readlast
+= copycount
;
1199 totalcount
+= copycount
;
1200 *number
= totalcount
;
1203 } /* end if KDBG_BUFINIT */
1204 } /* end if count */
1208 unsigned char *getProcName(struct proc
*proc
);
1209 unsigned char *getProcName(struct proc
*proc
) {
1211 return &proc
->p_comm
; /* Return pointer to the proc name */