/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <machine/spl.h>
26 #include <mach/clock_types.h>
27 #include <mach/mach_types.h>
28 #include <mach/mach_time.h>
29 #include <machine/machine_routines.h>
31 #include <sys/kdebug.h>
32 #include <sys/errno.h>
33 #include <sys/param.h>
36 #include <sys/sysctl.h>
38 #include <kern/thread.h>
39 #include <kern/task.h>
40 #include <vm/vm_kern.h>
43 /* trace enable status */
44 unsigned int kdebug_enable
= 0;
46 /* track timestamps for security server's entropy needs */
47 uint64_t * kd_entropy_buffer
= 0;
48 unsigned int kd_entropy_bufsize
= 0;
49 unsigned int kd_entropy_count
= 0;
50 unsigned int kd_entropy_indx
= 0;
51 unsigned int kd_entropy_buftomem
= 0;
53 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
55 unsigned int kd_buftomem
=0;
59 unsigned int nkdbufs
= 8192;
60 unsigned int kd_bufsize
= 0;
61 unsigned int kdebug_flags
= 0;
62 unsigned int kdebug_nolog
=1;
63 unsigned int kdlog_beg
=0;
64 unsigned int kdlog_end
=0;
65 unsigned int kdlog_value1
=0;
66 unsigned int kdlog_value2
=0;
67 unsigned int kdlog_value3
=0;
68 unsigned int kdlog_value4
=0;
70 unsigned long long kd_prev_timebase
= 0LL;
71 decl_simple_lock_data(,kd_trace_lock
);
73 kd_threadmap
*kd_mapptr
= 0;
74 unsigned int kd_mapsize
= 0;
75 unsigned int kd_mapcount
= 0;
76 unsigned int kd_maptomem
= 0;
78 pid_t global_state_pid
= -1; /* Used to control exclusive use of kd_buffer */
80 #define DBG_FUNC_MASK 0xfffffffc
83 extern natural_t rtclock_decrementer_min
;
95 /* task to string structure */
98 task_t
*task
; /* from procs task */
99 pid_t pid
; /* from procs p_pid */
100 char task_comm
[20]; /* from procs p_comm */
103 typedef struct tts tts_t
;
107 kd_threadmap
*map
; /* pointer to the map buffer */
113 typedef struct krt krt_t
;
115 /* This is for the CHUD toolkit call */
116 typedef void (*kd_chudhook_fn
) (unsigned int debugid
, unsigned int arg1
,
117 unsigned int arg2
, unsigned int arg3
,
118 unsigned int arg4
, unsigned int arg5
);
120 kd_chudhook_fn kdebug_chudhook
= 0; /* pointer to CHUD toolkit function */
122 /* Support syscall SYS_kdebug_trace */
123 kdebug_trace(p
, uap
, retval
)
125 struct kdebug_args
*uap
;
131 kernel_debug(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, 0);
137 kernel_debug(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
138 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
141 struct proc
*curproc
;
143 unsigned long long now
;
144 mach_timespec_t
*tsp
;
146 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
148 kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
150 if (!((kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) ||
151 (kdebug_enable
& KDEBUG_ENABLE_TRACE
)))
155 s
= ml_set_interrupts_enabled(FALSE
);
157 if (kdebug_enable
& KDEBUG_ENABLE_ENTROPY
)
159 if (kd_entropy_indx
< kd_entropy_count
)
161 kd_entropy_buffer
[ kd_entropy_indx
] = mach_absolute_time();
165 if (kd_entropy_indx
== kd_entropy_count
)
167 /* Disable entropy collection */
168 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
174 ml_set_interrupts_enabled(s
);
178 usimple_lock(&kd_trace_lock
);
179 if (kdebug_flags
& KDBG_PIDCHECK
)
181 /* If kdebug flag is not set for current proc, return */
182 curproc
= current_proc();
183 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
184 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
186 usimple_unlock(&kd_trace_lock
);
187 ml_set_interrupts_enabled(s
);
191 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
193 /* If kdebug flag is set for current proc, return */
194 curproc
= current_proc();
195 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
196 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
198 usimple_unlock(&kd_trace_lock
);
199 ml_set_interrupts_enabled(s
);
204 if (kdebug_flags
& KDBG_RANGECHECK
)
206 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
207 && (debugid
>> 24 != DBG_TRACE
))
209 usimple_unlock(&kd_trace_lock
);
210 ml_set_interrupts_enabled(s
);
214 else if (kdebug_flags
& KDBG_VALCHECK
)
216 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
217 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
218 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
219 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
220 (debugid
>> 24 != DBG_TRACE
))
222 usimple_unlock(&kd_trace_lock
);
223 ml_set_interrupts_enabled(s
);
228 kd
->debugid
= debugid
;
233 kd
->arg5
= (int)current_act();
235 kd
->arg5
|= KDBG_CPU_MASK
;
237 now
= kd
->timestamp
= mach_absolute_time();
239 /* Watch for out of order timestamps */
241 if (now
< kd_prev_timebase
)
243 kd
->timestamp
= ++kd_prev_timebase
;
247 /* Then just store the previous timestamp */
248 kd_prev_timebase
= now
;
254 if (kd_bufptr
>= kd_buflast
)
255 kd_bufptr
= kd_buffer
;
256 if (kd_bufptr
== kd_readlast
) {
257 if (kdebug_flags
& KDBG_NOWRAP
)
259 kdebug_flags
|= KDBG_WRAPPED
;
261 usimple_unlock(&kd_trace_lock
);
262 ml_set_interrupts_enabled(s
);
266 kernel_debug1(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
267 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
270 struct proc
*curproc
;
272 unsigned long long now
;
273 mach_timespec_t
*tsp
;
275 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
277 (void)kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
279 if (!((kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) ||
280 (kdebug_enable
& KDEBUG_ENABLE_TRACE
)))
284 s
= ml_set_interrupts_enabled(FALSE
);
288 ml_set_interrupts_enabled(s
);
292 usimple_lock(&kd_trace_lock
);
293 if (kdebug_flags
& KDBG_PIDCHECK
)
295 /* If kdebug flag is not set for current proc, return */
296 curproc
= current_proc();
297 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
298 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
300 usimple_unlock(&kd_trace_lock
);
301 ml_set_interrupts_enabled(s
);
305 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
307 /* If kdebug flag is set for current proc, return */
308 curproc
= current_proc();
309 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
310 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
312 usimple_unlock(&kd_trace_lock
);
313 ml_set_interrupts_enabled(s
);
318 if (kdebug_flags
& KDBG_RANGECHECK
)
320 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
321 && (debugid
>> 24 != DBG_TRACE
))
323 usimple_unlock(&kd_trace_lock
);
324 ml_set_interrupts_enabled(s
);
328 else if (kdebug_flags
& KDBG_VALCHECK
)
330 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
331 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
332 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
333 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
334 (debugid
>> 24 != DBG_TRACE
))
336 usimple_unlock(&kd_trace_lock
);
337 ml_set_interrupts_enabled(s
);
343 kd
->debugid
= debugid
;
349 now
= kd
->timestamp
= mach_absolute_time();
351 /* Watch for out of order timestamps */
353 if (now
< kd_prev_timebase
)
355 /* timestamps are out of order -- adjust */
356 kd
->timestamp
= ++kd_prev_timebase
;
360 /* Then just store the previous timestamp */
361 kd_prev_timebase
= now
;
366 if (kd_bufptr
>= kd_buflast
)
367 kd_bufptr
= kd_buffer
;
368 if (kd_bufptr
== kd_readlast
) {
369 if (kdebug_flags
& KDBG_NOWRAP
)
371 kdebug_flags
|= KDBG_WRAPPED
;
373 usimple_unlock(&kd_trace_lock
);
374 ml_set_interrupts_enabled(s
);
380 kd_bufsize
= nkdbufs
* sizeof(kd_buf
);
381 if (kmem_alloc(kernel_map
, &kd_buftomem
,
382 (vm_size_t
)kd_bufsize
) == KERN_SUCCESS
)
383 kd_buffer
= (kd_buf
*) kd_buftomem
;
384 else kd_buffer
= (kd_buf
*) 0;
385 kdebug_flags
&= ~KDBG_WRAPPED
;
387 simple_lock_init(&kd_trace_lock
);
388 kdebug_flags
|= (KDBG_INIT
| KDBG_BUFINIT
);
389 kd_bufptr
= kd_buffer
;
390 kd_buflast
= &kd_bufptr
[nkdbufs
];
391 kd_readlast
= kd_bufptr
;
392 kd_prev_timebase
= 0LL;
396 kdebug_flags
&= ~(KDBG_INIT
| KDBG_BUFINIT
);
407 /* Disable trace collecting */
408 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
411 if ((kdebug_flags
& KDBG_INIT
) && (kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
)
412 kmem_free(kernel_map
, (vm_offset_t
)kd_buffer
, kd_bufsize
);
414 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
416 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
417 kdebug_flags
&= ~KDBG_MAPINIT
;
419 kd_mapptr
= (kd_threadmap
*) 0;
423 ret
= kdbg_bootstrap();
428 void kdbg_trace_data(struct proc
*proc
, long *arg_pid
)
433 *arg_pid
= proc
->p_pid
;
439 void kdbg_trace_string(struct proc
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
)
455 /* Collect the pathname for tracing */
456 dbg_nameptr
= proc
->p_comm
;
457 dbg_namelen
= strlen(proc
->p_comm
);
463 if(dbg_namelen
> sizeof(dbg_parms
))
464 dbg_namelen
= sizeof(dbg_parms
);
466 for(i
=0;dbg_namelen
> 0; i
++)
468 dbg_parms
[i
]=*(long*)dbg_nameptr
;
469 dbg_nameptr
+= sizeof(long);
470 dbg_namelen
-= sizeof(long);
479 kdbg_resolve_map(thread_act_t th_act
, krt_t
*t
)
481 kd_threadmap
*mapptr
;
483 if(t
->count
< t
->maxcount
)
485 mapptr
=&t
->map
[t
->count
];
486 mapptr
->thread
= (unsigned int)th_act
;
487 (void) strncpy (mapptr
->command
, t
->atts
->task_comm
,
488 sizeof(t
->atts
->task_comm
)-1);
489 mapptr
->command
[sizeof(t
->atts
->task_comm
)-1] = '\0';
492 Some kernel threads have no associated pid.
493 We still need to mark the entry as valid.
496 mapptr
->valid
= t
->atts
->pid
;
508 int tts_count
; /* number of task-to-string structures */
509 struct tts
*tts_mapptr
;
510 unsigned int tts_mapsize
= 0;
511 unsigned int tts_maptomem
=0;
515 if (kdebug_flags
& KDBG_MAPINIT
)
518 /* Calculate the sizes of map buffers*/
519 for (p
= allproc
.lh_first
, kd_mapcount
=0, tts_count
=0; p
;
520 p
= p
->p_list
.le_next
)
522 kd_mapcount
+= get_task_numacts((task_t
)p
->task
);
527 * The proc count could change during buffer allocation,
528 * so introduce a small fudge factor to bump up the
529 * buffer sizes. This gives new tasks some chance of
530 * making into the tables. Bump up by 10%.
532 kd_mapcount
+= kd_mapcount
/10;
533 tts_count
+= tts_count
/10;
535 kd_mapsize
= kd_mapcount
* sizeof(kd_threadmap
);
536 if((kmem_alloc(kernel_map
, & kd_maptomem
,
537 (vm_size_t
)kd_mapsize
) == KERN_SUCCESS
))
539 kd_mapptr
= (kd_threadmap
*) kd_maptomem
;
540 bzero(kd_mapptr
, kd_mapsize
);
543 kd_mapptr
= (kd_threadmap
*) 0;
545 tts_mapsize
= tts_count
* sizeof(struct tts
);
546 if((kmem_alloc(kernel_map
, & tts_maptomem
,
547 (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
))
549 tts_mapptr
= (struct tts
*) tts_maptomem
;
550 bzero(tts_mapptr
, tts_mapsize
);
553 tts_mapptr
= (struct tts
*) 0;
557 * We need to save the procs command string
558 * and take a reference for each task associated
559 * with a valid process
563 for (p
= allproc
.lh_first
, i
=0; p
&& i
< tts_count
;
564 p
= p
->p_list
.le_next
) {
565 if (p
->p_flag
& P_WEXIT
)
568 if (task_reference_try(p
->task
)) {
569 tts_mapptr
[i
].task
= p
->task
;
570 tts_mapptr
[i
].pid
= p
->p_pid
;
571 (void)strncpy(&tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
) - 1);
579 if (kd_mapptr
&& tts_mapptr
)
581 kdebug_flags
|= KDBG_MAPINIT
;
582 /* Initialize thread map data */
583 akrt
.map
= kd_mapptr
;
585 akrt
.maxcount
= kd_mapcount
;
587 for (i
=0; i
< tts_count
; i
++)
589 akrt
.atts
= &tts_mapptr
[i
];
590 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
);
591 task_deallocate((task_t
) tts_mapptr
[i
].task
);
593 kmem_free(kernel_map
, (vm_offset_t
)tts_mapptr
, tts_mapsize
);
601 /* Clean up the trace buffer */
602 global_state_pid
= -1;
603 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
605 kdebug_flags
&= ~KDBG_BUFINIT
;
606 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
607 kdebug_flags
&= ~(KDBG_NOWRAP
| KDBG_RANGECHECK
| KDBG_VALCHECK
);
608 kdebug_flags
&= ~(KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
);
609 kmem_free(kernel_map
, (vm_offset_t
)kd_buffer
, kd_bufsize
);
610 kd_buffer
= (kd_buf
*)0;
612 kd_prev_timebase
= 0LL;
614 /* Clean up the thread map buffer */
615 kdebug_flags
&= ~KDBG_MAPINIT
;
616 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
617 kd_mapptr
= (kd_threadmap
*) 0;
622 kdbg_setpid(kd_regtype
*kdr
)
628 pid
= (pid_t
)kdr
->value1
;
629 flag
= (int)kdr
->value2
;
633 if ((p
= pfind(pid
)) == NULL
)
637 if (flag
== 1) /* turn on pid check for this and all pids */
639 kdebug_flags
|= KDBG_PIDCHECK
;
640 kdebug_flags
&= ~KDBG_PIDEXCLUDE
;
641 p
->p_flag
|= P_KDEBUG
;
643 else /* turn off pid check for this pid value */
645 /* Don't turn off all pid checking though */
646 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
647 p
->p_flag
&= ~P_KDEBUG
;
656 /* This is for pid exclusion in the trace buffer */
657 kdbg_setpidex(kd_regtype
*kdr
)
663 pid
= (pid_t
)kdr
->value1
;
664 flag
= (int)kdr
->value2
;
668 if ((p
= pfind(pid
)) == NULL
)
672 if (flag
== 1) /* turn on pid exclusion */
674 kdebug_flags
|= KDBG_PIDEXCLUDE
;
675 kdebug_flags
&= ~KDBG_PIDCHECK
;
676 p
->p_flag
|= P_KDEBUG
;
678 else /* turn off pid exclusion for this pid value */
680 /* Don't turn off all pid exclusion though */
681 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
682 p
->p_flag
&= ~P_KDEBUG
;
691 /* This is for setting a minimum decrementer value */
692 kdbg_setrtcdec(kd_regtype
*kdr
)
697 decval
= (natural_t
)kdr
->value1
;
699 if (decval
&& decval
< KDBG_MINRTCDEC
)
703 rtclock_decrementer_min
= decval
;
712 kdbg_setreg(kd_regtype
* kdr
)
715 unsigned int val_1
, val_2
, val
;
718 case KDBG_CLASSTYPE
:
719 val_1
= (kdr
->value1
& 0xff);
720 val_2
= (kdr
->value2
& 0xff);
721 kdlog_beg
= (val_1
<<24);
722 kdlog_end
= (val_2
<<24);
723 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
724 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
725 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
727 case KDBG_SUBCLSTYPE
:
728 val_1
= (kdr
->value1
& 0xff);
729 val_2
= (kdr
->value2
& 0xff);
731 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
732 kdlog_end
= ((val_1
<<24) | (val
<< 16));
733 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
734 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
735 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
737 case KDBG_RANGETYPE
:
738 kdlog_beg
= (kdr
->value1
);
739 kdlog_end
= (kdr
->value2
);
740 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
741 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
742 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
745 kdlog_value1
= (kdr
->value1
);
746 kdlog_value2
= (kdr
->value2
);
747 kdlog_value3
= (kdr
->value3
);
748 kdlog_value4
= (kdr
->value4
);
749 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
750 kdebug_flags
&= ~KDBG_RANGECHECK
; /* Turn off range check */
751 kdebug_flags
|= KDBG_VALCHECK
; /* Turn on specific value check */
754 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
765 kdbg_getreg(kd_regtype
* kdr
)
768 unsigned int val_1
, val_2
, val
;
771 case KDBG_CLASSTYPE
:
772 val_1
= (kdr
->value1
& 0xff);
774 kdlog_beg
= (val_1
<<24);
775 kdlog_end
= (val_2
<<24);
776 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
777 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
779 case KDBG_SUBCLSTYPE
:
780 val_1
= (kdr
->value1
& 0xff);
781 val_2
= (kdr
->value2
& 0xff);
783 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
784 kdlog_end
= ((val_1
<<24) | (val
<< 16));
785 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
786 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
788 case KDBG_RANGETYPE
:
789 kdlog_beg
= (kdr
->value1
);
790 kdlog_end
= (kdr
->value2
);
791 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
792 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
795 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
809 kdbg_readmap(kd_threadmap
*buffer
, size_t *number
)
815 count
= avail
/sizeof (kd_threadmap
);
817 if (count
&& (count
<= kd_mapcount
))
819 if((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
821 if (*number
< kd_mapsize
)
825 if (copyout(kd_mapptr
, buffer
, kd_mapsize
))
835 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
837 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
838 kdebug_flags
&= ~KDBG_MAPINIT
;
840 kd_mapptr
= (kd_threadmap
*) 0;
847 kdbg_getentropy (mach_timespec_t
* buffer
, size_t *number
, int ms_timeout
)
851 int count
= 0; /* The number of timestamp entries that will fill buffer */
853 if (kd_entropy_buffer
)
856 kd_entropy_count
= avail
/sizeof(mach_timespec_t
);
857 kd_entropy_bufsize
= kd_entropy_count
* sizeof(mach_timespec_t
);
860 /* Enforce maximum entropy entries here if needed */
862 /* allocate entropy buffer */
863 if (kmem_alloc(kernel_map
, &kd_entropy_buftomem
,
864 (vm_size_t
)kd_entropy_bufsize
) == KERN_SUCCESS
)
866 kd_entropy_buffer
= (uint64_t *) kd_entropy_buftomem
;
870 kd_entropy_buffer
= (uint64_t *) 0;
871 kd_entropy_count
= 0;
879 /* Enable entropy sampling */
880 kdebug_enable
|= KDEBUG_ENABLE_ENTROPY
;
882 ret
= tsleep (kdbg_getentropy
, PRIBIO
| PCATCH
, "kd_entropy", (ms_timeout
/(1000/HZ
)));
884 /* Disable entropy sampling */
885 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
890 if (kd_entropy_indx
> 0)
892 /* copyout the buffer */
893 if (copyout(kd_entropy_buffer
, buffer
, kd_entropy_indx
* sizeof(mach_timespec_t
)))
896 *number
= kd_entropy_indx
;
900 kd_entropy_count
= 0;
902 kd_entropy_buftomem
= 0;
903 kmem_free(kernel_map
, (vm_offset_t
)kd_entropy_buffer
, kd_entropy_bufsize
);
904 kd_entropy_buffer
= (uint64_t *) 0;
910 * This function is provided for the CHUD toolkit only.
912 * zero disables kdebug_chudhook function call
913 * non-zero enables kdebug_chudhook function call
915 * address of the enabled kdebug_chudhook function
918 void kdbg_control_chud(int val
, void *fn
)
921 /* enable chudhook */
922 kdebug_enable
|= KDEBUG_ENABLE_CHUD
;
923 kdebug_chudhook
= fn
;
926 /* disable chudhook */
927 kdebug_enable
&= ~KDEBUG_ENABLE_CHUD
;
933 kdbg_control(name
, namelen
, where
, sizep
)
942 unsigned int value
= name
[1];
944 kbufinfo_t kd_bufinfo
;
947 struct proc
*p
, *curproc
;
949 if (name
[0] == KERN_KDGETBUF
) {
951 Does not alter the global_state_pid
952 This is a passive request.
954 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
956 There is not enough room to return even
957 the first element of the info structure.
962 kd_bufinfo
.nkdbufs
= nkdbufs
;
963 kd_bufinfo
.nkdthreads
= kd_mapsize
/ sizeof(kd_threadmap
);
964 kd_bufinfo
.nolog
= kdebug_nolog
;
965 kd_bufinfo
.flags
= kdebug_flags
;
966 kd_bufinfo
.bufid
= global_state_pid
;
968 if(size
>= sizeof(kbufinfo_t
)) {
969 /* Provide all the info we have */
970 if(copyout (&kd_bufinfo
, where
, sizeof(kbufinfo_t
)))
975 For backwards compatibility, only provide
976 as much info as there is room for.
978 if(copyout (&kd_bufinfo
, where
, size
))
983 else if (name
[0] == KERN_KDGETENTROPY
) {
984 if (kd_entropy_buffer
)
987 ret
= kdbg_getentropy((mach_timespec_t
*)where
, sizep
, value
);
991 if(curproc
= current_proc())
992 curpid
= curproc
->p_pid
;
996 if (global_state_pid
== -1)
997 global_state_pid
= curpid
;
998 else if (global_state_pid
!= curpid
)
1000 if((p
= pfind(global_state_pid
)) == NULL
)
1002 /* The global pid no longer exists */
1003 global_state_pid
= curpid
;
1007 /* The global pid exists, deny this request */
1014 value
&= KDBG_USERFLAGS
;
1015 kdebug_flags
|= value
;
1018 value
&= KDBG_USERFLAGS
;
1019 kdebug_flags
&= ~value
;
1021 case KERN_KDENABLE
: /* used to enable or disable */
1024 /* enable only if buffer is initialized */
1025 if (!(kdebug_flags
& KDBG_BUFINIT
))
1033 kdebug_enable
|= KDEBUG_ENABLE_TRACE
;
1035 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
1037 kdebug_nolog
= (value
)?0:1;
1039 if (kdebug_enable
& KDEBUG_ENABLE_TRACE
)
1043 /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
1044 /* 'value' is the desired number of trace entries */
1045 max_entries
= (sane_size
/4) / sizeof(kd_buf
);
1046 if (value
<= max_entries
)
1049 nkdbufs
= max_entries
;
1058 if(size
< sizeof(kd_regtype
)) {
1062 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1066 ret
= kdbg_setreg(&kd_Reg
);
1069 if(size
< sizeof(kd_regtype
)) {
1073 ret
= kdbg_getreg(&kd_Reg
);
1074 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))){
1079 ret
= kdbg_read(where
, sizep
);
1082 if (size
< sizeof(kd_regtype
)) {
1086 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1090 ret
= kdbg_setpid(&kd_Reg
);
1093 if (size
< sizeof(kd_regtype
)) {
1097 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1101 ret
= kdbg_setpidex(&kd_Reg
);
1104 ret
= kdbg_readmap((kd_threadmap
*)where
, sizep
);
1106 case KERN_KDSETRTCDEC
:
1107 if (size
< sizeof(kd_regtype
)) {
1111 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1115 ret
= kdbg_setrtcdec(&kd_Reg
);
1124 kdbg_read(kd_buf
* buffer
, size_t *number
)
1131 unsigned int my_kdebug_flags
;
1132 kd_buf
* my_kd_bufptr
;
1134 s
= ml_set_interrupts_enabled(FALSE
);
1135 usimple_lock(&kd_trace_lock
);
1136 my_kdebug_flags
= kdebug_flags
;
1137 my_kd_bufptr
= kd_bufptr
;
1138 usimple_unlock(&kd_trace_lock
);
1139 ml_set_interrupts_enabled(s
);
1141 count
= avail
/sizeof(kd_buf
);
1143 if ((my_kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
) {
1144 if (count
> nkdbufs
)
1146 if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
> kd_readlast
))
1148 copycount
= my_kd_bufptr
-kd_readlast
;
1149 if (copycount
> count
)
1152 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1157 kd_readlast
+= copycount
;
1158 *number
= copycount
;
1161 else if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
== kd_readlast
))
1168 if (my_kdebug_flags
& KDBG_WRAPPED
)
1170 kd_readlast
= my_kd_bufptr
;
1171 kdebug_flags
&= ~KDBG_WRAPPED
;
1174 /* Note that by setting kd_readlast equal to my_kd_bufptr,
1175 we now treat the kd_buffer read the same as if we weren't
1176 wrapped and my_kd_bufptr was less than kd_readlast.
1179 /* first copyout from readlast to end of kd_buffer */
1180 copycount
= kd_buflast
- kd_readlast
;
1181 if (copycount
> count
)
1183 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1188 buffer
+= copycount
;
1190 totalcount
= copycount
;
1191 kd_readlast
+= copycount
;
1192 if (kd_readlast
== kd_buflast
)
1193 kd_readlast
= kd_buffer
;
1196 *number
= totalcount
;
1200 /* second copyout from top of kd_buffer to bufptr */
1201 copycount
= my_kd_bufptr
- kd_readlast
;
1202 if (copycount
> count
)
1206 *number
= totalcount
;
1209 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1213 kd_readlast
+= copycount
;
1214 totalcount
+= copycount
;
1215 *number
= totalcount
;
1218 } /* end if KDBG_BUFINIT */
1219 } /* end if count */
1223 unsigned char *getProcName(struct proc
*proc
);
1224 unsigned char *getProcName(struct proc
*proc
) {
1226 return (unsigned char *) &proc
->p_comm
; /* Return pointer to the proc name */