/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <machine/spl.h>
25 #include <sys/errno.h>
26 #include <sys/param.h>
27 #include <sys/proc_internal.h>
29 #include <sys/sysctl.h>
30 #include <sys/kdebug.h>
31 #include <sys/sysproto.h>
34 #include <mach/clock_types.h>
35 #include <mach/mach_types.h>
36 #include <mach/mach_time.h>
37 #include <machine/machine_routines.h>
39 #include <kern/thread.h>
40 #include <kern/task.h>
41 #include <vm/vm_kern.h>
44 /* trace enable status */
45 unsigned int kdebug_enable
= 0;
47 /* track timestamps for security server's entropy needs */
48 uint64_t * kd_entropy_buffer
= 0;
49 unsigned int kd_entropy_bufsize
= 0;
50 unsigned int kd_entropy_count
= 0;
51 unsigned int kd_entropy_indx
= 0;
52 unsigned int kd_entropy_buftomem
= 0;
55 #define SLOW_NOLOG 0x01
56 #define SLOW_CHECKS 0x02
57 #define SLOW_ENTROPY 0x04
59 unsigned int kdebug_slowcheck
=SLOW_NOLOG
;
61 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
63 unsigned int kd_buftomem
=0;
67 unsigned int nkdbufs
= 8192;
68 unsigned int kd_bufsize
= 0;
69 unsigned int kdebug_flags
= 0;
70 unsigned int kdlog_beg
=0;
71 unsigned int kdlog_end
=0;
72 unsigned int kdlog_value1
=0;
73 unsigned int kdlog_value2
=0;
74 unsigned int kdlog_value3
=0;
75 unsigned int kdlog_value4
=0;
77 unsigned long long kd_prev_timebase
= 0LL;
79 static lck_mtx_t
* kd_trace_mtx
;
80 static lck_grp_t
* kd_trace_mtx_grp
;
81 static lck_attr_t
* kd_trace_mtx_attr
;
82 static lck_grp_attr_t
*kd_trace_mtx_grp_attr
;
84 static lck_spin_t
* kd_trace_lock
;
85 static lck_grp_t
* kd_trace_lock_grp
;
86 static lck_attr_t
* kd_trace_lock_attr
;
87 static lck_grp_attr_t
*kd_trace_lock_grp_attr
;
89 kd_threadmap
*kd_mapptr
= 0;
90 unsigned int kd_mapsize
= 0;
91 unsigned int kd_mapcount
= 0;
92 unsigned int kd_maptomem
= 0;
94 pid_t global_state_pid
= -1; /* Used to control exclusive use of kd_buffer */
96 #define DBG_FUNC_MASK 0xfffffffc
99 extern natural_t rtclock_decrementer_min
;
102 /* task to string structure */
105 task_t
*task
; /* from procs task */
106 pid_t pid
; /* from procs p_pid */
107 char task_comm
[20]; /* from procs p_comm */
110 typedef struct tts tts_t
;
114 kd_threadmap
*map
; /* pointer to the map buffer */
120 typedef struct krt krt_t
;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2,    unsigned int arg3,
				unsigned int arg4,    unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */
130 /* Support syscall SYS_kdebug_trace */
131 kdebug_trace(p
, uap
, retval
)
133 struct kdebug_trace_args
*uap
;
136 if ( (kdebug_enable
== 0) )
139 kernel_debug(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, 0);
145 kernel_debug(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
146 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
149 struct proc
*curproc
;
151 unsigned long long now
;
154 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
156 kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
158 if ( !(kdebug_enable
& (KDEBUG_ENABLE_ENTROPY
| KDEBUG_ENABLE_TRACE
)))
161 s
= ml_set_interrupts_enabled(FALSE
);
162 lck_spin_lock(kd_trace_lock
);
164 if (kdebug_slowcheck
== 0)
167 if (kdebug_enable
& KDEBUG_ENABLE_ENTROPY
)
169 if (kd_entropy_indx
< kd_entropy_count
)
171 kd_entropy_buffer
[ kd_entropy_indx
] = mach_absolute_time();
175 if (kd_entropy_indx
== kd_entropy_count
)
177 /* Disable entropy collection */
178 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
179 kdebug_slowcheck
&= ~SLOW_ENTROPY
;
183 if ( (kdebug_slowcheck
& SLOW_NOLOG
) )
185 lck_spin_unlock(kd_trace_lock
);
186 ml_set_interrupts_enabled(s
);
190 if (kdebug_flags
& KDBG_PIDCHECK
)
192 /* If kdebug flag is not set for current proc, return */
193 curproc
= current_proc();
194 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
195 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
197 lck_spin_unlock(kd_trace_lock
);
198 ml_set_interrupts_enabled(s
);
202 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
204 /* If kdebug flag is set for current proc, return */
205 curproc
= current_proc();
206 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
207 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
209 lck_spin_unlock(kd_trace_lock
);
210 ml_set_interrupts_enabled(s
);
215 if (kdebug_flags
& KDBG_RANGECHECK
)
217 if ((debugid
< kdlog_beg
) || (debugid
>= kdlog_end
)
218 && (debugid
>> 24 != DBG_TRACE
))
220 lck_spin_unlock(kd_trace_lock
);
221 ml_set_interrupts_enabled(s
);
225 else if (kdebug_flags
& KDBG_VALCHECK
)
227 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
228 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
229 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
230 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
231 (debugid
>> 24 != DBG_TRACE
))
233 lck_spin_unlock(kd_trace_lock
);
234 ml_set_interrupts_enabled(s
);
241 kd
->debugid
= debugid
;
246 kd
->arg5
= (int)current_thread();
248 now
= mach_absolute_time() & KDBG_TIMESTAMP_MASK
;
250 /* Watch for out of order timestamps */
252 if (now
< kd_prev_timebase
)
254 now
= ++kd_prev_timebase
& KDBG_TIMESTAMP_MASK
;
258 /* Then just store the previous timestamp */
259 kd_prev_timebase
= now
;
261 kd
->timestamp
= now
| (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT
);
265 if (kd_bufptr
>= kd_buflast
)
266 kd_bufptr
= kd_buffer
;
267 if (kd_bufptr
== kd_readlast
) {
268 if (kdebug_flags
& KDBG_NOWRAP
)
269 kdebug_slowcheck
|= SLOW_NOLOG
;
270 kdebug_flags
|= KDBG_WRAPPED
;
272 lck_spin_unlock(kd_trace_lock
);
273 ml_set_interrupts_enabled(s
);
277 kernel_debug1(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
278 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
281 struct proc
*curproc
;
283 unsigned long long now
;
285 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
287 (void)kdebug_chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
289 if ( !(kdebug_enable
& (KDEBUG_ENABLE_ENTROPY
| KDEBUG_ENABLE_TRACE
)))
292 s
= ml_set_interrupts_enabled(FALSE
);
293 lck_spin_lock(kd_trace_lock
);
295 if (kdebug_slowcheck
== 0)
298 if ( (kdebug_slowcheck
& SLOW_NOLOG
) )
300 lck_spin_unlock(kd_trace_lock
);
301 ml_set_interrupts_enabled(s
);
305 if (kdebug_flags
& KDBG_PIDCHECK
)
307 /* If kdebug flag is not set for current proc, return */
308 curproc
= current_proc();
309 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
310 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
312 lck_spin_unlock(kd_trace_lock
);
313 ml_set_interrupts_enabled(s
);
317 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
319 /* If kdebug flag is set for current proc, return */
320 curproc
= current_proc();
321 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
322 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
324 lck_spin_unlock(kd_trace_lock
);
325 ml_set_interrupts_enabled(s
);
330 if (kdebug_flags
& KDBG_RANGECHECK
)
332 if ((debugid
< kdlog_beg
) || (debugid
>= kdlog_end
)
333 && (debugid
>> 24 != DBG_TRACE
))
335 lck_spin_unlock(kd_trace_lock
);
336 ml_set_interrupts_enabled(s
);
340 else if (kdebug_flags
& KDBG_VALCHECK
)
342 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
343 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
344 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
345 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
346 (debugid
>> 24 != DBG_TRACE
))
348 lck_spin_unlock(kd_trace_lock
);
349 ml_set_interrupts_enabled(s
);
356 kd
->debugid
= debugid
;
363 now
= mach_absolute_time() & KDBG_TIMESTAMP_MASK
;
365 /* Watch for out of order timestamps */
367 if (now
< kd_prev_timebase
)
369 now
= ++kd_prev_timebase
& KDBG_TIMESTAMP_MASK
;
373 /* Then just store the previous timestamp */
374 kd_prev_timebase
= now
;
376 kd
->timestamp
= now
| (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT
);
380 if (kd_bufptr
>= kd_buflast
)
381 kd_bufptr
= kd_buffer
;
382 if (kd_bufptr
== kd_readlast
) {
383 if (kdebug_flags
& KDBG_NOWRAP
)
384 kdebug_slowcheck
|= SLOW_NOLOG
;
385 kdebug_flags
|= KDBG_WRAPPED
;
387 lck_spin_unlock(kd_trace_lock
);
388 ml_set_interrupts_enabled(s
);
396 if (kdebug_flags
& KDBG_LOCKINIT
)
399 * allocate lock group attribute and group
401 kd_trace_lock_grp_attr
= lck_grp_attr_alloc_init();
402 //lck_grp_attr_setstat(kd_trace_lock_grp_attr);
403 kd_trace_lock_grp
= lck_grp_alloc_init("kdebug", kd_trace_lock_grp_attr
);
405 kd_trace_mtx_grp_attr
= lck_grp_attr_alloc_init();
406 //lck_grp_attr_setstat(kd_trace_mtx_grp_attr);
407 kd_trace_mtx_grp
= lck_grp_alloc_init("kdebug", kd_trace_mtx_grp_attr
);
410 * allocate the lock attribute
412 kd_trace_lock_attr
= lck_attr_alloc_init();
413 //lck_attr_setdebug(kd_trace_lock_attr);
415 kd_trace_mtx_attr
= lck_attr_alloc_init();
416 //lck_attr_setdebug(kd_trace_mtx_attr);
420 * allocate and initialize spin lock and mutex
422 kd_trace_lock
= lck_spin_alloc_init(kd_trace_lock_grp
, kd_trace_lock_attr
);
423 kd_trace_mtx
= lck_mtx_alloc_init(kd_trace_mtx_grp
, kd_trace_mtx_attr
);
425 kdebug_flags
|= KDBG_LOCKINIT
;
433 kd_bufsize
= nkdbufs
* sizeof(kd_buf
);
435 if (kmem_alloc(kernel_map
, &kd_buftomem
,
436 (vm_size_t
)kd_bufsize
) == KERN_SUCCESS
)
437 kd_buffer
= (kd_buf
*) kd_buftomem
;
439 kd_buffer
= (kd_buf
*) 0;
440 kdebug_flags
&= ~KDBG_WRAPPED
;
443 kdebug_flags
|= (KDBG_INIT
| KDBG_BUFINIT
);
444 kd_bufptr
= kd_buffer
;
445 kd_buflast
= &kd_bufptr
[nkdbufs
];
446 kd_readlast
= kd_bufptr
;
447 kd_prev_timebase
= 0LL;
451 kdebug_flags
&= ~(KDBG_INIT
| KDBG_BUFINIT
);
463 * Disable trace collecting
464 * First make sure we're not in
465 * the middle of cutting a trace
467 s
= ml_set_interrupts_enabled(FALSE
);
468 lck_spin_lock(kd_trace_lock
);
470 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
471 kdebug_slowcheck
|= SLOW_NOLOG
;
473 lck_spin_unlock(kd_trace_lock
);
474 ml_set_interrupts_enabled(s
);
476 if ((kdebug_flags
& KDBG_INIT
) && (kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
)
477 kmem_free(kernel_map
, (vm_offset_t
)kd_buffer
, kd_bufsize
);
479 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
481 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
482 kdebug_flags
&= ~KDBG_MAPINIT
;
484 kd_mapptr
= (kd_threadmap
*) 0;
488 ret
= kdbg_bootstrap();
493 void kdbg_trace_data(struct proc
*proc
, long *arg_pid
)
498 *arg_pid
= proc
->p_pid
;
504 void kdbg_trace_string(struct proc
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
)
520 /* Collect the pathname for tracing */
521 dbg_nameptr
= proc
->p_comm
;
522 dbg_namelen
= strlen(proc
->p_comm
);
528 if(dbg_namelen
> sizeof(dbg_parms
))
529 dbg_namelen
= sizeof(dbg_parms
);
531 for(i
=0;dbg_namelen
> 0; i
++)
533 dbg_parms
[i
]=*(long*)dbg_nameptr
;
534 dbg_nameptr
+= sizeof(long);
535 dbg_namelen
-= sizeof(long);
545 kdbg_resolve_map(thread_t th_act
, krt_t
*t
)
547 kd_threadmap
*mapptr
;
549 if(t
->count
< t
->maxcount
)
551 mapptr
=&t
->map
[t
->count
];
552 mapptr
->thread
= (unsigned int)th_act
;
553 (void) strncpy (mapptr
->command
, t
->atts
->task_comm
,
554 sizeof(t
->atts
->task_comm
)-1);
555 mapptr
->command
[sizeof(t
->atts
->task_comm
)-1] = '\0';
558 Some kernel threads have no associated pid.
559 We still need to mark the entry as valid.
562 mapptr
->valid
= t
->atts
->pid
;
574 int tts_count
; /* number of task-to-string structures */
575 struct tts
*tts_mapptr
;
576 unsigned int tts_mapsize
= 0;
577 unsigned int tts_maptomem
=0;
581 if (kdebug_flags
& KDBG_MAPINIT
)
584 /* Calculate the sizes of map buffers*/
585 for (p
= allproc
.lh_first
, kd_mapcount
=0, tts_count
=0; p
;
586 p
= p
->p_list
.le_next
)
588 kd_mapcount
+= get_task_numacts((task_t
)p
->task
);
593 * The proc count could change during buffer allocation,
594 * so introduce a small fudge factor to bump up the
595 * buffer sizes. This gives new tasks some chance of
596 * making into the tables. Bump up by 10%.
598 kd_mapcount
+= kd_mapcount
/10;
599 tts_count
+= tts_count
/10;
601 kd_mapsize
= kd_mapcount
* sizeof(kd_threadmap
);
602 if((kmem_alloc(kernel_map
, & kd_maptomem
,
603 (vm_size_t
)kd_mapsize
) == KERN_SUCCESS
))
605 kd_mapptr
= (kd_threadmap
*) kd_maptomem
;
606 bzero(kd_mapptr
, kd_mapsize
);
609 kd_mapptr
= (kd_threadmap
*) 0;
611 tts_mapsize
= tts_count
* sizeof(struct tts
);
612 if((kmem_alloc(kernel_map
, & tts_maptomem
,
613 (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
))
615 tts_mapptr
= (struct tts
*) tts_maptomem
;
616 bzero(tts_mapptr
, tts_mapsize
);
619 tts_mapptr
= (struct tts
*) 0;
623 * We need to save the procs command string
624 * and take a reference for each task associated
625 * with a valid process
629 for (p
= allproc
.lh_first
, i
=0; p
&& i
< tts_count
;
630 p
= p
->p_list
.le_next
) {
631 if (p
->p_flag
& P_WEXIT
)
635 task_reference(p
->task
);
636 tts_mapptr
[i
].task
= p
->task
;
637 tts_mapptr
[i
].pid
= p
->p_pid
;
638 (void)strncpy(&tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
) - 1);
646 if (kd_mapptr
&& tts_mapptr
)
648 kdebug_flags
|= KDBG_MAPINIT
;
649 /* Initialize thread map data */
650 akrt
.map
= kd_mapptr
;
652 akrt
.maxcount
= kd_mapcount
;
654 for (i
=0; i
< tts_count
; i
++)
656 akrt
.atts
= &tts_mapptr
[i
];
657 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
);
658 task_deallocate((task_t
) tts_mapptr
[i
].task
);
660 kmem_free(kernel_map
, (vm_offset_t
)tts_mapptr
, tts_mapsize
);
670 * Clean up the trace buffer
671 * First make sure we're not in
672 * the middle of cutting a trace
674 s
= ml_set_interrupts_enabled(FALSE
);
675 lck_spin_lock(kd_trace_lock
);
677 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
678 kdebug_slowcheck
= SLOW_NOLOG
;
680 if (kdebug_enable
& KDEBUG_ENABLE_ENTROPY
)
681 kdebug_slowcheck
|= SLOW_ENTROPY
;
683 lck_spin_unlock(kd_trace_lock
);
684 ml_set_interrupts_enabled(s
);
686 global_state_pid
= -1;
687 kdebug_flags
&= ~KDBG_BUFINIT
;
688 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
689 kdebug_flags
&= ~(KDBG_NOWRAP
| KDBG_RANGECHECK
| KDBG_VALCHECK
);
690 kdebug_flags
&= ~(KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
);
691 kmem_free(kernel_map
, (vm_offset_t
)kd_buffer
, kd_bufsize
);
692 kd_buffer
= (kd_buf
*)0;
694 kd_prev_timebase
= 0LL;
696 /* Clean up the thread map buffer */
697 kdebug_flags
&= ~KDBG_MAPINIT
;
698 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
699 kd_mapptr
= (kd_threadmap
*) 0;
704 kdbg_setpid(kd_regtype
*kdr
)
710 pid
= (pid_t
)kdr
->value1
;
711 flag
= (int)kdr
->value2
;
715 if ((p
= pfind(pid
)) == NULL
)
719 if (flag
== 1) /* turn on pid check for this and all pids */
721 kdebug_flags
|= KDBG_PIDCHECK
;
722 kdebug_flags
&= ~KDBG_PIDEXCLUDE
;
723 kdebug_slowcheck
|= SLOW_CHECKS
;
725 p
->p_flag
|= P_KDEBUG
;
727 else /* turn off pid check for this pid value */
729 /* Don't turn off all pid checking though */
730 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
731 p
->p_flag
&= ~P_KDEBUG
;
740 /* This is for pid exclusion in the trace buffer */
741 kdbg_setpidex(kd_regtype
*kdr
)
747 pid
= (pid_t
)kdr
->value1
;
748 flag
= (int)kdr
->value2
;
752 if ((p
= pfind(pid
)) == NULL
)
756 if (flag
== 1) /* turn on pid exclusion */
758 kdebug_flags
|= KDBG_PIDEXCLUDE
;
759 kdebug_flags
&= ~KDBG_PIDCHECK
;
760 kdebug_slowcheck
|= SLOW_CHECKS
;
762 p
->p_flag
|= P_KDEBUG
;
764 else /* turn off pid exclusion for this pid value */
766 /* Don't turn off all pid exclusion though */
767 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
768 p
->p_flag
&= ~P_KDEBUG
;
777 /* This is for setting a minimum decrementer value */
778 kdbg_setrtcdec(kd_regtype
*kdr
)
783 decval
= (natural_t
)kdr
->value1
;
785 if (decval
&& decval
< KDBG_MINRTCDEC
)
789 rtclock_decrementer_min
= decval
;
798 kdbg_setreg(kd_regtype
* kdr
)
801 unsigned int val_1
, val_2
, val
;
804 case KDBG_CLASSTYPE
:
805 val_1
= (kdr
->value1
& 0xff);
806 val_2
= (kdr
->value2
& 0xff);
807 kdlog_beg
= (val_1
<<24);
808 kdlog_end
= (val_2
<<24);
809 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
810 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
811 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
812 kdebug_slowcheck
|= SLOW_CHECKS
;
814 case KDBG_SUBCLSTYPE
:
815 val_1
= (kdr
->value1
& 0xff);
816 val_2
= (kdr
->value2
& 0xff);
818 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
819 kdlog_end
= ((val_1
<<24) | (val
<< 16));
820 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
821 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
822 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
823 kdebug_slowcheck
|= SLOW_CHECKS
;
825 case KDBG_RANGETYPE
:
826 kdlog_beg
= (kdr
->value1
);
827 kdlog_end
= (kdr
->value2
);
828 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
829 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
830 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
831 kdebug_slowcheck
|= SLOW_CHECKS
;
834 kdlog_value1
= (kdr
->value1
);
835 kdlog_value2
= (kdr
->value2
);
836 kdlog_value3
= (kdr
->value3
);
837 kdlog_value4
= (kdr
->value4
);
838 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
839 kdebug_flags
&= ~KDBG_RANGECHECK
; /* Turn off range check */
840 kdebug_flags
|= KDBG_VALCHECK
; /* Turn on specific value check */
841 kdebug_slowcheck
|= SLOW_CHECKS
;
844 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
846 if ( (kdebug_flags
& (KDBG_RANGECHECK
| KDBG_VALCHECK
| KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
)) )
847 kdebug_slowcheck
|= SLOW_CHECKS
;
849 kdebug_slowcheck
&= ~SLOW_CHECKS
;
861 kdbg_getreg(kd_regtype
* kdr
)
864 unsigned int val_1
, val_2
, val
;
867 case KDBG_CLASSTYPE
:
868 val_1
= (kdr
->value1
& 0xff);
870 kdlog_beg
= (val_1
<<24);
871 kdlog_end
= (val_2
<<24);
872 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
873 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
875 case KDBG_SUBCLSTYPE
:
876 val_1
= (kdr
->value1
& 0xff);
877 val_2
= (kdr
->value2
& 0xff);
879 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
880 kdlog_end
= ((val_1
<<24) | (val
<< 16));
881 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
882 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
884 case KDBG_RANGETYPE
:
885 kdlog_beg
= (kdr
->value1
);
886 kdlog_end
= (kdr
->value2
);
887 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
888 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
891 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
905 kdbg_readmap(user_addr_t buffer
, size_t *number
)
911 count
= avail
/sizeof (kd_threadmap
);
913 if (count
&& (count
<= kd_mapcount
))
915 if((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
917 if (*number
< kd_mapsize
)
921 if (copyout(kd_mapptr
, buffer
, kd_mapsize
))
931 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
933 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
934 kdebug_flags
&= ~KDBG_MAPINIT
;
936 kd_mapptr
= (kd_threadmap
*) 0;
944 kdbg_getentropy (user_addr_t buffer
, size_t *number
, int ms_timeout
)
948 int count
= 0; /* The number of timestamp entries that will fill buffer */
950 if (kd_entropy_buffer
)
953 kd_entropy_count
= avail
/sizeof(mach_timespec_t
);
954 kd_entropy_bufsize
= kd_entropy_count
* sizeof(mach_timespec_t
);
957 /* Enforce maximum entropy entries here if needed */
959 /* allocate entropy buffer */
960 if (kmem_alloc(kernel_map
, &kd_entropy_buftomem
,
961 (vm_size_t
)kd_entropy_bufsize
) == KERN_SUCCESS
)
963 kd_entropy_buffer
= (uint64_t *) kd_entropy_buftomem
;
967 kd_entropy_buffer
= (uint64_t *) 0;
968 kd_entropy_count
= 0;
976 /* Enable entropy sampling */
977 kdebug_enable
|= KDEBUG_ENABLE_ENTROPY
;
978 kdebug_slowcheck
|= SLOW_ENTROPY
;
980 ret
= tsleep (kdbg_getentropy
, PRIBIO
| PCATCH
, "kd_entropy", (ms_timeout
/(1000/HZ
)));
982 /* Disable entropy sampling */
983 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
984 kdebug_slowcheck
&= ~SLOW_ENTROPY
;
989 if (kd_entropy_indx
> 0)
991 /* copyout the buffer */
992 if (copyout(kd_entropy_buffer
, buffer
, kd_entropy_indx
* sizeof(mach_timespec_t
)))
995 *number
= kd_entropy_indx
;
999 kd_entropy_count
= 0;
1000 kd_entropy_indx
= 0;
1001 kd_entropy_buftomem
= 0;
1002 kmem_free(kernel_map
, (vm_offset_t
)kd_entropy_buffer
, kd_entropy_bufsize
);
1003 kd_entropy_buffer
= (uint64_t *) 0;
1009 * This function is provided for the CHUD toolkit only.
1011 * zero disables kdebug_chudhook function call
1012 * non-zero enables kdebug_chudhook function call
1014 * address of the enabled kdebug_chudhook function
1017 void kdbg_control_chud(int val
, void *fn
)
1020 /* enable chudhook */
1021 kdebug_chudhook
= fn
;
1022 kdebug_enable
|= KDEBUG_ENABLE_CHUD
;
1025 /* disable chudhook */
1026 kdebug_enable
&= ~KDEBUG_ENABLE_CHUD
;
1027 kdebug_chudhook
= 0;
1032 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
1037 unsigned int value
= name
[1];
1039 kbufinfo_t kd_bufinfo
;
1041 struct proc
*p
, *curproc
;
1045 lck_mtx_lock(kd_trace_mtx
);
1047 if (name
[0] == KERN_KDGETBUF
) {
1049 * Does not alter the global_state_pid
1050 * This is a passive request.
1052 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
1054 * There is not enough room to return even
1055 * the first element of the info structure.
1057 lck_mtx_unlock(kd_trace_mtx
);
1061 kd_bufinfo
.nkdbufs
= nkdbufs
;
1062 kd_bufinfo
.nkdthreads
= kd_mapsize
/ sizeof(kd_threadmap
);
1064 if ( (kdebug_slowcheck
& SLOW_NOLOG
) )
1065 kd_bufinfo
.nolog
= 1;
1067 kd_bufinfo
.nolog
= 0;
1068 kd_bufinfo
.flags
= kdebug_flags
;
1069 kd_bufinfo
.bufid
= global_state_pid
;
1071 if (size
>= sizeof(kd_bufinfo
)) {
1073 * Provide all the info we have
1075 if (copyout (&kd_bufinfo
, where
, sizeof(kd_bufinfo
))) {
1076 lck_mtx_unlock(kd_trace_mtx
);
1083 * For backwards compatibility, only provide
1084 * as much info as there is room for.
1086 if (copyout (&kd_bufinfo
, where
, size
)) {
1087 lck_mtx_unlock(kd_trace_mtx
);
1092 lck_mtx_unlock(kd_trace_mtx
);
1095 } else if (name
[0] == KERN_KDGETENTROPY
) {
1096 if (kd_entropy_buffer
)
1099 ret
= kdbg_getentropy(where
, sizep
, value
);
1100 lck_mtx_unlock(kd_trace_mtx
);
1105 if (curproc
= current_proc())
1106 curpid
= curproc
->p_pid
;
1108 lck_mtx_unlock(kd_trace_mtx
);
1112 if (global_state_pid
== -1)
1113 global_state_pid
= curpid
;
1114 else if (global_state_pid
!= curpid
) {
1115 if ((p
= pfind(global_state_pid
)) == NULL
) {
1117 * The global pid no longer exists
1119 global_state_pid
= curpid
;
1122 * The global pid exists, deny this request
1124 lck_mtx_unlock(kd_trace_mtx
);
1132 value
&= KDBG_USERFLAGS
;
1133 kdebug_flags
|= value
;
1136 value
&= KDBG_USERFLAGS
;
1137 kdebug_flags
&= ~value
;
1139 case KERN_KDENABLE
: /* used to enable or disable */
1142 /* enable only if buffer is initialized */
1143 if (!(kdebug_flags
& KDBG_BUFINIT
))
1148 kdebug_enable
|= KDEBUG_ENABLE_TRACE
;
1149 kdebug_slowcheck
&= ~SLOW_NOLOG
;
1153 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
1154 kdebug_slowcheck
|= SLOW_NOLOG
;
1159 /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
1160 /* 'value' is the desired number of trace entries */
1161 max_entries
= (sane_size
/4) / sizeof(kd_buf
);
1162 if (value
<= max_entries
)
1165 nkdbufs
= max_entries
;
1174 if(size
< sizeof(kd_regtype
)) {
1178 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1182 ret
= kdbg_setreg(&kd_Reg
);
1185 if(size
< sizeof(kd_regtype
)) {
1189 ret
= kdbg_getreg(&kd_Reg
);
1190 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))){
1195 ret
= kdbg_read(where
, sizep
);
1198 if (size
< sizeof(kd_regtype
)) {
1202 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1206 ret
= kdbg_setpid(&kd_Reg
);
1209 if (size
< sizeof(kd_regtype
)) {
1213 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1217 ret
= kdbg_setpidex(&kd_Reg
);
1220 ret
= kdbg_readmap(where
, sizep
);
1222 case KERN_KDSETRTCDEC
:
1223 if (size
< sizeof(kd_regtype
)) {
1227 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1231 ret
= kdbg_setrtcdec(&kd_Reg
);
1237 lck_mtx_unlock(kd_trace_mtx
);
1242 kdbg_read(user_addr_t buffer
, size_t *number
)
1249 unsigned int my_kdebug_flags
;
1250 kd_buf
* my_kd_bufptr
;
1252 s
= ml_set_interrupts_enabled(FALSE
);
1253 lck_spin_lock(kd_trace_lock
);
1255 my_kdebug_flags
= kdebug_flags
;
1256 my_kd_bufptr
= kd_bufptr
;
1258 lck_spin_unlock(kd_trace_lock
);
1259 ml_set_interrupts_enabled(s
);
1261 count
= avail
/sizeof(kd_buf
);
1264 if ((my_kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
) {
1265 if (count
> nkdbufs
)
1268 if (!(my_kdebug_flags
& KDBG_WRAPPED
)) {
1269 if (my_kd_bufptr
== kd_readlast
) {
1273 if (my_kd_bufptr
> kd_readlast
) {
1274 copycount
= my_kd_bufptr
- kd_readlast
;
1275 if (copycount
> count
)
1278 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
))) {
1282 kd_readlast
+= copycount
;
1283 *number
= copycount
;
1287 if ( (my_kdebug_flags
& KDBG_WRAPPED
) ) {
1288 /* Note that by setting kd_readlast equal to my_kd_bufptr,
1289 * we now treat the kd_buffer read the same as if we weren't
1290 * wrapped and my_kd_bufptr was less than kd_readlast.
1292 kd_readlast
= my_kd_bufptr
;
1293 kdebug_flags
&= ~KDBG_WRAPPED
;
1296 * first copyout from readlast to end of kd_buffer
1298 copycount
= kd_buflast
- kd_readlast
;
1299 if (copycount
> count
)
1301 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
))) {
1305 buffer
+= (copycount
* sizeof(kd_buf
));
1307 totalcount
= copycount
;
1308 kd_readlast
+= copycount
;
1310 if (kd_readlast
== kd_buflast
)
1311 kd_readlast
= kd_buffer
;
1313 *number
= totalcount
;
1316 /* second copyout from top of kd_buffer to bufptr */
1317 copycount
= my_kd_bufptr
- kd_readlast
;
1318 if (copycount
> count
)
1320 if (copycount
== 0) {
1321 *number
= totalcount
;
1324 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
1327 kd_readlast
+= copycount
;
1328 totalcount
+= copycount
;
1329 *number
= totalcount
;
1332 } /* end if KDBG_BUFINIT */
1333 } /* end if count */
1337 unsigned char *getProcName(struct proc
*proc
);
1338 unsigned char *getProcName(struct proc
*proc
) {
1340 return (unsigned char *) &proc
->p_comm
; /* Return pointer to the proc name */