/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <vm/vm_kern.h>
/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *   kd_entropy_buffer   = 0;
unsigned int kd_entropy_bufsize  = 0;
unsigned int kd_entropy_count    = 0;
unsigned int kd_entropy_indx     = 0;
unsigned int kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
kd_buf * kd_bufptr;
unsigned int kd_buftomem = 0;
kd_buf * kd_buffer = 0;
kd_buf * kd_buflast;
kd_buf * kd_readlast;
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

unsigned long long kd_prev_timebase = 0LL;

static lck_mtx_t      * kd_trace_mtx;
static lck_grp_t      * kd_trace_mtx_grp;
static lck_attr_t     * kd_trace_mtx_attr;
static lck_grp_attr_t * kd_trace_mtx_grp_attr;

static lck_spin_t     * kd_trace_lock;
static lck_grp_t      * kd_trace_lock_grp;
static lck_attr_t     * kd_trace_lock_attr;
static lck_grp_attr_t * kd_trace_lock_grp_attr;

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK	0xfffffffc
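/*
 * For reference: a debugid packs four fields (see <sys/kdebug.h>):
 *
 *   bits 31-24   class     (DBG_MACH, DBG_FSYSTEM, ...)
 *   bits 23-16   subclass  (e.g. DBG_MACH_SCHED)
 *   bits 15-2    code
 *   bits  1-0    function qualifier (DBG_FUNC_START/END/NONE)
 *
 * DBG_FUNC_MASK strips the qualifier, so the KDBG_VALCHECK comparisons
 * below match the START, END and NONE flavors of the same tracepoint.
 */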
/* task to string structure */
struct tts
{
	task_t   *task;		/* from procs task */
	pid_t     pid;		/* from procs p_pid */
	char      task_comm[20];/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */
/*
 * Support syscall SYS_kdebug_trace
 */
int
kdebug_trace(p, uap, retval)
	struct proc *p;
	struct kdebug_trace_args *uap;
	register_t *retval;
{
	if ( (kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return(0);
}
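/*
 * Illustrative only: from user space this syscall is normally reached
 * through the kdebug_trace() stub in libc, roughly:
 *
 *	kdebug_trace(MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE,
 *		     arg1, arg2, arg3, arg4);
 *
 * The specific debugid shown is an arbitrary example; any class/
 * subclass/code accepted by the filters below can be traced this way.
 */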
void
kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	boolean_t s;
	unsigned long long now;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			return;
	}
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kd_trace_lock);

	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
	{
		if (kd_entropy_indx < kd_entropy_count)
		{
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}
		if (kd_entropy_indx == kd_entropy_count)
		{
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
	{
		lck_spin_unlock(kd_trace_lock);
		ml_set_interrupts_enabled(s);
		return;
	}
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/* Drop events outside [kdlog_beg, kdlog_end) unless they
		 * belong to the DBG_TRACE class itself.
		 */
		if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
		    && ((debugid >> 24) != DBG_TRACE))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    ((debugid >> 24) != DBG_TRACE))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

record_trace:
	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = (int)current_thread();

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	/* Watch for out of order timestamps */
	if (now < kd_prev_timebase)
	{
		/* Force time to move forward by at least one tick */
		now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

	kd_bufptr++;

	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdebug_flags |= KDBG_WRAPPED;
	}
	lck_spin_unlock(kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
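/*
 * kernel_debug1 below is identical to kernel_debug except that the
 * caller supplies arg5 explicitly, whereas kernel_debug always records
 * the current thread there. It backs the KERNEL_DEBUG1 macro in
 * <sys/kdebug.h>.
 */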
void
kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
{
	kd_buf * kd;
	struct proc *curproc;
	boolean_t s;
	unsigned long long now;

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			(void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			return;
	}
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kd_trace_lock);

	if (kdebug_slowcheck == 0)
		goto record_trace;

	if ( (kdebug_slowcheck & SLOW_NOLOG) )
	{
		lck_spin_unlock(kd_trace_lock);
		ml_set_interrupts_enabled(s);
		return;
	}
	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();
		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	if (kdebug_flags & KDBG_RANGECHECK)
	{
		/* As in kernel_debug: drop out-of-range events unless they
		 * are DBG_TRACE class events.
		 */
		if (((debugid < kdlog_beg) || (debugid >= kdlog_end))
		    && ((debugid >> 24) != DBG_TRACE))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    ((debugid >> 24) != DBG_TRACE))
		{
			lck_spin_unlock(kd_trace_lock);
			ml_set_interrupts_enabled(s);
			return;
		}
	}

record_trace:
	kd = kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	/* Watch for out of order timestamps */
	if (now < kd_prev_timebase)
	{
		now = ++kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	}
	else
	{
		/* Then just store the previous timestamp */
		kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu_number()) << KDBG_CPU_SHIFT);

	kd_bufptr++;

	if (kd_bufptr >= kd_buflast)
		kd_bufptr = kd_buffer;
	if (kd_bufptr == kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdebug_flags |= KDBG_WRAPPED;
	}
	lck_spin_unlock(kd_trace_lock);
	ml_set_interrupts_enabled(s);
}
static void
kdbg_lock_init(void)
{
	if (kdebug_flags & KDBG_LOCKINIT)
		return;
	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_lock_grp_attr = lck_grp_attr_alloc_init();
	//lck_grp_attr_setstat(kd_trace_lock_grp_attr);
	kd_trace_lock_grp = lck_grp_alloc_init("kdebug", kd_trace_lock_grp_attr);

	kd_trace_mtx_grp_attr = lck_grp_attr_alloc_init();
	//lck_grp_attr_setstat(kd_trace_mtx_grp_attr);
	kd_trace_mtx_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_lock_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(kd_trace_lock_attr);

	kd_trace_mtx_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(kd_trace_mtx_attr);

	/*
	 * allocate and initialize spin lock and mutex
	 */
	kd_trace_lock = lck_spin_alloc_init(kd_trace_lock_grp, kd_trace_lock_attr);
	kd_trace_mtx  = lck_mtx_alloc_init(kd_trace_mtx_grp, kd_trace_mtx_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}
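/*
 * Locking note: kd_trace_mtx serializes the control path (kdbg_control
 * and its helpers), while kd_trace_lock is taken with interrupts
 * disabled around every buffer access, since kernel_debug can fire
 * from interrupt context.
 */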
int
kdbg_bootstrap(void)
{
	kd_bufsize = nkdbufs * sizeof(kd_buf);

	if (kmem_alloc(kernel_map, &kd_buftomem,
		       (vm_size_t)kd_bufsize) == KERN_SUCCESS)
		kd_buffer = (kd_buf *) kd_buftomem;
	else
		kd_buffer = (kd_buf *) 0;
	kdebug_flags &= ~KDBG_WRAPPED;

	if (kd_buffer) {
		kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
		kd_bufptr = kd_buffer;
		kd_buflast = &kd_bufptr[nkdbufs];
		kd_readlast = kd_bufptr;
		kd_prev_timebase = 0LL;
		return(0);
	} else {
		kd_bufsize = 0;
		kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
		return(EINVAL);
	}
}
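/*
 * Sizing note: with the default nkdbufs of 8192 and a kd_buf of
 * 32 bytes (64-bit timestamp, five argument words and the debugid),
 * the trace buffer costs roughly 256KB of wired kernel memory.
 */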
int
kdbg_reinit(void)
{
	int ret = 0;
	boolean_t s;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kd_trace_lock);

	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	lck_spin_unlock(kd_trace_lock);
	ml_set_interrupts_enabled(s);

	if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
		kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	ret = kdbg_bootstrap();

	return(ret);
}
void kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}
void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];
	int i;

	if (!proc)
	{
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}

	/* Collect the pathname for tracing */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);

	for (i = 0; dbg_namelen > 0; i++)
	{
		dbg_parms[i] = *(long*)dbg_nameptr;
		dbg_nameptr += sizeof(long);
		dbg_namelen -= sizeof(long);
	}

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
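/*
 * The command name is deliberately packed into four longs: a kdebug
 * event carries at most four payload arguments, so callers can emit
 * the name as arg1..arg4 of a single event rather than as a string.
 */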
void kdbg_resolve_map(thread_t th_act, krt_t *t)
{
	kd_threadmap *mapptr;

	if (t->count < t->maxcount)
	{
		mapptr = &t->map[t->count];
		mapptr->thread = (unsigned int)th_act;
		(void) strncpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm)-1);
		mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';

		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
void kdbg_mapinit(void)
{
	struct proc *p;
	struct krt akrt;
	int tts_count;		/* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	unsigned int tts_maptomem = 0;
	int i;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* Calculate the sizes of map buffers */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
	     p = p->p_list.le_next)
	{
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount/10;
	tts_count += tts_count/10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if ((kmem_alloc(kernel_map, &kd_maptomem,
			(vm_size_t)kd_mapsize) == KERN_SUCCESS))
	{
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	}
	else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if ((kmem_alloc(kernel_map, &tts_maptomem,
			(vm_size_t)tts_mapsize) == KERN_SUCCESS))
	{
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	}
	else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		for (p = allproc.lh_first, i = 0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_flag & P_WEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid  = p->p_pid;
				(void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
				i++;
			}
		}
		tts_count = i;
	}

	if (kd_mapptr && tts_mapptr)
	{
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++)
		{
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}
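/*
 * The resulting thread map is what lets user-space tools translate the
 * thread ID stashed in each event's arg5 back into a command name; it
 * is snapshotted here, at trace setup, rather than per event.
 */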
void kdbg_clear(void)
{
	boolean_t s;

	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kd_trace_lock);

	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	lck_spin_unlock(kd_trace_lock);
	ml_set_interrupts_enabled(s);

	global_state_pid = -1;
	kdebug_flags &= ~KDBG_BUFINIT;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
	kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
	kd_buffer = (kd_buf *)0;
	kd_bufsize = 0;
	kd_prev_timebase = 0LL;

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = pfind(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid check for this and all pids */
			{
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_flag |= P_KDEBUG;
			}
			else	/* turn off pid check for this pid value */
			{
				/* Don't turn off all pid checking though */
				/* kdebug_flags &= ~KDBG_PIDCHECK; */
				p->p_flag &= ~P_KDEBUG;
			}
		}
	}
	else
		ret = EINVAL;

	return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = pfind(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid exclusion */
			{
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_flag |= P_KDEBUG;
			}
			else	/* turn off pid exclusion for this pid value */
			{
				/* Don't turn off all pid exclusion though */
				/* kdebug_flags &= ~KDBG_PIDEXCLUDE; */
				p->p_flag &= ~P_KDEBUG;
			}
		}
	}
	else
		ret = EINVAL;

	return(ret);
}
/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
#ifdef ppc
	else {
		extern uint32_t maxDec;

		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
	}
#else
	else
		ret = ENOTSUP;
#endif /* ppc */

	return(ret);
}
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK :
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
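/*
 * Illustrative only: user-space tracers typically reach kdbg_setreg
 * through sysctl. A sketch (assuming the usual six-element kdebug mib)
 * that restricts tracing to a single class:
 *
 *	kd_regtype kr;
 *	int mib[6] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETREG, 0, 0, 0 };
 *	size_t needed = sizeof(kr);
 *
 *	kr.type   = KDBG_CLASSTYPE;
 *	kr.value1 = DBG_FSYSTEM;	// first class included
 *	kr.value2 = DBG_FSYSTEM + 1;	// first class excluded
 *	sysctl(mib, 3, &kr, &needed, NULL, 0);
 */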
int
kdbg_getreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int ret = 0;
	int count = 0;

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret = EINVAL;
			else
			{
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret = EINVAL;
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	return(ret);
}
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;
	int count = 0;	/* The number of timestamp entries that will fill buffer */

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail/sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
	{
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	}
	else
	{
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return(EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0)
	{
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}

	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;
	return(ret);
}
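/*
 * The entropy path repurposes the trace hooks as a jitter source: while
 * KDEBUG_ENABLE_ENTROPY is set, kernel_debug records a raw
 * mach_absolute_time() sample per event instead of a trace record, and
 * the security server harvests the accumulated timestamps here.
 */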
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    void *fn:
 *        address of the enabled kdebug_chudhook function
 */
void kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	int size = *sizep;
	int max_entries;
	unsigned int value = name[1];
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	kdbg_lock_init();
	lck_mtx_lock(kd_trace_mtx);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			lck_mtx_unlock(kd_trace_mtx);
			return(EINVAL);
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout (&kd_bufinfo, where, sizeof(kd_bufinfo))) {
				lck_mtx_unlock(kd_trace_mtx);
				return(EINVAL);
			}
		}
		else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout (&kd_bufinfo, where, size)) {
				lck_mtx_unlock(kd_trace_mtx);
				return(EINVAL);
			}
		}
		lck_mtx_unlock(kd_trace_mtx);
		return(0);
	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		lck_mtx_unlock(kd_trace_mtx);
		return(ret);
	}

	if ((curproc = current_proc()))
		curpid = curproc->p_pid;
	else {
		lck_mtx_unlock(kd_trace_mtx);
		return(ESRCH);
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = pfind(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			lck_mtx_unlock(kd_trace_mtx);
			return(EBUSY);
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:	/* used to enable or disable */
		if (value)
		{
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT))
			{
				ret = EINVAL;
				break;
			}
			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else
		{
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		/* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
		/* 'value' is the desired number of trace entries */
		max_entries = (sane_size/4) / sizeof(kd_buf);
		if (value <= max_entries)
			nkdbufs = value;
		else
			nkdbufs = max_entries;
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	default:
		ret = EINVAL;
	}
	lck_mtx_unlock(kd_trace_mtx);
	return(ret);
}
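/*
 * Typical life cycle, as driven from user space (illustrative only):
 *
 *	KERN_KDSETBUF   - choose nkdbufs
 *	KERN_KDSETUP    - allocate the buffer (kdbg_reinit)
 *	KERN_KDSETREG   - optional filtering
 *	KERN_KDENABLE   - start tracing
 *	KERN_KDREADTR   - drain events (kdbg_read)
 *	KERN_KDTHRMAP   - fetch the thread map
 *	KERN_KDREMOVE   - tear everything down (kdbg_clear)
 */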
int
kdbg_read(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int count = 0;
	int copycount = 0;
	int totalcount = 0;
	boolean_t s;
	unsigned int my_kdebug_flags;
	kd_buf * my_kd_bufptr;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kd_trace_lock);

	/* Snapshot the volatile state, then drop the lock for the copyouts */
	my_kdebug_flags = kdebug_flags;
	my_kd_bufptr = kd_bufptr;

	lck_spin_unlock(kd_trace_lock);
	ml_set_interrupts_enabled(s);

	count = avail/sizeof(kd_buf);

	if (count) {
		if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
			if (count > nkdbufs)
				count = nkdbufs;

			if (!(my_kdebug_flags & KDBG_WRAPPED)) {
				if (my_kd_bufptr == kd_readlast) {
					/* nothing new to read */
					*number = 0;
					return(0);
				}
				if (my_kd_bufptr > kd_readlast) {
					copycount = my_kd_bufptr - kd_readlast;
					if (copycount > count)
						copycount = count;

					if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
						*number = 0;
						return(EINVAL);
					}
					kd_readlast += copycount;
					*number = copycount;
					return(0);
				}
			}
			if ( (my_kdebug_flags & KDBG_WRAPPED) ) {
				/* Note that by setting kd_readlast equal to my_kd_bufptr,
				 * we now treat the kd_buffer read the same as if we weren't
				 * wrapped and my_kd_bufptr was less than kd_readlast.
				 */
				kd_readlast = my_kd_bufptr;
				kdebug_flags &= ~KDBG_WRAPPED;
			}
			/*
			 * first copyout from readlast to end of kd_buffer
			 */
			copycount = kd_buflast - kd_readlast;
			if (copycount > count)
				copycount = count;
			if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) {
				*number = 0;
				return(EINVAL);
			}
			buffer += (copycount * sizeof(kd_buf));
			count -= copycount;
			totalcount = copycount;
			kd_readlast += copycount;

			if (kd_readlast == kd_buflast)
				kd_readlast = kd_buffer;
			if (count == 0) {
				*number = totalcount;
				return(0);
			}

			/* second copyout from top of kd_buffer to bufptr */
			copycount = my_kd_bufptr - kd_readlast;
			if (copycount > count)
				copycount = count;
			if (copycount == 0) {
				*number = totalcount;
				return(0);
			}
			if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
				return(EINVAL);

			kd_readlast += copycount;
			totalcount += copycount;
			*number = totalcount;
			return(0);

		} /* end if KDBG_BUFINIT */
	} /* end if count */
	return(EINVAL);
}
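/*
 * Design note: when the buffer has wrapped, the oldest data starts at
 * my_kd_bufptr, so the drain above is split into two copyouts: first
 * from kd_readlast to the end of the buffer, then from the start of
 * the buffer up to my_kd_bufptr. Events logged while the copyout is in
 * flight are picked up by the caller's next read.
 */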
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}