/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/malloc.h>
#include <sys/kauth.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);
extern uint32_t maxDec;

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *   kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;
#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;
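/*
 * kdebug_slowcheck is consulted on every event in kernel_debug_internal():
 * when it is zero the filter checks below (pid check/exclude, range and
 * value checks, entropy sampling) are skipped entirely, keeping the common
 * logging path short.  SLOW_NOLOG suppresses logging altogether.
 */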
    kd_buf * kd_readlast;
    int      kd_wrapped;	/* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
    uint64_t kd_prev_timebase;
    int      kd_pad[24];	/* pad out to 128 bytes so that no cache line is shared between CPUs */

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	1024
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;
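/*
 * kdcopybuf is a kernel-side staging area: kdbg_read() gathers up to
 * KDCOPYBUF_COUNT events here, in timestamp order, before doing a single
 * copyout() to the caller's buffer.
 */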
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
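/*
 * kdlog_beg/kdlog_end bound the debugid range accepted when KDBG_RANGECHECK
 * is set; kdlog_value1..4 hold up to four exact debugids (function-qualifier
 * bits masked off) accepted when KDBG_VALCHECK is set.  Both filters are
 * programmed by kdbg_setreg() below.
 */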
static lck_mtx_t      *kd_trace_mtx_sysctl;
static lck_grp_t      *kd_trace_mtx_sysctl_grp;
static lck_attr_t     *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t      *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t     *stackshot_subsys_lck_attr;
static lck_mtx_t       stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);

kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);

kdp_stack_snapshot_geterror(void);

kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */
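/*
 * Only one user process at a time "owns" the trace facility: kdbg_control()
 * records its pid here and denies control requests from other pids while
 * that owner still exists.
 */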
#define DBG_FUNC_MASK 0xfffffffc

/* task to string structure */
    task_t task;           /* from procs task */
    pid_t  pid;            /* from procs p_pid */
    char   task_comm[20];  /* from procs p_comm */

typedef struct tts tts_t;

    kd_threadmap *map;     /* pointer to the map buffer */

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
                                unsigned int arg2, unsigned int arg3,
                                unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */
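/*
 * When KDEBUG_ENABLE_CHUD is set, kernel_debug_internal() hands every event
 * to this hook, ahead of any of the kdebug filtering, so the CHUD
 * performance tools can observe the raw event stream.
 */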
__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */

kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)

    if ( (kdebug_enable == 0) )

    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
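    /*
     * kernel_debug() timestamps the event and appends it to the per-CPU
     * trace buffer; the trailing 0 is ignored there, since kernel_debug()
     * substitutes the current thread id as arg5.
     */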
    nentries = nkdbufs / kd_cpus;
    nkdbufs = nentries * kd_cpus;

    kd_bufsize = nentries * sizeof(kd_buf);

    bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

    if (kdcopybuf == 0) {
        if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)

    for (cpu = 0; cpu < kd_cpus; cpu++) {
        if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)

            for (i = 0; i < cpu; i++)
                kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);

            kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

    for (cpu = 0; cpu < kd_cpus; cpu++) {
        kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
        kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
        kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;

    kdebug_flags |= KDBG_BUFINIT;
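    /*
     * Buffer layout established above: each CPU owns a contiguous array of
     * 'nentries' kd_buf records.  kd_bufptr is that CPU's write cursor,
     * kd_buflast points one past the end, and kd_readlast is where
     * kdbg_read() resumes; KDBG_BUFINIT marks the buffers as usable.
     */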
    if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
        for (cpu = 0; cpu < kd_cpus; cpu++)
            kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);

        kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

    kdebug_flags &= ~KDBG_BUFINIT;
kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
                      unsigned int arg4, unsigned int arg5, int entropy_flag)

    struct proc *curproc;
    unsigned long long now;

    s = ml_set_interrupts_enabled(FALSE);

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {

        kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

    if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))

    if (kdebug_slowcheck == 0)

    if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY))

        if (kd_entropy_indx < kd_entropy_count)

            kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();

        if (kd_entropy_indx == kd_entropy_count)

            /* Disable entropy collection */
            kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
            kdebug_slowcheck &= ~SLOW_ENTROPY;

    if ( (kdebug_slowcheck & SLOW_NOLOG) )

    if (kdebug_flags & KDBG_PIDCHECK)

        /* If kdebug flag is not set for current proc, return */
        curproc = current_proc();
        if ((curproc && !(curproc->p_kdebug)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))

    else if (kdebug_flags & KDBG_PIDEXCLUDE)

        /* If kdebug flag is set for current proc, return */
        curproc = current_proc();
        if ((curproc && curproc->p_kdebug) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))

    if (kdebug_flags & KDBG_RANGECHECK)

        if ((debugid < kdlog_beg)
            || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))

    else if (kdebug_flags & KDBG_VALCHECK)

        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))

    kd = kdbip[cpu].kd_bufptr;
    kd->debugid = debugid;

    /* Watch for out of order timestamps */
    if (now < kdbip[cpu].kd_prev_timebase)

        /* if so, just store the previous timestamp + a cycle */
        now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;

    kdbip[cpu].kd_prev_timebase = now;

    kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);

    kdbip[cpu].kd_bufptr++;

    if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
        kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;

    if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
        if (kdebug_flags & KDBG_NOWRAP)
            kdebug_slowcheck |= SLOW_NOLOG;
        kdbip[cpu].kd_wrapped = 1;
        kdebug_flags |= KDBG_WRAPPED;

    ml_set_interrupts_enabled(s);
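    /*
     * Note that the entire logging path above runs with interrupts disabled
     * and takes no locks; each CPU only ever writes its own kd_bufinfo slot,
     * which is what lets kernel_debug_internal() be called from almost any
     * context.
     */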
kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
             unsigned int arg4, __unused unsigned int arg5)

    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);

kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
              unsigned int arg4, unsigned int arg5)

    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
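/*
 * kernel_debug() is the common entry point: it records the calling thread as
 * arg5 and marks the event as usable for entropy collection.
 * kernel_debug1() passes arg5 through verbatim and skips entropy sampling
 * (entropy_flag == 0).
 */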
    host_basic_info_data_t hinfo;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    if (kdebug_flags & KDBG_LOCKINIT)

    /* get the number of cpus and cache it */
    host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    kd_cpus = hinfo.logical_cpu_max;

    if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
                   sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)

    /* allocate lock group attribute and group */
    kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
    kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

    /* allocate the lock attribute */
    kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

    /* allocate and initialize spin lock and mutex */
    kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

    kdebug_flags |= KDBG_LOCKINIT;
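    /*
     * kd_trace_mtx_sysctl serializes the control and read paths
     * (kdbg_control() and the kdbg_read() it invokes) against each other;
     * the event-logging path never takes it and relies on disabled
     * interrupts instead.
     */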
    kdebug_flags &= ~KDBG_WRAPPED;

    return (create_buffers());
    /*
     * Disable trace collecting
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck |= SLOW_NOLOG;

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     */

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)

        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;

    kd_mapptr = (kd_threadmap *) 0;

    ret = kdbg_bootstrap();
kdbg_trace_data(struct proc *proc, long *arg_pid)

    *arg_pid = proc->p_pid;
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)

    /* Collect the pathname for tracing */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = strlen(proc->p_comm);

    if (dbg_namelen > (int)sizeof(dbg_parms))
        dbg_namelen = sizeof(dbg_parms);

    strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
kdbg_resolve_map(thread_t th_act, void *opaque)

    kd_threadmap *mapptr;
    krt_t *t = (krt_t *)opaque;

    if (t->count < t->maxcount)

        mapptr = &t->map[t->count];
        mapptr->thread = (unsigned int)th_act;
        (void) strlcpy(mapptr->command, t->atts->task_comm,
                       sizeof(t->atts->task_comm));

        /*
         * Some kernel threads have no associated pid.
         * We still need to mark the entry as valid.
         */
        mapptr->valid = t->atts->pid;
    int tts_count;		/* number of task-to-string structures */
    struct tts *tts_mapptr;
    unsigned int tts_mapsize = 0;
    unsigned int tts_maptomem = 0;

    if (kdebug_flags & KDBG_MAPINIT)

    /* need to use PROC_SCANPROCLIST with proc_iterate */

    /* Calculate the sizes of map buffers */
    for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
         p = p->p_list.le_next)

        kd_mapcount += get_task_numacts((task_t)p->task);

    /*
     * The proc count could change during buffer allocation,
     * so introduce a small fudge factor to bump up the
     * buffer sizes. This gives new tasks some chance of
     * making it into the tables. Bump up by 10%.
     */
    kd_mapcount += kd_mapcount/10;
    tts_count += tts_count/10;

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
    if ((kmem_alloc(kernel_map, &kd_maptomem,
                    (vm_size_t)kd_mapsize) == KERN_SUCCESS))

        kd_mapptr = (kd_threadmap *) kd_maptomem;
        bzero(kd_mapptr, kd_mapsize);

        kd_mapptr = (kd_threadmap *) 0;

    tts_mapsize = tts_count * sizeof(struct tts);
    if ((kmem_alloc(kernel_map, &tts_maptomem,
                    (vm_size_t)tts_mapsize) == KERN_SUCCESS))

        tts_mapptr = (struct tts *) tts_maptomem;
        bzero(tts_mapptr, tts_mapsize);

        tts_mapptr = (struct tts *) 0;

    /*
     * We need to save the procs command string
     * and take a reference for each task associated
     * with a valid process
     */

    /* should use proc_iterate */

    for (p = allproc.lh_first, i = 0; p && i < tts_count;
         p = p->p_list.le_next) {
        if (p->p_lflag & P_LEXIT)

        task_reference(p->task);
        tts_mapptr[i].task = p->task;
        tts_mapptr[i].pid = p->p_pid;
        (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));

    if (kd_mapptr && tts_mapptr)

        kdebug_flags |= KDBG_MAPINIT;
        /* Initialize thread map data */
        akrt.map = kd_mapptr;
        akrt.maxcount = kd_mapcount;

        for (i = 0; i < tts_count; i++)

            akrt.atts = &tts_mapptr[i];
            task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
            task_deallocate((task_t) tts_mapptr[i].task);

        kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
    /*
     * Clean up the trace buffer
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck = SLOW_NOLOG;

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     */

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
        kdebug_slowcheck |= SLOW_ENTROPY;

    global_state_pid = -1;
    kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

    /* Clean up the thread map buffer */
    kdebug_flags &= ~KDBG_MAPINIT;
    kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
    kd_mapptr = (kd_threadmap *) 0;
kdbg_setpid(kd_regtype *kdr)

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if ((p = proc_find(pid)) == NULL)

    if (flag == 1)	/* turn on pid check for this and all pids */

        kdebug_flags |= KDBG_PIDCHECK;
        kdebug_flags &= ~KDBG_PIDEXCLUDE;
        kdebug_slowcheck |= SLOW_CHECKS;

    else	/* turn off pid check for this pid value */

        /* Don't turn off all pid checking though */
        /* kdebug_flags &= ~KDBG_PIDCHECK;*/
/* This is for pid exclusion in the trace buffer */

kdbg_setpidex(kd_regtype *kdr)

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if ((p = proc_find(pid)) == NULL)

    if (flag == 1)	/* turn on pid exclusion */

        kdebug_flags |= KDBG_PIDEXCLUDE;
        kdebug_flags &= ~KDBG_PIDCHECK;
        kdebug_slowcheck |= SLOW_CHECKS;

    else	/* turn off pid exclusion for this pid value */

        /* Don't turn off all pid exclusion though */
        /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
/* This is for setting a maximum decrementer value */

kdbg_setrtcdec(kd_regtype *kdr)

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)

    maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
kdbg_setreg(kd_regtype * kdr)

    unsigned int val_1, val_2, val;

    case KDBG_CLASSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;

    case KDBG_SUBCLSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);

        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;

    case KDBG_RANGETYPE:
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        kdebug_slowcheck |= SLOW_CHECKS;

        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_RANGECHECK;	/* Turn off range check */
        kdebug_flags |= KDBG_VALCHECK;		/* Turn on specific value check */
        kdebug_slowcheck |= SLOW_CHECKS;

        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

    if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
        kdebug_slowcheck |= SLOW_CHECKS;

        kdebug_slowcheck &= ~SLOW_CHECKS;
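    /*
     * Note that the range and value filters are mutually exclusive: each
     * case above clears the other filter's flag, so whichever setreg request
     * was applied last wins, and SLOW_CHECKS stays set for as long as any
     * filter (or pid check) remains active.
     */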
kdbg_getreg(__unused kd_regtype * kdr)

    unsigned int val_1, val_2, val;

    case KDBG_CLASSTYPE:
        val_1 = (kdr->value1 & 0xff);

        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);

    case KDBG_SUBCLSTYPE:
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);

        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);

    case KDBG_RANGETYPE:
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);

        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
kdbg_readmap(user_addr_t buffer, size_t *number)

    unsigned int count = 0;

    count = avail/sizeof (kd_threadmap);

    if (count && (count <= kd_mapcount))

        if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)

            if (*number < kd_mapsize)

                if (copyout(kd_mapptr, buffer, kd_mapsize))

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)

        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;

    kd_mapptr = (kd_threadmap *) 0;
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)

    if (kd_entropy_buffer)

    kd_entropy_count = avail/sizeof(mach_timespec_t);
    kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);

    /* Enforce maximum entropy entries here if needed */

    /* allocate entropy buffer */
    if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
                   (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)

        kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;

        kd_entropy_buffer = (uint64_t *) 0;
        kd_entropy_count = 0;

    /* Enable entropy sampling */
    kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck |= SLOW_ENTROPY;

    ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

    /* Disable entropy sampling */
    kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck &= ~SLOW_ENTROPY;

    if (kd_entropy_indx > 0)

        /* copyout the buffer */
        if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))

        *number = kd_entropy_indx;

    kd_entropy_count = 0;

    kd_entropy_buftomem = 0;
    kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
    kd_entropy_buffer = (uint64_t *) 0;
kdbg_set_nkdbufs(unsigned int value)

    /*
     * We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller
     * 'value' is the desired number of trace entries
     */
    unsigned int max_entries = (sane_size/4) / sizeof(kd_buf);

    if (value <= max_entries)

        nkdbufs = max_entries;
/*
 * This function is provided for the CHUD toolkit only.
 *    zero disables kdebug_chudhook function call
 *    non-zero enables kdebug_chudhook function call
 *    address of the enabled kdebug_chudhook function
 */

kdbg_control_chud(int val, void *fn)

        /* enable chudhook */
        kdebug_chudhook = fn;
        kdebug_enable |= KDEBUG_ENABLE_CHUD;

        /* disable chudhook */
        kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
        kdebug_chudhook = 0;
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)

    unsigned int value = 0;

    kbufinfo_t kd_bufinfo;

    struct proc *p, *curproc;

    if (name[0] == KERN_KDGETENTROPY ||
        name[0] == KERN_KDEFLAGS ||
        name[0] == KERN_KDDFLAGS ||
        name[0] == KERN_KDENABLE ||
        name[0] == KERN_KDSETBUF) {

    if ( !(kdebug_flags & KDBG_LOCKINIT))

    lck_mtx_lock(kd_trace_mtx_sysctl);

    if (name[0] == KERN_KDGETBUF) {
        /*
         * Does not alter the global_state_pid
         * This is a passive request.
         */
        if (size < sizeof(kd_bufinfo.nkdbufs)) {
            /*
             * There is not enough room to return even
             * the first element of the info structure.
             */
            lck_mtx_unlock(kd_trace_mtx_sysctl);

        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

        if ( (kdebug_slowcheck & SLOW_NOLOG) )
            kd_bufinfo.nolog = 1;

            kd_bufinfo.nolog = 0;
        kd_bufinfo.flags = kdebug_flags;
        kd_bufinfo.bufid = global_state_pid;

        if (size >= sizeof(kd_bufinfo)) {
            /* Provide all the info we have */
            if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
                lck_mtx_unlock(kd_trace_mtx_sysctl);

            /*
             * For backwards compatibility, only provide
             * as much info as there is room for.
             */
            if (copyout(&kd_bufinfo, where, size)) {
                lck_mtx_unlock(kd_trace_mtx_sysctl);

        lck_mtx_unlock(kd_trace_mtx_sysctl);

    } else if (name[0] == KERN_KDGETENTROPY) {
        if (kd_entropy_buffer)

        ret = kdbg_getentropy(where, sizep, value);
        lck_mtx_unlock(kd_trace_mtx_sysctl);

    if ((curproc = current_proc()) != NULL)
        curpid = curproc->p_pid;

        lck_mtx_unlock(kd_trace_mtx_sysctl);

    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid) {
        if ((p = proc_find(global_state_pid)) == NULL) {
            /* The global pid no longer exists */
            global_state_pid = curpid;

            /* The global pid exists, deny this request */
            lck_mtx_unlock(kd_trace_mtx_sysctl);

        value &= KDBG_USERFLAGS;
        kdebug_flags |= value;

        value &= KDBG_USERFLAGS;
        kdebug_flags &= ~value;

    case KERN_KDENABLE:		/* used to enable or disable */

        /* enable only if buffer is initialized */
        if (!(kdebug_flags & KDBG_BUFINIT))

            kdebug_enable |= KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck &= ~SLOW_NOLOG;

            kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck |= SLOW_NOLOG;

        kdbg_set_nkdbufs(value);

        if (size < sizeof(kd_regtype)) {

        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

        ret = kdbg_setreg(&kd_Reg);

        if (size < sizeof(kd_regtype)) {

        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {

        ret = kdbg_read(where, sizep);

        if (size < sizeof(kd_regtype)) {

        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

        ret = kdbg_setpid(&kd_Reg);

        if (size < sizeof(kd_regtype)) {

        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

        ret = kdbg_setpidex(&kd_Reg);

        ret = kdbg_readmap(where, sizep);

    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {

        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

        ret = kdbg_setrtcdec(&kd_Reg);

    lck_mtx_unlock(kd_trace_mtx_sysctl);
/*
 * This code can run concurrently with kernel_debug_internal()
 * without the need of any locks, because all reads of kd_bufptr[i],
 * which get modified by kernel_debug_internal(), are safe.
 */
kdbg_read(user_addr_t buffer, size_t *number)

    uint64_t mintime, t, last_wrap_time;

    uint32_t tempbuf_count;
    uint32_t tempbuf_number;
    unsigned int old_kdebug_flags, new_kdebug_flags;
    unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
    boolean_t first_event = TRUE;

    count = *number/sizeof(kd_buf);

    if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)

    /*
     * because we hold kd_trace_mtx_sysctl, no other control threads can
     * be playing with kdebug_flags... the code that cuts new events could
     * be running, but it only reads kdebug_flags, it doesn't write it..
     * use an OSCompareAndSwap to make sure the other processors see the
     * change of state immediately, not to protect against 2 threads racing to update it
     */
    old_kdebug_slowcheck = kdebug_slowcheck;

        old_kdebug_flags = kdebug_flags;
        new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
        new_kdebug_flags |= KDBG_NOWRAP;
    } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

    for (cpu = 0; cpu < kd_cpus; cpu++) {

        if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
            cur_bufptr = kdbip[cpu].kd_buffer;

        if (kdbip[cpu].kd_wrapped) {
            kdbip[cpu].kd_wrapped = 0;
            kdbip[cpu].kd_readlast = cur_bufptr;
            kdbip[cpu].kd_stop = cur_bufptr;

            if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
                last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
                last_wrap_cpu = cpu;

            if (kdbip[cpu].kd_readlast == cur_bufptr)
                kdbip[cpu].kd_stop = 0;

                kdbip[cpu].kd_stop = cur_bufptr;

    if (count > nkdbufs)

    if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
        tempbuf_count = KDCOPYBUF_COUNT;

    if (last_wrap_cpu == -1)
        first_event = FALSE;

        tempbuf = kdcopybuf;

    while (tempbuf_count) {
        mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */

        for (cpu = 0; cpu < kd_cpus; cpu++) {
            if (kdbip[cpu].kd_stop == 0)	/* empty buffer */

            t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;

        /* all buffers ran empty early */

        if (first_event == TRUE) {
            /*
             * make sure we leave room for the
             * LAST_WRAPPER event we inject
             * by throwing away the first event
             * it's better to lose that one
             */
            first_event = FALSE;

            kdbip[mincpu].kd_readlast++;

            if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
                kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
            if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
                kdbip[mincpu].kd_stop = 0;

        if (last_wrap_cpu == mincpu) {
            tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
            tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
            tempbuf->arg2 = kd_cpus;

            tempbuf->arg5 = (int)current_thread();

            tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);
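            /*
             * The event built above is synthetic: when a buffer has wrapped,
             * a MISCDBG_CODE(DBG_BUFFER, 0) record is injected at the point
             * of the most recent wrap so post-processing tools can tell
             * where the oldest surviving data begins.
             */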
            *(tempbuf++) = kdbip[mincpu].kd_readlast[0];

            kdbip[mincpu].kd_readlast++;

            if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
                kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
            if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
                kdbip[mincpu].kd_stop = 0;

        if (tempbuf_number) {
            if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {

            count -= tempbuf_number;
            *number += tempbuf_number;
            buffer += (tempbuf_number * sizeof(kd_buf));

        /* all trace buffers are empty */

        if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
            tempbuf_count = KDCOPYBUF_COUNT;

    if ( !(old_kdebug_flags & KDBG_NOWRAP)) {

            old_kdebug_flags = kdebug_flags;
            new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
        } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

        if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {

                old_kdebug_slowcheck = kdebug_slowcheck;
                new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
            } while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

    return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#define TRAP_DEBUGGER __asm__ volatile("int3");

#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");

#define SANE_TRACEBUF_SIZE 2*1024*1024
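/*
 * TRAP_DEBUGGER drops into the kernel debugger so the stack snapshot is
 * taken while the machine is quiesced: "int3" is the x86 breakpoint trap and
 * "tw 4,r3,r3" its PowerPC counterpart; presumably each definition sits
 * behind an architecture-specific #if.  SANE_TRACEBUF_SIZE caps a single
 * snapshot at 2MB.
 */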
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )

    stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

    stackshot_subsys_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
/*
 * stack_snapshot:  Obtains a coherent set of stack traces for all threads
 *                  on the system, tracing both kernel and user stacks
 *                  where available. Uses machine specific trace routines
 *                  for ppc, ppc64 and x86.
 * Inputs:          uap->pid - process id of process to be traced, or -1
 *                  for the entire system
 *                  uap->tracebuf - address of the user space destination
 *                  buffer
 *                  uap->tracebuf_size - size of the user space trace buffer
 *                  uap->options - various options, including the maximum
 *                  number of frames to trace.
 * Outputs:         EPERM if the caller is not privileged
 *                  EINVAL if the supplied trace buffer isn't sanely sized
 *                  ENOMEM if we don't have enough memory to satisfy the
 *                  request
 *                  ENOENT if the target pid isn't found
 *                  ENOSPC if the supplied buffer is insufficient
 *                  *retval contains the number of bytes traced, if successful
 *                  and -1 otherwise. If the request failed due to
 *                  tracebuffer exhaustion, we copyout as much as possible.
 */
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {

    if ((error = suser(kauth_cred_get(), &p->p_acflag)))

    return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
                           uap->options, retval);
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)

    unsigned bytesTraced = 0;

    /* Serialize tracing */
    STACKSHOT_SUBSYS_LOCK();

    if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {

    MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

    if (stackshot_snapbuf == NULL) {

    /* Preload trace parameters */
    kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

    /* Trap to the debugger to obtain a coherent stack snapshot; this populates the trace buffer */

    if (panic_active()) {

    bytesTraced = kdp_stack_snapshot_bytes_traced();

    if (bytesTraced > 0) {
        if ((error = copyout(stackshot_snapbuf, tracebuf,
                             ((bytesTraced < tracebuf_size) ?
                              bytesTraced : tracebuf_size))))

        *retval = bytesTraced;

    error = kdp_stack_snapshot_geterror();

    if (stackshot_snapbuf != NULL)
        FREE(stackshot_snapbuf, M_TEMP);
    stackshot_snapbuf = NULL;
    STACKSHOT_SUBSYS_UNLOCK();
start_kern_tracing(unsigned int new_nkdbufs) {

    kdbg_set_nkdbufs(new_nkdbufs);

    kdebug_enable |= KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck &= ~SLOW_NOLOG;

    printf("kernel tracing started\n");