/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/kauth.h>

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);
extern uint32_t maxDec;

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *	kd_entropy_buffer = 0;
unsigned int	kd_entropy_bufsize = 0;
unsigned int	kd_entropy_count = 0;
unsigned int	kd_entropy_indx = 0;
unsigned int	kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;
/* per-CPU trace buffer state */
struct kd_bufinfo {
	kd_buf		*kd_bufptr;
	kd_buf		*kd_buffer;
	kd_buf		*kd_buflast;
	kd_buf		*kd_readlast;
	kd_buf		*kd_stop;
	int		kd_wrapped;	/* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
	uint64_t	kd_prev_timebase;
	int		kd_pad[24];	/* pad out to 128 bytes so that no cache line is shared between CPUs */
};

struct kd_bufinfo *kdbip = NULL;
unsigned int kd_cpus;

#define KDCOPYBUF_COUNT	1024
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_mtx_t	*kd_trace_mtx_sysctl;
static lck_grp_t	*kd_trace_mtx_sysctl_grp;
static lck_attr_t	*kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t	*kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t	*stackshot_subsys_lck_grp;
static lck_grp_attr_t	*stackshot_subsys_lck_grp_attr;
static lck_attr_t	*stackshot_subsys_lck_attr;
static lck_mtx_t	stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size,
		uint32_t options, register_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size,
		       uint32_t options);

extern int
kdp_stack_snapshot_geterror(void);

extern unsigned int
kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc
/* task to string structure */
struct tts {
	task_t	task;			/* from procs task */
	pid_t	pid;			/* from procs p_pid */
	char	task_comm[20];		/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt {
	kd_threadmap	*map;		/* pointer to the map buffer */
	int		count;
	int		maxcount;
	struct tts	*atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void );
/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
{
	if ( (kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return(0);
}
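/*
 * Hypothetical usage sketch: how a user-space program might emit a trace
 * event through this syscall.  The KDBG_CODE()/DBG_FUNC_NONE macros are
 * assumed to come from <sys/kdebug.h>; the DBG_APPS class and the code and
 * argument values below are placeholders.
 *
 *	#include <sys/kdebug.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int debugid = KDBG_CODE(DBG_APPS, 0, 1) | DBG_FUNC_NONE;
 *
 *	syscall(SYS_kdebug_trace, debugid, 1, 2, 3, 4);
 */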
static int
create_buffers(void)
{
	unsigned int	cpu, i;
	int		nentries;

	nentries = nkdbufs / kd_cpus;
	kd_bufsize = nentries * sizeof(kd_buf);

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
			return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
			break;
	}
	if (cpu < kd_cpus) {
		for (i = 0; i < cpu; i++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
		kd_bufsize = 0;

		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;

		return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
		kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
		kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
	}
	kdebug_flags |= KDBG_BUFINIT;

	return(0);
}
static void
delete_buffers(void)
{
	unsigned int cpu;

	if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
		for (cpu = 0; cpu < kd_cpus; cpu++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
		kd_bufsize = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;
	}
	kdebug_flags &= ~KDBG_BUFINIT;
}
static void
kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
		      unsigned int arg4, unsigned int arg5, int entropy_flag)
{
	int s;
	kd_buf *kd;
	struct proc *curproc;
	unsigned long long now;
	int cpu;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}
	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}
		if (kd_entropy_indx == kd_entropy_count) {
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();

		if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();

		if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}
record_trace:
	kd = kdbip[cpu].kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	/*
	 * Watch for out of order timestamps
	 */
	if (now < kdbip[cpu].kd_prev_timebase) {
		/*
		 * if so, just store the previous timestamp + a cycle
		 */
		now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	} else {
		kdbip[cpu].kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);

	kdbip[cpu].kd_bufptr++;

	if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;

	if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdbip[cpu].kd_wrapped = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}
out:
	ml_set_interrupts_enabled(s);
}
void
kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	     unsigned int arg4, __unused unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
}
void
kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	      unsigned int arg4, unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}
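/*
 * Kernel code does not usually call kernel_debug()/kernel_debug1() directly;
 * trace points are normally cut through the KERNEL_DEBUG_CONSTANT()/
 * KERNEL_DEBUG() macros from <sys/kdebug.h>, which check kdebug_enable before
 * making the call.  A minimal sketch (the DBG_MACH_SCHED class and the zero
 * arguments are placeholders):
 *
 *	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_START,
 *			      0, 0, 0, 0, 0);
 */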
static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.physical_cpu_max;

	if (kmem_alloc(kernel_map, (unsigned int *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize spin lock and mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}

int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}
int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap();

	return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	int	i;
	char	*dbg_nameptr;
	int	dbg_namelen;
	long	dbg_parms[4] = { 0L, 0L, 0L, 0L };

	/* Collect the pathname for tracing */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = strlen(proc->p_comm);

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);

	for (i = 0; dbg_namelen > 0; i++) {
		dbg_parms[i] = *(long *)dbg_nameptr;
		dbg_nameptr += sizeof(long);
		dbg_namelen -= sizeof(long);
	}
	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread = (unsigned int)th_act;
		(void) strncpy(mapptr->command, t->atts->task_comm,
			       sizeof(t->atts->task_comm) - 1);
		mapptr->command[sizeof(t->atts->task_comm) - 1] = '\0';

		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
void
kdbg_mapinit(void)
{
	struct proc	*p;
	struct krt	akrt;
	int		tts_count;	/* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	unsigned int	tts_maptomem = 0;
	int		i;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* Calculate the sizes of map buffers */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
	     p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if ((kmem_alloc(kernel_map, &kd_maptomem,
			(vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	} else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if ((kmem_alloc(kernel_map, &tts_maptomem,
			(vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	} else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		for (p = allproc.lh_first, i = 0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_flag & P_WEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
				i++;
			}
		}
		tts_count = i;
	}
	if (kd_mapptr && tts_mapptr) {
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}
void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid  = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if ((p = pfind(pid)) == NULL)
		return(ESRCH);

	if (flag == 1) {	/* turn on pid check for this and all pids */
		kdebug_flags |= KDBG_PIDCHECK;
		kdebug_flags &= ~KDBG_PIDEXCLUDE;
		kdebug_slowcheck |= SLOW_CHECKS;

		p->p_flag |= P_KDEBUG;
	} else {		/* turn off pid check for this pid value */
		/* Don't turn off all pid checking though */
		/* kdebug_flags &= ~KDBG_PIDCHECK;*/
		p->p_flag &= ~P_KDEBUG;
	}
	return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid  = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if ((p = pfind(pid)) == NULL)
		return(ESRCH);

	if (flag == 1) {	/* turn on pid exclusion */
		kdebug_flags |= KDBG_PIDEXCLUDE;
		kdebug_flags &= ~KDBG_PIDCHECK;
		kdebug_slowcheck |= SLOW_CHECKS;

		p->p_flag |= P_KDEBUG;
	} else {		/* turn off pid exclusion for this pid value */
		/* Don't turn off all pid exclusion though */
		/* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
		p->p_flag &= ~P_KDEBUG;
	}
	return(ret);
}
/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */

	return(ret);
}
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK :
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;     /* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;        /* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}
int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int ret = 0;
	unsigned int count = 0;

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount)) {
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
			if (*number < kd_mapsize)
				ret = EINVAL;
			else {
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret = EINVAL;
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail/sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	}
	else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}
	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    void *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}
int
kdbg_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	int size = *sizep;
	unsigned int max_entries;
	unsigned int value = name[1];
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return(ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EINVAL);
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout (&kd_bufinfo, where, sizeof(kd_bufinfo))) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout (&kd_bufinfo, where, size)) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return(0);
	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return(ret);
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return(ESRCH);
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = pfind(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EBUSY);
		}
	}
	switch (name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:	/* used to enable or disable */
		if (value) {
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else {
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		/* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
		/* 'value' is the desired number of trace entries */
		max_entries = (sane_size/4) / sizeof(kd_buf);
		if (value <= max_entries)
			nkdbufs = value;
		else
			nkdbufs = max_entries;
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	default:
		ret = EINVAL;
	}
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
/*
 * This code can run concurrently with kernel_debug_internal()
 * without the need of any locks, because all reads of kd_bufptr[i],
 * which get modified by kernel_debug_internal(), are safe.
 */
int
kdbg_read(user_addr_t buffer, size_t *number)
{
	unsigned int count;
	unsigned int cpu;
	int mincpu;
	uint64_t mintime, t, last_wrap_time;
	int last_wrap_cpu;
	int error = 0;
	kd_buf *tempbuf;
	kd_buf *cur_bufptr;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	unsigned int old_kdebug_flags, new_kdebug_flags;
	unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it only reads kdebug_flags, it doesn't write it..
	 * use an OSCompareAndSwap to make sure the other processors see the
	 * change of state immediately, not to protect against 2 threads racing to update it
	 */
	old_kdebug_slowcheck = kdebug_slowcheck;
	do {
		old_kdebug_flags = kdebug_flags;
		new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
		new_kdebug_flags |= KDBG_NOWRAP;
	} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

	last_wrap_time = 0;
	last_wrap_cpu  = -1;

	for (cpu = 0; cpu < kd_cpus; cpu++) {
		if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
			cur_bufptr = kdbip[cpu].kd_buffer;

		if (kdbip[cpu].kd_wrapped) {
			kdbip[cpu].kd_wrapped = 0;
			kdbip[cpu].kd_readlast = cur_bufptr;
			kdbip[cpu].kd_stop = cur_bufptr;

			if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
				last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
				last_wrap_cpu = cpu;
			}
		} else {
			if (kdbip[cpu].kd_readlast == cur_bufptr)
				kdbip[cpu].kd_stop = 0;
			else
				kdbip[cpu].kd_stop = cur_bufptr;
		}
	}
	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = -1;

			for (cpu = 0; cpu < kd_cpus; cpu++) {
				if (kdbip[cpu].kd_stop == 0)	/* empty buffer */
					continue;
				t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;

				if (t < mintime) {
					mintime = t;
					mincpu = cpu;
				}
			}
			if (mincpu < 0)
				/*
				 * all buffers ran empty early
				 */
				break;

			if (last_wrap_cpu == mincpu) {
				tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
				tempbuf->arg1 = 0;
				tempbuf->arg2 = 0;
				tempbuf->arg3 = 0;
				tempbuf->arg4 = 0;
				tempbuf->arg5 = (int)current_thread();

				tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);

				tempbuf++;

				last_wrap_cpu = -1;
			} else {
				*(tempbuf++) = kdbip[mincpu].kd_readlast[0];

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;
			}
			tempbuf_count--;
			tempbuf_number++;
		}
		if (tempbuf_number) {
			if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count   -= tempbuf_number;
			*number += tempbuf_number;
			buffer  += (tempbuf_number * sizeof(kd_buf));
		}
		if (tempbuf_number < tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		do {
			old_kdebug_flags = kdebug_flags;
			new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
		} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
			do {
				old_kdebug_slowcheck = kdebug_slowcheck;
				new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
			} while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
		}
	}
	return(error);
}
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#if defined(__i386__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE 2*1024*1024
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->options, retval);
}
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;

	*retval = -1;

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}
	MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

	if (stackshot_snapbuf == NULL) {
		error = ENOMEM;
		goto error_exit;
	}
	/* Preload trace parameters*/
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
				     ((bytesTraced < tracebuf_size) ?
				      bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}
	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		FREE(stackshot_snapbuf, M_TEMP);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
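/*
 * Illustrative sketch: a privileged user-space caller would reach
 * stack_snapshot() through its syscall stub.  Whether the stub is exposed as
 * SYS_stack_snapshot in <sys/syscall.h> on a given release is an assumption;
 * the argument order mirrors stack_snapshot_args above.
 *
 *	char buf[128 * 1024];
 *	// pid of -1 asks for the whole system; options carries the frame limit
 *	int traced = syscall(SYS_stack_snapshot, -1, buf, sizeof(buf), 10);
 */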