/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>
#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock.h>
#endif
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

extern uint32_t maxDec;
/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *	kd_entropy_buffer = 0;
unsigned int	kd_entropy_bufsize = 0;
unsigned int	kd_entropy_count = 0;
unsigned int	kd_entropy_indx = 0;
vm_offset_t	kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;
#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

struct kd_storage {
	struct kd_storage *kds_next;
	kd_buf	*kds_bufptr;
	kd_buf	*kds_buflast;
	kd_buf	*kds_readlast;
	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};
#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
	struct kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};
struct kd_storage		*kds_free_list = NULL;
struct kd_storage_buffers	*kd_bufs = NULL;
int				n_storage_units = 0;
int				n_storage_buffers = 0;
struct kd_bufinfo {
	struct kd_storage *kd_list_head;
	struct kd_storage *kd_list_tail;
	struct kd_storage *kd_active;
	uint64_t kd_prev_timebase;
} __attribute__(( aligned(CPU_CACHE_SIZE) ));

struct kd_bufinfo *kdbip = NULL;
#define KDCOPYBUF_COUNT	2048
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

unsigned int nkdbufs = 8192;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t	*stackshot_subsys_lck_grp;
static lck_grp_attr_t	*stackshot_subsys_lck_grp_attr;
static lck_attr_t	*stackshot_subsys_lck_attr;
static lck_mtx_t	stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t kd_maptomem = 0;

off_t	RAW_file_offset = 0;

pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc
/* task to string structure */
struct tts
{
	task_t	task;			/* from procs task */
	pid_t	pid;			/* from procs p_pid  */
	char	task_comm[20];		/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;		/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
	if (kdebug_enable == 0)
		return (EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return (0);
}
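
/*
 * create_buffers:
 * size the trace buffer in whole storage units -- each CPU gets at
 * least MIN_STORAGE_UNITS_PER_CPU units and nkdbufs is rounded to a
 * multiple of EVENTS_PER_STORAGE_UNIT -- then carve the units into
 * MAX_BUFFER_SIZE-bounded kmem buffers plus one partial buffer for
 * the remainder, and thread every unit onto the global free list.
 */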
static int
create_buffers(void)
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next = kds_free_list;
			kds_free_list = &kds[n];

			kds[n].kds_buflast = &kds[n].kds_records[EVENTS_PER_STORAGE_UNIT];
		}
	}
	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	kdebug_flags |= KDBG_BUFINIT;
out:
	if (error)
		delete_buffers();

	return (error);
}
static void
delete_buffers(void)
{
	int i;

	for (i = 0; i < n_storage_buffers; i++) {
		if (kd_bufs[i].kdsb_addr)
			kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
	}
	kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

	kd_bufs = NULL;
	n_storage_buffers = 0;

	kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
	kdcopybuf = NULL;

	kds_free_list = NULL;

	kdebug_flags &= ~KDBG_BUFINIT;
}
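
/*
 * release_storage_unit:
 * return a storage unit to the global free list once the reader has
 * consumed it.  Only the unit at the head of a CPU's list is ever
 * released, and the list is manipulated under kds_spin_lock with
 * interrupts disabled.
 */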
static void
release_storage_unit(struct kd_bufinfo *kdbp, struct kd_storage *kdsp)
{
	int s;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (kdsp == kdbp->kd_list_head) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdbp->kd_list_head = kdsp->kds_next;

		kdsp->kds_next = kds_free_list;
		kds_free_list = kdsp;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
/*
 * Interrupts are disabled when we enter this routine.
 */
static struct kd_storage *
allocate_storage_unit(struct kd_bufinfo *kdbp)
{
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp_vict, *kdbp_try;
	uint64_t oldest_ts, ts;

	lck_spin_lock(kds_spin_lock);

	if ((kdsp = kds_free_list))
		kds_free_list = kdsp->kds_next;
	else {
		if (kdebug_flags & KDBG_NOWRAP) {
			kdebug_slowcheck |= SLOW_NOLOG;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

			if ((kdsp = kdbp_try->kd_list_head) == NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			if (kdsp == kdbp_try->kd_active) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... this state
				 * also implies that this is the only unit assigned
				 * to this CPU, so we can immediately move on
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&(kdbp_try->kd_list_head->kds_records[0]));

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL)
			panic("allocate_storage_unit: no storage units available\n");

		kdsp = kdbp_vict->kd_list_head;

		kdbp_vict->kd_list_head = kdsp->kds_next;

		kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp->kds_next = NULL;
	kdsp->kds_bufptr = &kdsp->kds_records[0];
	kdsp->kds_readlast = kdsp->kds_bufptr;

	if (kdbp->kd_list_head == NULL)
		kdbp->kd_list_head = kdsp;
	else
		kdbp->kd_list_tail->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);

	return (kdsp);
}
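
/*
 * kernel_debug_internal:
 * common path for cutting a trace record.  Runs with interrupts
 * disabled; feeds the CHUD hook and the entropy buffer when enabled,
 * applies the pid/range/value filters, then copies the event into the
 * current CPU's active storage unit, allocating a new unit when the
 * active one fills up.
 */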
static void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag)
{
	struct proc	*curproc;
	uint64_t	now;
	int		s;
	int		cpu;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}
	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}
		if (kd_entropy_indx == kd_entropy_count) {
			/*
			 * Disable entropy collection
			 */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/*
		 * If kdebug flag is not set for current proc, return
		 */
		curproc = current_proc();

		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/*
		 * If kdebug flag is set for current proc, return
		 */
		curproc = current_proc();

		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}
record_trace:
	kdbp = &kdbip[cpu];

	if ((kdsp = kdbp->kd_active) == NULL) {
		if ((kdsp = allocate_storage_unit(kdbp)) == NULL) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		kdbp->kd_active = kdsp;
	}
	kd = kdsp->kds_bufptr;

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	kdsp->kds_bufptr++;

	if (kdsp->kds_bufptr >= kdsp->kds_buflast)
		kdbp->kd_active = NULL;
out:
	ml_set_interrupts_enabled(s);
}
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}
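
/*
 * kdbg_lock_init:
 * one-time setup -- query the number of logical CPUs, allocate the
 * per-CPU kd_bufinfo array, and create the lock group, mutex and
 * spin lock that guard the trace machinery.
 */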
static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.logical_cpu_max;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize spin lock and mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}

int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}
int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapptr = (kd_threadmap *) 0;
	}
	ret = kdbg_bootstrap();

	return (ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	*arg_pid = proc->p_pid;
}

void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
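
/*
 * kdbg_resolve_map:
 * callback run once per thread of a task; fills in the next free
 * kd_threadmap entry with the thread id, the owning process's command
 * name, and its pid (used as the 'valid' marker).
 */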
void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
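
/*
 * kdbg_mapinit:
 * build the thread-name map.  Walk allproc to size the kd_threadmap
 * and tts arrays, snapshot each live process's task, pid and command
 * name, then iterate every thread of those tasks through
 * kdbg_resolve_map to fill in the per-thread entries.
 */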
void
kdbg_mapinit(void)
{
	struct proc	*p;
	struct krt	akrt;
	int		i;
	int		tts_count;    /* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	vm_offset_t	tts_maptomem = 0;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */

	/*
	 * Calculate the sizes of map buffers
	 */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount/10;
	tts_count += tts_count/10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

	if ((kmem_alloc(kernel_map, &kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	} else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, &tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	} else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 *
	 * should use proc_iterate
	 */
	for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
		if (p->p_lflag & P_LEXIT)
			continue;

		task_reference(p->task);
		tts_mapptr[i].task = p->task;
		tts_mapptr[i].pid  = p->p_pid;
		(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
		i++;
	}
	tts_count = i;

	if (kd_mapptr && tts_mapptr) {
		kdebug_flags |= KDBG_MAPINIT;

		/*
		 * Initialize thread map data
		 */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}
void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;

	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
}
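
/*
 * kdbg_setpid:
 * enable or disable tracing for a single pid.  value1 carries the pid,
 * value2 the on/off flag; turning it on sets KDBG_PIDCHECK (and clears
 * KDBG_PIDEXCLUDE) so only marked processes are traced.
 */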
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if ((p = proc_find(pid)) == NULL)
		return (ESRCH);

	if (flag == 1) {
		/*
		 * turn on pid check for this and all pids
		 */
		kdebug_flags |= KDBG_PIDCHECK;
		kdebug_flags &= ~KDBG_PIDEXCLUDE;
		kdebug_slowcheck |= SLOW_CHECKS;

		p->p_kdebug = 1;
	} else {
		/*
		 * turn off pid check for this pid value
		 * Don't turn off all pid checking though
		 *
		 * kdebug_flags &= ~KDBG_PIDCHECK;
		 */
		p->p_kdebug = 0;
	}
	proc_rele(p);

	return (0);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if ((p = proc_find(pid)) == NULL)
		return (ESRCH);

	if (flag == 1) {
		/*
		 * turn on pid exclusion
		 */
		kdebug_flags |= KDBG_PIDEXCLUDE;
		kdebug_flags &= ~KDBG_PIDCHECK;
		kdebug_slowcheck |= SLOW_CHECKS;

		p->p_kdebug = 1;
	} else {
		/*
		 * turn off pid exclusion for this pid value
		 * Don't turn off all pid exclusion though
		 *
		 * kdebug_flags &= ~KDBG_PIDEXCLUDE;
		 */
		p->p_kdebug = 0;
	}
	proc_rele(p);

	return (0);
}
/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */

	return (ret);
}
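
/*
 * kdbg_setreg:
 * install the debugid filter described by *kdr -- a class range, a
 * subclass range, an explicit debugid range, or up to four specific
 * values -- and update kdebug_flags/kdebug_slowcheck to match.
 */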
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return (ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return (ret);
}
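
/*
 * kdbg_readmap:
 * return the thread-name map to the caller -- copied out to user space
 * when a buffer is supplied, or prefixed with its entry count and
 * appended to the RAW trace file when a vnode is supplied -- and then
 * release the map.
 */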
int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount)) {
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
			if (*number < kd_mapsize)
				ret = EINVAL;
			else {
				if (vp) {
					vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += sizeof(uint32_t);

					vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += kd_mapsize;
				} else {
					if (copyout(kd_mapptr, buffer, kd_mapsize))
						ret = EINVAL;
				}
			}
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	if (ret && vp) {
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
	}
	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;

		kd_mapptr = (kd_threadmap *) 0;
	}
	return (ret);
}
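
/*
 * kdbg_getentropy:
 * one-shot entropy collection for the security server.  Allocates a
 * timestamp buffer, turns on KDEBUG_ENABLE_ENTROPY, sleeps for the
 * requested timeout while the trace path records timestamps, then
 * copies whatever was gathered out to the caller and frees the buffer.
 */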
int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return (EBUSY);

	kd_entropy_count = avail/sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/*
	 * Enforce maximum entropy entries here if needed
	 * allocate entropy buffer
	 */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	} else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/*
	 * Enable entropy sampling
	 */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

	/*
	 * Disable entropy sampling
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/*
		 * copyout the buffer
		 */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}
	/*
	 * Always cleanup
	 */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return (ret);
}
static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}
/*
 * This function is provided for the CHUD toolkit only.
 *    val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}
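
/*
 * kdbg_control:
 * back end of the kdebug sysctl.  Takes kd_trace_mtx_sysctl, enforces
 * single-owner access through global_state_pid, and dispatches the
 * KERN_KD* selectors: report buffer info, set or clear flags, enable
 * or disable tracing, size and (re)initialize the buffers, install
 * debugid and pid filters, read events and the thread map, gather
 * entropy, and set the decrementer.
 */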
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETENTROPY ||
	    name[0] == KERN_KDEFLAGS ||
	    name[0] == KERN_KDDFLAGS ||
	    name[0] == KERN_KDENABLE ||
	    name[0] == KERN_KDSETBUF) {

		if (namelen < 2)
			return (EINVAL);
		value = name[1];
	}
	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return (ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);
	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			ret = EINVAL;
			goto out;
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;

		kd_bufinfo.flags = kdebug_flags;
#if defined(__LP64__)
		kd_bufinfo.flags |= KDBG_LP64;
#endif
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
				ret = EINVAL;
		} else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size))
				ret = EINVAL;
		}
		goto out;

	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		goto out;
	}
	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		ret = ESRCH;
		goto out;
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);

			ret = EBUSY;
			goto out;
		}
	}
	switch (name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:
		/*
		 * used to enable or disable
		 */
		if (value) {
			/*
			 * enable only if buffer is initialized
			 */
			if (!(kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		} else {
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep, NULL, NULL);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep, NULL, NULL);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	default:
		ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return (ret);
}
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, mincpu;
	uint64_t mintime, t;
	int error = 0, s = 0;
	kd_buf *tempbuf;
	kd_buf *rcursor;
	kd_buf *min_rcursor;
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return (EINVAL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	old_kdebug_slowcheck = kdebug_slowcheck;
	old_kdebug_flags = kdebug_flags;

	kdebug_flags &= ~KDBG_WRAPPED;
	kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = (unsigned int)-1;
			min_rcursor = NULL;

			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

				if ((kdsp = kdbp->kd_list_head) == NULL)
					continue;
				rcursor = kdsp->kds_readlast;

				if (rcursor == kdsp->kds_bufptr)
					continue;
				t = kdbg_get_timestamp(rcursor);

				if (t < mintime) {
					mincpu = cpu;
					mintime = t;
					min_rcursor = rcursor;
				}
			}
			if (mincpu == (unsigned int)-1)
				/*
				 * all buffers ran empty
				 */
				break;

			kdbp = &kdbip[mincpu];
			kdsp = kdbp->kd_list_head;

			*tempbuf = *min_rcursor;

			if (mintime != kdbg_get_timestamp(tempbuf)) {
				/*
				 * we stole this storage unit and used it
				 * before we could slurp the selected event out
				 * so we need to re-evaluate
				 */
				continue;
			}
			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, kdbp->kd_prev_timebase, mincpu);
			} else
				kdbp->kd_prev_timebase = mintime;

			if (min_rcursor == kdsp->kds_readlast)
				kdsp->kds_readlast++;

			if (kdsp->kds_readlast == kdsp->kds_buflast)
				release_storage_unit(kdbp, kdsp);

			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;
		}
		if (tempbuf_number) {
			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count   -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {

		s = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(kds_spin_lock);

		kdebug_flags &= ~KDBG_NOWRAP;

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG))
			kdebug_slowcheck &= ~SLOW_NOLOG;

		lck_spin_unlock(kds_spin_lock);
		ml_set_interrupts_enabled(s);
	}
	return (error);
}
*proc
);
1647 unsigned char *getProcName(struct proc
*proc
) {
1649 return (unsigned char *) &proc
->p_comm
; /* Return pointer to the proc name */
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE 2*1024*1024
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->options, retval);
}
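
/*
 * stack_snapshot2:
 * does the real work -- serialize via the stackshot mutex, stage a
 * kernel buffer, preflight the KDP snapshot parameters, trap to the
 * debugger to capture the traces, then copy the result out to the
 * user buffer and report the number of bytes traced.
 */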
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, int32_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;

	*retval = -1;

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

	if (stackshot_snapbuf == NULL) {
		error = ENOMEM;
		goto error_exit;
	}
	/* Preload trace parameters*/
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	TRAP_DEBUGGER;

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
		    ((bytesTraced < tracebuf_size) ?
			bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	} else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();

error_exit:
	if (stackshot_snapbuf != NULL)
		FREE(stackshot_snapbuf, M_TEMP);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();

	return (error);
}
void
start_kern_tracing(unsigned int new_nkdbufs) {

	if (!new_nkdbufs)
		return;

	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();

	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();

#if defined(__i386__) || defined(__x86_64__)
	uint64_t now = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
			      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
			      (uint32_t)(now >> 32), (uint32_t)now,
			      0);
#endif
	printf("kernel tracing started\n");
}
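
/*
 * kdbg_dump_trace_to_file:
 * used on the panic/shutdown path.  If trace collection is delegated
 * elsewhere (CHUD or entropy sampling active, or a user tool owns the
 * buffers via global_state_pid), bail out; otherwise stop tracing and
 * write the thread map and the buffered events to the named file, then
 * sync.
 */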
void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t	ctx;
	vnode_t		vp;
	int		error;
	size_t		number;

	if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 */
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapsize;
	kdbg_readmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}