/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock.h>
#endif

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

extern uint32_t maxDec;
/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t * kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
vm_offset_t kd_entropy_buftomem = 0;
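/*
 * kdebug_slowcheck bits: when any of these are set, kernel_debug_internal()
 * takes its slow path.  SLOW_NOLOG suppresses event recording, SLOW_CHECKS
 * enables the pid/range/value filters, and SLOW_ENTROPY routes timestamps
 * into the entropy buffer above.
 */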
#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;
#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4
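/*
 * Events are recorded into fixed-size storage units of
 * EVENTS_PER_STORAGE_UNIT kd_buf entries.  Each CPU is guaranteed at least
 * MIN_STORAGE_UNITS_PER_CPU units; see create_buffers() below.
 */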
struct kd_storage {
	struct kd_storage	*kds_next;
	kd_buf			*kds_bufptr;
	kd_buf			*kds_buflast;
	kd_buf			*kds_readlast;
	kd_buf			kds_records[EVENTS_PER_STORAGE_UNIT];
};
#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))
struct kd_storage_buffers {
	struct kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};
struct kd_storage *kds_free_list = NULL;
struct kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
int	n_storage_buffers = 0;
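/*
 * Per-CPU bookkeeping: each CPU owns a singly linked list of storage units
 * (kd_list_head/kd_list_tail), with kd_active pointing at the unit currently
 * being filled.  The structure is cache-line aligned, presumably to avoid
 * false sharing between CPUs.
 */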
struct kd_bufinfo {
	struct kd_storage *kd_list_head;
	struct kd_storage *kd_list_tail;
	struct kd_storage *kd_active;
	uint64_t kd_prev_timebase;
} __attribute__(( aligned(CPU_CACHE_SIZE) ));

struct kd_bufinfo *kdbip = NULL;
#define KDCOPYBUF_COUNT	2048
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

unsigned int nkdbufs = 8192;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
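/*
 * Locking: kds_spin_lock (taken with interrupts disabled) protects the
 * per-CPU storage unit lists and the free list; kd_trace_mtx_sysctl
 * serializes the sysctl control path (kdbg_control) and readers.
 */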
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t       *stackshot_subsys_lck_grp;
static lck_grp_attr_t  *stackshot_subsys_lck_grp_attr;
static lck_attr_t      *stackshot_subsys_lck_attr;
static lck_mtx_t        stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size,
		uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size,
		       uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);

extern unsigned int
kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t kd_maptomem = 0;

off_t	RAW_file_offset = 0;

pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc
/* task to string structure */
struct tts
{
	task_t    task;            /* from procs task */
	pid_t     pid;             /* from procs p_pid  */
	char      task_comm[20];   /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;    /* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
	if ( (kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return(0);
}
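/*
 * create_buffers() sizes the trace buffer from nkdbufs: the requested event
 * count is rounded to whole storage units (at least MIN_STORAGE_UNITS_PER_CPU
 * per CPU), the units are carved out of large kmem allocations tracked in
 * kd_bufs[], and every unit is threaded onto kds_free_list.
 */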
static int
create_buffers(void)
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;
	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOMEM;
			goto out;
		}
		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next = kds_free_list;
			kds_free_list = &kds[n];

			kds[n].kds_buflast = &kds[n].kds_records[EVENTS_PER_STORAGE_UNIT];
		}
	}
	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	kdebug_flags |= KDBG_BUFINIT;
out:
	if (error)
		delete_buffers();

	return(error);
}
static void
delete_buffers(void)
{
	int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr)
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kds_free_list = NULL;

	kdebug_flags &= ~KDBG_BUFINIT;
}
static void
release_storage_unit(struct kd_bufinfo *kdbp, struct kd_storage *kdsp)
{
	int s;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (kdsp == kdbp->kd_list_head) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdbp->kd_list_head = kdsp->kds_next;

		kdsp->kds_next = kds_free_list;
		kds_free_list = kdsp;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
/*
 * Interrupts are disabled when we enter this routine.
 */
static struct kd_storage *
allocate_storage_unit(struct kd_bufinfo *kdbp)
{
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp_vict, *kdbp_try;
	uint64_t oldest_ts, ts;

	lck_spin_lock(kds_spin_lock);

	if ((kdsp = kds_free_list))
		kds_free_list = kdsp->kds_next;
	else {
		if (kdebug_flags & KDBG_NOWRAP) {
			kdebug_slowcheck |= SLOW_NOLOG;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

			if ((kdsp = kdbp_try->kd_list_head) == NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			if (kdsp == kdbp_try->kd_active) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to...  this state
				 * also implies that this is the only unit assigned
				 * to this CPU, so we can immediately move on
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&(kdbp_try->kd_list_head->kds_records[0]));

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			panic("allocate_storage_unit: no storage units available\n");
		}
		kdsp = kdbp_vict->kd_list_head;

		kdbp_vict->kd_list_head = kdsp->kds_next;

		kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp->kds_next = NULL;
	kdsp->kds_bufptr = &kdsp->kds_records[0];
	kdsp->kds_readlast = kdsp->kds_bufptr;

	if (kdbp->kd_list_head == NULL)
		kdbp->kd_list_head = kdsp;
	else
		kdbp->kd_list_tail->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);

	return(kdsp);
}
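/*
 * kernel_debug_internal() is the common event-recording path.  With
 * kdebug_slowcheck == 0 it drops straight into recording; otherwise it runs
 * the entropy, pid, range and value filters before touching the buffers.
 * It runs with interrupts disabled for its duration.
 */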
static void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag)
{
	struct proc	*curproc;
	uint64_t	now;
	int		s;
	int		cpu;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}
	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}
		if (kd_entropy_indx == kd_entropy_count) {
			/*
			 * Disable entropy collection
			 */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/*
		 * If kdebug flag is not set for current proc, return
		 */
		curproc = current_proc();

		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/*
		 * If kdebug flag is set for current proc, return
		 */
		curproc = current_proc();

		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}
record_trace:
	kdbp = &kdbip[cpu];

	if ((kdsp = kdbp->kd_active) == NULL) {
		if ((kdsp = allocate_storage_unit(kdbp)) == NULL) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		kdbp->kd_active = kdsp;
	}
	kd = kdsp->kds_bufptr;

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	kdsp->kds_bufptr++;

	if (kdsp->kds_bufptr >= kdsp->kds_buflast)
		kdbp->kd_active = NULL;
out:
	ml_set_interrupts_enabled(s);
}
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4,
			      (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}
static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define	BSD_HOST	1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.logical_cpu_max;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize spin lock and mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}
int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap();

	return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}


void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread  = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
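/*
 * kdbg_mapinit() builds the thread map in two passes: it first walks allproc
 * to size the buffers and snapshot each task's pid and p_comm into a tts
 * array (taking a task reference), then iterates every thread of those tasks
 * through kdbg_resolve_map() to fill kd_mapptr.
 */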
void
kdbg_mapinit(void)
{
	struct proc	*p;
	struct krt	akrt;
	int		i;
	int		tts_count;    /* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	vm_offset_t	tts_maptomem = 0;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */

	/*
	 * Calculate the sizes of map buffers
	 */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount/10;
	tts_count += tts_count/10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

	if ((kmem_alloc(kernel_map, & kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	} else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, & tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	} else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		/*
		 * should use proc_iterate
		 */
		for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid  = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;
	}

	if (kd_mapptr && tts_mapptr) {
		kdebug_flags |= KDBG_MAPINIT;

		/*
		 * Initialize thread map data
		 */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}
static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;

	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}
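/*
 * Pid filtering: kdbg_setpid() marks individual processes for inclusion
 * (KDBG_PIDCHECK) and kdbg_setpidex() for exclusion (KDBG_PIDEXCLUDE);
 * kernel_debug_internal() consults p_kdebug on the current proc for either
 * mode.
 */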
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}
/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */

	return(ret);
}
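/*
 * kdbg_setreg() programs the debugid filters.  A debugid encodes the class in
 * its top byte and the subclass in the next byte, so KDBG_CLASSTYPE and
 * KDBG_SUBCLSTYPE build [kdlog_beg, kdlog_end) ranges by shifting those
 * fields into place, while KDBG_VALCHECK matches up to four exact debugids.
 */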
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}
int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret = EINVAL;
			else
			{
				if (vp) {
					vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += sizeof(uint32_t);

					vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += kd_mapsize;

				} else {
					if (copyout(kd_mapptr, buffer, kd_mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp) {
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
	}
	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail/sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/*
	 * Enforce maximum entropy entries here if needed
	 * allocate entropy buffer
	 */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	} else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return(EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/*
	 * Enable entropy sampling
	 */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

	/*
	 * Disable entropy sampling
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/*
		 * copyout the buffer
		 */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}
	/*
	 * Always cleanup
	 */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}
static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	} else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}
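/*
 * kdbg_control() is the sysctl backend for kdebug.  Passive requests such as
 * KERN_KDGETBUF are answered directly; everything else is serialized under
 * kd_trace_mtx_sysctl and restricted to one controlling process at a time via
 * global_state_pid.
 */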
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETENTROPY ||
	    name[0] == KERN_KDEFLAGS ||
	    name[0] == KERN_KDDFLAGS ||
	    name[0] == KERN_KDENABLE ||
	    name[0] == KERN_KDSETBUF) {

		if (namelen < 2)
			return(EINVAL);
		value = name[1];
	}
	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return(ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			ret = EINVAL;
			goto out;
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;

		kd_bufinfo.flags = kdebug_flags;
#if defined(__LP64__)
		kd_bufinfo.flags |= KDBG_LP64;
#endif
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
				ret = EINVAL;
		} else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size))
				ret = EINVAL;
		}
		goto out;

	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		goto out;
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		ret = ESRCH;
		goto out;
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);

			ret = EBUSY;
			goto out;
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:
		/*
		 * used to enable or disable
		 */
		if (value) {
			/*
			 * enable only if buffer is initialized
			 */
			if (!(kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		} else {
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep, NULL, NULL);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep, NULL, NULL);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	default:
		ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
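/*
 * kdbg_read() drains events in timestamp order: each pass scans every CPU's
 * oldest unread event, copies the minimum into kdcopybuf, and either writes
 * kdcopybuf to the supplied vnode or copies it out to user space, releasing
 * storage units as they are fully consumed.
 */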
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, mincpu;
	uint64_t mintime, t;
	int error = 0, s = 0;
	kd_buf *tempbuf;
	kd_buf *rcursor;
	kd_buf *min_rcursor;
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	old_kdebug_slowcheck = kdebug_slowcheck;
	old_kdebug_flags = kdebug_flags;

	kdebug_flags &= ~KDBG_WRAPPED;
	kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = (unsigned int)-1;
			min_rcursor = NULL;

			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

				if ((kdsp = kdbp->kd_list_head) == NULL)
					continue;
				rcursor = kdsp->kds_readlast;

				if (rcursor == kdsp->kds_bufptr)
					continue;
				t = kdbg_get_timestamp(rcursor);

				if (t < mintime) {
					mincpu = cpu;
					mintime = t;
					min_rcursor = rcursor;
				}
			}
			if (mincpu == (unsigned int)-1)
				/*
				 * all buffers ran empty
				 */
				break;

			kdbp = &kdbip[mincpu];
			kdsp = kdbp->kd_list_head;

			*tempbuf = *min_rcursor;

			if (mintime != kdbg_get_timestamp(tempbuf)) {
				/*
				 * we stole this storage unit and used it
				 * before we could slurp the selected event out
				 * so we need to re-evaluate
				 */
				continue;
			}
			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, kdbp->kd_prev_timebase, mincpu);
			} else
				kdbp->kd_prev_timebase = mintime;

			if (min_rcursor == kdsp->kds_readlast)
				kdsp->kds_readlast++;

			if (kdsp->kds_readlast == kdsp->kds_buflast)
				release_storage_unit(kdbp, kdsp);

			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;
		}
		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {

		s = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(kds_spin_lock);

		kdebug_flags &= ~KDBG_NOWRAP;

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG))
			kdebug_slowcheck &= ~SLOW_NOLOG;

		lck_spin_unlock(kds_spin_lock);
		ml_set_interrupts_enabled(s);
	}
	return (error);
}
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;
	boolean_t istate;

	*retval = -1;

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}

	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);
	/* Preload trace parameters*/
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
			((bytesTraced < tracebuf_size) ?
			    bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
void
start_kern_tracing(unsigned int new_nkdbufs) {

	if (!new_nkdbufs)
		return;
	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();

	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();

#if defined(__i386__) || defined(__x86_64__)
	uint64_t now = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
			      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
			      (uint32_t)(now >> 32), (uint32_t)now,
			      0);
#endif
	printf("kernel tracing started\n");
}
void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t	ctx;
	vnode_t		vp;
	int		error;
	size_t		number;

	if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 */
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapsize;
	kdbg_readmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}