/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/machine_routines.h>
#endif

#include <kern/clock.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>

#include <sys/param.h>			/* for isset() */

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(boolean_t);
int kdbg_bootstrap(boolean_t);

static int kdbg_enable_typefilter(void);
static int kdbg_disable_typefilter(void);

static int create_buffers(boolean_t);
static void delete_buffers(void);
extern void IOSleep(int);

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *   kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count  = 0;
unsigned int kd_entropy_indx   = 0;
vm_offset_t  kd_entropy_buftomem = 0;

#define MAX_ENTROPY_COUNT	(128 * 1024)

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04
#define SLOW_CHUD	0x08

unsigned int kd_cpus;

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])

#define NATIVE_TRACE_FACILITY
union kds_ptr {
    struct {
        uint32_t buffer_index:21;
        uint16_t offset:11;
    };
    uint32_t raw;
};

struct kd_storage {
    union kds_ptr kds_next;
    uint32_t  kds_bufindx;
    uint32_t  kds_bufcnt;
    uint32_t  kds_readlast;
    boolean_t kds_lostevents;
    uint64_t  kds_timestamp;

    kd_buf    kds_records[EVENTS_PER_STORAGE_UNIT];
};
#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
    struct kd_storage	*kdsb_addr;
    uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct kd_storage_buffers *kd_bufs = NULL;
int n_storage_units = 0;
int n_storage_buffers = 0;
int n_storage_threshold = 0;
int kds_waiter = 0;
int kde_waiter = 0;
struct kd_bufinfo {
    union kds_ptr kd_list_head;
    union kds_ptr kd_list_tail;
    boolean_t kd_lostevents;
    uint64_t  kd_prev_timebase;
    uint32_t  num_bufs;
} __attribute__(( aligned(CPU_CACHE_SIZE) ));
struct kd_ctrl_page_t {
    union kds_ptr kds_free_list;
    uint32_t enabled    :1;
    int      kds_inuse_count;
    uint32_t kdebug_flags;
    uint32_t kdebug_slowcheck;
    struct {
        uint64_t tsc_base;
        uint64_t ns_base;
    } cpu_timebase[32]; // should be max number of actual logical cpus
} kd_ctrl_page = {.kds_free_list = {.raw = KDS_PTR_NULL}, .enabled = 0, .kds_inuse_count = 0, .kdebug_flags = 0, .kdebug_slowcheck = SLOW_NOLOG};
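/*
 * Overview of the buffer scheme as reflected by the code above: trace events
 * are recorded into fixed-size "storage units" of EVENTS_PER_STORAGE_UNIT
 * kd_buf records.  A storage unit is identified by a compressed 32-bit
 * kds_ptr handle (buffer_index + offset) rather than a raw pointer, and
 * POINTER_FROM_KDS_PTR() converts that handle back into a kd_storage address.
 * kd_ctrl_page.kds_free_list chains the free units; each CPU's kd_bufinfo
 * chains the units it has filled, from kd_list_head (oldest) to kd_list_tail
 * (the unit currently being written).
 */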
struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

int kdlog_sched_events = 0;

boolean_t kdlog_bg_trace = FALSE;
boolean_t kdlog_bg_trace_running = FALSE;
unsigned int bg_nkdbufs = 0;

unsigned int nkdbufs = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t      *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t     *stackshot_subsys_lck_attr;
static lck_mtx_t       stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size,
		uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size,
		       uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t  kd_maptomem = 0;

off_t RAW_file_offset = 0;
int   RAW_file_written = 0;

#define RAW_FLUSH_SIZE	(2 * 1024 * 1024)

pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */
#define DBG_FUNC_MASK	0xfffffffc

/* TODO: move to kdebug.h */
#define CLASS_MASK      0xff000000
#define CLASS_OFFSET    24
#define SUBCLASS_MASK   0x00ff0000
#define SUBCLASS_OFFSET 16
#define CSC_MASK        0xffff0000	/* class and subclass mask */
#define CSC_OFFSET      SUBCLASS_OFFSET

#define EXTRACT_CLASS(debugid)    ( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET    ) )
#define EXTRACT_SUBCLASS(debugid) ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
#define EXTRACT_CSC(debugid)      ( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET      ) )

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000
#define DBG_SCALL_MASK	0xffff0000
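/*
 * Illustrative example of how a debugid decomposes (not taken from the
 * original source): for MACH_vmfault (0x01300008),
 *
 *	EXTRACT_CLASS(0x01300008)    == 0x01    (DBG_MACH)
 *	EXTRACT_SUBCLASS(0x01300008) == 0x30    (DBG_MACH_VM)
 *	EXTRACT_CSC(0x01300008)      == 0x0130
 *	0x01300008 & DBG_FUNC_MASK   == 0x01300008  (low two qualifier bits are DBG_FUNC_NONE)
 */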
/* task to string structure */
struct tts {
    task_t task;            /* from procs task */
    pid_t  pid;             /* from procs p_pid */
    char   task_comm[20];   /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt {
    kd_threadmap *map;    /* pointer to the map buffer */
    int count;
    int maxcount;
    struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));

static uint8_t *type_filter_bitmap;
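/*
 * kdebug_enable and kd_ctrl_page.kdebug_slowcheck are the two gates consulted
 * on every event.  The helpers below flip them together under kds_spin_lock
 * with interrupts disabled so the hot path in kernel_debug_internal() always
 * sees a consistent pair.
 */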
void
kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
{
    int s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    if (enabled) {
        kdebug_enable |= trace_type;
        kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
        kd_ctrl_page.enabled = 1;
    } else {
        kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
        kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
        kd_ctrl_page.enabled = 0;
    }
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}
void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
    int s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    if (enabled) {
        kd_ctrl_page.kdebug_slowcheck |= slowflag;
        kdebug_enable |= enableflag;
    } else {
        kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
        kdebug_enable &= ~enableflag;
    }
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}
#ifdef NATIVE_TRACE_FACILITY
void
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
    int s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
    *old_flags = kd_ctrl_page.kdebug_flags;

    kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
    kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}
void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
    int s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

    if ( !(old_slowcheck & SLOW_NOLOG))
        kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

    if (lostevents == TRUE)
        kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}
void trace_set_timebases(__unused uint64_t tsc, __unused uint64_t ns)
{
}
#else
/* Begin functions that are defined twice */
void trace_set_timebases(uint64_t tsc, uint64_t ns)
{
    int cpu = cpu_number();
    kd_ctrl_page.cpu_timebase[cpu].tsc_base = tsc;
    kd_ctrl_page.cpu_timebase[cpu].ns_base = ns;
}
#endif
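/*
 * create_buffers() sizes the trace pool from nkdbufs, carves it into
 * kd_storage units (grouped into allocations of at most MAX_BUFFER_SIZE),
 * threads every unit onto kd_ctrl_page.kds_free_list, and resets the per-CPU
 * kd_bufinfo lists.  n_storage_threshold is set to half the pool so readers
 * can be woken before the buffers wrap.
 */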
static int
#if defined(__i386__) || defined(__x86_64__)
create_buffers(boolean_t early_trace)
#else
create_buffers(__unused boolean_t early_trace)
#endif
{
    int i;
    int p_buffer_size;
    int f_buffer_size;
    int f_buffers;
    int error = 0;

    /*
     * get the number of cpus and cache it
     */
#if defined(__i386__) || defined(__x86_64__)
    if (early_trace == TRUE) {
        /*
         * we've started tracing before the
         * IOKit has even started running... just
         * use the static max value
         */
        kd_cpus = max_ncpus;
    } else
#endif
    {
        host_basic_info_data_t hinfo;
        mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

#define BSD_HOST 1
        host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
        kd_cpus = hinfo.logical_cpu_max;
    }
    if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS) {
        error = ENOSPC;
        goto out;
    }
    trace_handler_map_bufinfo((uintptr_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

#if !defined(NATIVE_TRACE_FACILITY)
    for (i = 0; i < (int)kd_cpus; i++) {
        get_nanotime_timebases(i,
                               &kd_ctrl_page.cpu_timebase[i].tsc_base,
                               &kd_ctrl_page.cpu_timebase[i].ns_base);
    }
#endif

    if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
        n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
    else
        n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

    nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

    f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
    n_storage_buffers = f_buffers;

    f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
    p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

    if (p_buffer_size)
        n_storage_buffers++;

    if (kdcopybuf == 0) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
    }
    if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
        error = ENOSPC;
        goto out;
    }
    bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

    for (i = 0; i < f_buffers; i++) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
        bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

        kd_bufs[i].kdsb_size = f_buffer_size;
    }
    if (p_buffer_size) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
        bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

        kd_bufs[i].kdsb_size = p_buffer_size;
    }
    n_storage_units = 0;

    for (i = 0; i < n_storage_buffers; i++) {
        struct kd_storage *kds;
        int n_elements;
        int n;

        n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
        kds = kd_bufs[i].kdsb_addr;

        trace_handler_map_buffer(i, (uintptr_t)kd_bufs[i].kdsb_addr, kd_bufs[i].kdsb_size);

        for (n = 0; n < n_elements; n++) {
            kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
            kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

            kd_ctrl_page.kds_free_list.buffer_index = i;
            kd_ctrl_page.kds_free_list.offset = n;
        }
        n_storage_units += n_elements;
    }
    bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

    for (i = 0; i < (int)kd_cpus; i++) {
        kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
        kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
        kdbip[i].kd_lostevents = FALSE;
        kdbip[i].num_bufs = 0;
    }
    kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

    kd_ctrl_page.kds_inuse_count = 0;
    n_storage_threshold = n_storage_units / 2;
out:
    if (error)
        delete_buffers();

    return(error);
}
static void
delete_buffers(void)
{
    int i;

    if (kd_bufs) {
        for (i = 0; i < n_storage_buffers; i++) {
            if (kd_bufs[i].kdsb_addr) {
                kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
                trace_handler_unmap_buffer(i);
            }
        }
        kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

        kd_bufs = NULL;
        n_storage_buffers = 0;
    }
    if (kdcopybuf) {
        kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

        kdcopybuf = NULL;
    }
    kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

    if (kdbip) {
        trace_handler_unmap_bufinfo();

        kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

        kdbip = NULL;
    }
    kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}
#ifdef NATIVE_TRACE_FACILITY
void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
    int s = 0;
    struct kd_storage *kdsp_actual;
    struct kd_bufinfo *kdbp;
    union kds_ptr kdsp;

    kdsp.raw = kdsp_raw;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    kdbp = &kdbip[cpu];

    if (kdsp.raw == kdbp->kd_list_head.raw) {
        /*
         * it's possible for the storage unit pointed to
         * by kdsp to have already been stolen... so
         * check to see if it's still the head of the list
         * now that we're behind the lock that protects
         * adding and removing from the queue...
         * since we only ever release and steal units from
         * that position, if it's no longer the head
         * we having nothing to do in this context
         */
        kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
        kdbp->kd_list_head = kdsp_actual->kds_next;

        kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
        kd_ctrl_page.kds_free_list = kdsp;

        kd_ctrl_page.kds_inuse_count--;
    }
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}
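/*
 * allocate_storage_unit() gives the calling CPU a fresh storage unit,
 * preferring the free list.  If the pool is exhausted and wrapping is
 * allowed, it steals the unit with the oldest first-event timestamp from
 * whichever CPU owns it, marking lost events so the reader can report the
 * gap.  A FALSE return (wrapping disabled, or no victim available) stops
 * logging.
 */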
boolean_t
allocate_storage_unit(int cpu)
{
    union kds_ptr kdsp;
    struct kd_storage *kdsp_actual, *kdsp_next_actual;
    struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
    uint64_t oldest_ts, ts;
    boolean_t retval = TRUE;
    int s = 0;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    kdbp = &kdbip[cpu];

    /* If someone beat us to the allocate, return success */
    if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
        kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

        if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
            goto out;
    }
    if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
        kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
        kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

        kd_ctrl_page.kds_inuse_count++;
    } else {
        if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
            kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
            kdbp->kd_lostevents = TRUE;
            retval = FALSE;
            goto out;
        }
        kdbp_vict = NULL;
        oldest_ts = (uint64_t)-1;

        for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

            if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
                /*
                 * no storage unit to steal
                 */
                continue;
            }
            kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

            if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
                /*
                 * make sure we don't steal the storage unit
                 * being actively recorded to... need to
                 * move on because we don't want an out-of-order
                 * set of events showing up later
                 */
                continue;
            }
            ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);

            if (ts < oldest_ts) {
                /*
                 * when 'wrapping', we want to steal the
                 * storage unit that has the 'earliest' time
                 * associated with it (first event time)
                 */
                oldest_ts = ts;
                kdbp_vict = kdbp_try;
            }
        }
        if (kdbp_vict == NULL) {
            kdebug_enable = 0;
            kd_ctrl_page.enabled = 0;
            retval = FALSE;
            goto out;
        }
        kdsp = kdbp_vict->kd_list_head;
        kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
        kdbp_vict->kd_list_head = kdsp_actual->kds_next;

        if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
            kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
            kdsp_next_actual->kds_lostevents = TRUE;
        } else
            kdbp_vict->kd_lostevents = TRUE;

        kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
    }
    kdsp_actual->kds_timestamp = mach_absolute_time();
    kdsp_actual->kds_next.raw = KDS_PTR_NULL;
    kdsp_actual->kds_bufcnt = 0;
    kdsp_actual->kds_readlast = 0;

    kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
    kdbp->kd_lostevents = FALSE;
    kdsp_actual->kds_bufindx = 0;

    if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
        kdbp->kd_list_head = kdsp;
    else
        POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
    kdbp->kd_list_tail = kdsp;
out:
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);

    return (retval);
}
#endif
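/*
 * kernel_debug_internal() is the hot path.  kd_ctrl_page.kdebug_slowcheck is
 * non-zero only when CHUD, entropy collection, or some form of filtering is
 * active, so the common case falls straight through to the lock-free record
 * path: pick the tail storage unit for this CPU, reserve a slot with
 * OSCompareAndSwap on kds_bufindx, fill in the kd_buf, then bump kds_bufcnt
 * with OSAddAtomic.
 */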
static void
kernel_debug_internal(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    uintptr_t arg5,
    int       entropy_flag);

__attribute__((always_inline)) void
kernel_debug_internal(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    uintptr_t arg5,
    int       entropy_flag)
{
    struct proc *curproc;
    uint64_t now;
    uint32_t bindx;
    int      s;
    kd_buf  *kd;
    int      cpu;
    struct kd_bufinfo *kdbp;
    struct kd_storage *kdsp_actual;
    union kds_ptr kds_raw;

    if (kd_ctrl_page.kdebug_slowcheck) {

        if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
            kd_chudhook_fn chudhook;
            /*
             * Mask interrupts to minimize the interval across
             * which the driver providing the hook could be
             * unloaded.
             */
            s = ml_set_interrupts_enabled(FALSE);
            chudhook = kdebug_chudhook;
            if (chudhook)
                chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
            ml_set_interrupts_enabled(s);
        }
        if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {

            now = mach_absolute_time();

            s = ml_set_interrupts_enabled(FALSE);
            lck_spin_lock(kds_spin_lock);

            if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {

                if (kd_entropy_indx < kd_entropy_count) {
                    kd_entropy_buffer[kd_entropy_indx] = now;
                    kd_entropy_indx++;
                }
                if (kd_entropy_indx == kd_entropy_count) {
                    /*
                     * Disable entropy collection
                     */
                    kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
                    kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
                }
            }
            lck_spin_unlock(kds_spin_lock);
            ml_set_interrupts_enabled(s);
        }
        if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
            goto out1;

        if ( !ml_at_interrupt_context()) {
            if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
                /*
                 * If kdebug flag is not set for current proc, return
                 */
                curproc = current_proc();

                if ((curproc && !(curproc->p_kdebug)) &&
                    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
                    (debugid >> 24 != DBG_TRACE))
                    goto out1;
            }
            else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
                /*
                 * If kdebug flag is set for current proc, return
                 */
                curproc = current_proc();

                if ((curproc && curproc->p_kdebug) &&
                    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
                    (debugid >> 24 != DBG_TRACE))
                    goto out1;
            }
        }
        if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
            /* Always record trace system info */
            if (EXTRACT_CLASS(debugid) == DBG_TRACE)
                goto record_event;

            if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
                goto record_event;
            goto out1;
        }
        else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
            if ((debugid >= kdlog_beg && debugid <= kdlog_end) || (debugid >> 24) == DBG_TRACE)
                goto record_event;
            if (kdlog_sched_events && (debugid & 0xffff0000) == (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE))
                goto record_event;
            goto out1;
        }
        else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
            if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
                (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
                (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
                (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
                (debugid >> 24 != DBG_TRACE))
                goto out1;
        }
    }
record_event:
    disable_preemption();
    cpu = cpu_number();
    kdbp = &kdbip[cpu];
retry_q:
    kds_raw = kdbp->kd_list_tail;

    if (kds_raw.raw != KDS_PTR_NULL) {
        kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
        bindx = kdsp_actual->kds_bufindx;
    } else
        kdsp_actual = NULL;

    if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
        if (allocate_storage_unit(cpu) == FALSE) {
            /*
             * this can only happen if wrapping
             * has been disabled
             */
            goto out;
        }
        goto retry_q;
    }
    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

    if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
        goto retry_q;

    kd = &kdsp_actual->kds_records[bindx];

    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = arg5;

    kdbg_set_timestamp_and_cpu(kd, now, cpu);

    OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
    enable_preemption();
out1:
    if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
        (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
        uint32_t etype;
        uint32_t stype;

        etype = debugid & DBG_FUNC_MASK;
        stype = debugid & DBG_SCALL_MASK;

        if (etype == INTERRUPT || etype == MACH_vmfault ||
            stype == BSC_SysCall || stype == MACH_SysCall) {

            boolean_t need_kds_wakeup = FALSE;
            boolean_t need_kde_wakeup = FALSE;

            /*
             * try to take the lock here to synchronize with the
             * waiter entering the blocked state... use the try
             * mode to prevent deadlocks caused by re-entering this
             * routine due to various trace points triggered in the
             * lck_spin_sleep_xxxx routines used to actually enter
             * one of our 2 wait conditions... no problem if we fail,
             * there will be lots of additional events coming in that
             * will eventually succeed in grabbing this lock
             */
            s = ml_set_interrupts_enabled(FALSE);

            if (lck_spin_try_lock(kdw_spin_lock)) {

                if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
                    kds_waiter = 0;
                    need_kds_wakeup = TRUE;
                }
                if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
                    kde_waiter = 0;
                    need_kde_wakeup = TRUE;
                }
                lck_spin_unlock(kdw_spin_lock);
            }
            ml_set_interrupts_enabled(s);

            if (need_kds_wakeup == TRUE)
                wakeup(&kds_waiter);
            if (need_kde_wakeup == TRUE)
                wakeup(&kde_waiter);
        }
    }
}
void
kernel_debug(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    __unused uintptr_t arg5)
{
    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    uintptr_t arg5)
{
    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
}
/*
 * Support syscall SYS_kdebug_trace
 */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
    if ( __probable(kdebug_enable == 0) )
        return(EINVAL);

    kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);

    return(0);
}
static void
kdbg_lock_init(void)
{
    if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
        return;

    trace_handler_map_ctrl_page((uintptr_t)&kd_ctrl_page, sizeof(kd_ctrl_page), sizeof(struct kd_storage), sizeof(union kds_ptr));

    /*
     * allocate lock group attribute and group
     */
    kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
    kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

    /*
     * allocate the lock attribute
     */
    kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

    /*
     * allocate and initialize mutex's
     */
    kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
    kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
    kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

    kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}
int
kdbg_bootstrap(boolean_t early_trace)
{
    kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

    return (create_buffers(early_trace));
}
int
kdbg_reinit(boolean_t early_trace)
{
    int ret = 0;

    /*
     * Disable trace collecting
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     * to cut a trace..
     */
    IOSleep(100);

    delete_buffers();

    if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }
    ret = kdbg_bootstrap(early_trace);

    RAW_file_offset = 0;
    RAW_file_written = 0;

    return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
    if (!proc)
        *arg_pid = 0;
    else
        *arg_pid = proc->p_pid;
}
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
    char *dbg_nameptr;
    int dbg_namelen;
    long dbg_parms[4];

    if (!proc) {
        *arg1 = 0;
        *arg2 = 0;
        *arg3 = 0;
        *arg4 = 0;
        return;
    }
    /*
     * Collect the pathname for tracing
     */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = (int)strlen(proc->p_comm);
    dbg_parms[0] = 0L;
    dbg_parms[1] = 0L;
    dbg_parms[2] = 0L;
    dbg_parms[3] = 0L;

    if(dbg_namelen > (int)sizeof(dbg_parms))
        dbg_namelen = (int)sizeof(dbg_parms);

    strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

    *arg1 = dbg_parms[0];
    *arg2 = dbg_parms[1];
    *arg3 = dbg_parms[2];
    *arg4 = dbg_parms[3];
}
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
    kd_threadmap *mapptr;
    krt_t *t = (krt_t *)opaque;

    if (t->count < t->maxcount) {
        mapptr = &t->map[t->count];
        mapptr->thread = (uintptr_t)thread_tid(th_act);

        (void) strlcpy (mapptr->command, t->atts->task_comm,
                        sizeof(t->atts->task_comm));
        /*
         * Some kernel threads have no associated pid.
         * We still need to mark the entry as valid.
         */
        if (t->atts->pid)
            mapptr->valid = t->atts->pid;
        else
            mapptr->valid = 1;

        t->count++;
    }
}
void
kdbg_mapinit(void)
{
    struct proc *p;
    struct krt   akrt;
    int          tts_count;    /* number of task-to-string structures */
    struct tts  *tts_mapptr;
    unsigned int tts_mapsize = 0;
    vm_offset_t  tts_maptomem=0;
    int          i;

    if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
        return;

    /*
     * need to use PROC_SCANPROCLIST with proc_iterate
     */
    proc_list_lock();

    /*
     * Calculate the sizes of map buffers
     */
    for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
        kd_mapcount += get_task_numacts((task_t)p->task);
        tts_count++;
    }
    proc_list_unlock();

    /*
     * The proc count could change during buffer allocation,
     * so introduce a small fudge factor to bump up the
     * buffer sizes. This gives new tasks some chance of
     * making into the tables. Bump up by 10%.
     */
    kd_mapcount += kd_mapcount/10;
    tts_count += tts_count/10;

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

    if ((kmem_alloc(kernel_map, & kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
        kd_mapptr = (kd_threadmap *) kd_maptomem;
        bzero(kd_mapptr, kd_mapsize);
    } else
        kd_mapptr = (kd_threadmap *) 0;

    tts_mapsize = tts_count * sizeof(struct tts);

    if ((kmem_alloc(kernel_map, & tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
        tts_mapptr = (struct tts *) tts_maptomem;
        bzero(tts_mapptr, tts_mapsize);
    } else
        tts_mapptr = (struct tts *) 0;

    /*
     * We need to save the procs command string
     * and take a reference for each task associated
     * with a valid process
     */
    if (tts_mapptr) {
        /*
         * should use proc_iterate
         */
        proc_list_lock();

        for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
            if (p->p_lflag & P_LEXIT)
                continue;

            if (p->task) {
                task_reference(p->task);
                tts_mapptr[i].task = p->task;
                tts_mapptr[i].pid  = p->p_pid;
                (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
                i++;
            }
        }
        tts_count = i;

        proc_list_unlock();
    }
    if (kd_mapptr && tts_mapptr) {
        kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;

        /*
         * Initialize thread map data
         */
        akrt.map = kd_mapptr;
        akrt.count = 0;
        akrt.maxcount = kd_mapcount;

        for (i = 0; i < tts_count; i++) {
            akrt.atts = &tts_mapptr[i];
            task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
            task_deallocate((task_t) tts_mapptr[i].task);
        }
        kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
    }
}
static void
kdbg_clear(void)
{
    /*
     * Clean up the trace buffer
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     * to cut a trace..
     */
    IOSleep(100);

    kdlog_sched_events = 0;
    global_state_pid = -1;
    kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

    kdbg_disable_typefilter();

    delete_buffers();
    nkdbufs = 0;

    /* Clean up the thread map buffer */
    kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
    if (kd_mapptr) {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kd_mapptr = (kd_threadmap *) 0;
    }
    kd_mapsize = 0;
    kd_mapcount = 0;

    RAW_file_offset = 0;
    RAW_file_written = 0;
}
int
kdbg_setpid(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0) {
        if ((p = proc_find(pid)) == NULL)
            ret = ESRCH;
        else {
            if (flag == 1) {
                /*
                 * turn on pid check for this and all pids
                 */
                kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
                kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
                kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

                p->p_kdebug = 1;
            } else {
                /*
                 * turn off pid check for this pid value
                 * Don't turn off all pid checking though
                 *
                 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
                 */
                p->p_kdebug = 0;
            }
            proc_rele(p);
        }
    }
    else
        ret = EINVAL;

    return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0) {
        if ((p = proc_find(pid)) == NULL)
            ret = ESRCH;
        else {
            if (flag == 1) {
                /*
                 * turn on pid exclusion
                 */
                kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
                kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
                kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

                p->p_kdebug = 1;
            } else {
                /*
                 * turn off pid exclusion for this pid value
                 * Don't turn off all pid exclusion though
                 *
                 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
                 */
                p->p_kdebug = 0;
            }
            proc_rele(p);
        }
    } else
        ret = EINVAL;

    return(ret);
}
/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
    int ret = 0;
    natural_t decval;

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)
        ret = EINVAL;
    else
        ret = ENOTSUP;

    return(ret);
}
static int
kdbg_enable_typefilter(void)
{
    if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
        /* free the old filter */
        kdbg_disable_typefilter();
    }

    if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
        return ENOSPC;
    }

    bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);

    /* Turn off range and value checks */
    kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);

    /* Enable filter checking */
    kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
    kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
    return 0;
}
static int
kdbg_disable_typefilter(void)
{
    /* Disable filter checking */
    kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;

    /* Turn off slow checks unless pid checks are using them */
    if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
        kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
    else
        kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

    if(type_filter_bitmap == NULL)
        return 0;

    vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
    type_filter_bitmap = NULL;

    kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
    return 0;
}
int
kdbg_setreg(kd_regtype * kdr)
{
    int ret = 0;
    unsigned int val_1, val_2, val;

    kdlog_sched_events = 0;

    switch (kdr->type) {

    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);

        if (val_1 == DBG_FSYSTEM && val_2 == (DBG_FSYSTEM + 1))
            kdlog_sched_events = 1;

        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
        break;
    case KDBG_VALCHECK :
        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;     /* Turn off range check */
        kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;        /* Turn on specific value check */
        kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
        break;
    case KDBG_TYPENONE :
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

        if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK   |
                                           KDBG_PIDCHECK   | KDBG_PIDEXCLUDE |
                                           KDBG_TYPEFILTER_CHECK)) )
            kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
        else
            kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
    return(ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
    return(EINVAL);
#if 0
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {
    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = val_1 + 1;
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_TYPENONE :
        kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
    return(ret);
#endif /* 0 */
}
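/*
 * When handed a vnode, kdbg_readmap() emits the on-disk RAW_VERSION1 layout:
 * a RAW_header (version, thread count, time of day), the kd_threadmap array,
 * then zero padding out to a page boundary so the kd_buf stream written after
 * it is page aligned.  With no vnode it simply copies the thread map out to
 * the user buffer.
 */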
int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
    int avail = *number;
    int ret = 0;
    uint32_t count = 0;

    count = avail/sizeof (kd_threadmap);

    if (count && (count <= kd_mapcount))
    {
        if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
        {
            if (*number < kd_mapsize)
                ret = EINVAL;
            else
            {
                if (vp)
                {
                    RAW_header   header;
                    clock_sec_t  secs;
                    clock_usec_t usecs;
                    char *pad_buf;
                    int   pad_size;

                    header.version_no = RAW_VERSION1;
                    header.thread_count = count;

                    clock_get_calendar_microtime(&secs, &usecs);
                    header.TOD_secs = secs;
                    header.TOD_usecs = usecs;

                    ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
                                  UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

                    RAW_file_offset += sizeof(RAW_header);

                    ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
                                  UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

                    RAW_file_offset += kd_mapsize;

                    pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);

                    if (pad_size)
                    {
                        pad_buf = (char *)kalloc(pad_size);
                        memset(pad_buf, 0, pad_size);

                        ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
                                      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
                        kfree(pad_buf, pad_size);

                        RAW_file_offset += pad_size;
                    }
                    RAW_file_written += sizeof(RAW_header) + kd_mapsize + pad_size;
                } else {
                    if (copyout(kd_mapptr, buffer, kd_mapsize))
                        ret = EINVAL;
                }
            }
        }
        else
            ret = EINVAL;
    }
    else
        ret = EINVAL;

    if (ret && vp)
    {
        count = 0;

        vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
                UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
        RAW_file_offset += sizeof(uint32_t);
        RAW_file_written += sizeof(uint32_t);
    }
    if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
    {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }
    return(ret);
}
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
    int avail = *number;
    int ret = 0;
    int s;
    u_int64_t abstime;
    u_int64_t ns;
    int wait_result = THREAD_AWAKENED;

    if (kd_entropy_buffer)
        return(EBUSY);

    kd_entropy_count = avail/sizeof(uint64_t);

    if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
        /*
         * Enforce maximum entropy entries
         */
        return(EINVAL);
    }
    kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);

    /*
     * allocate entropy buffer
     */
    if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
        kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
    } else {
        kd_entropy_buffer = (uint64_t *) 0;
        kd_entropy_count = 0;

        return (ENOMEM);
    }
    kd_entropy_indx = 0;

    KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);

    /*
     * Enable entropy sampling
     */
    kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);

    if (ms_timeout) {
        ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
        nanoseconds_to_absolutetime(ns, &abstime );
        clock_absolutetime_interval_to_deadline( abstime, &abstime );
    } else
        abstime = 0;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kdw_spin_lock);

    while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {

        kde_waiter = 1;

        if (abstime) {
            /*
             * wait for the specified timeout or
             * until we've hit our sample limit
             */
            wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
        } else {
            /*
             * wait until we've hit our sample limit
             */
            wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
        }
        kde_waiter = 0;
    }
    lck_spin_unlock(kdw_spin_lock);
    ml_set_interrupts_enabled(s);

    /*
     * Disable entropy sampling
     */
    kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);

    KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);

    *number = 0;

    if (kd_entropy_indx > 0) {
        /*
         * copyout the buffer
         */
        if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
            ret = EINVAL;
        else
            *number = kd_entropy_indx * sizeof(uint64_t);
    }
    /*
     * Always cleanup
     */
    kd_entropy_count = 0;
    kd_entropy_indx = 0;
    kd_entropy_buftomem = 0;
    kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
    kd_entropy_buffer = (uint64_t *) 0;

    return(ret);
}
unsigned int
kdbg_set_nkdbufs(unsigned int value)
{
    /*
     * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
     * 'value' is the desired number of trace entries
     */
    unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

    if (value <= max_entries)
        return (value);
    else
        return (max_entries);
}
static void
kdbg_enable_bg_trace(void)
{
    if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
        nkdbufs = bg_nkdbufs;
        kdbg_reinit(FALSE);
        kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
        kdlog_bg_trace_running = TRUE;
    }
}

static void
kdbg_disable_bg_trace(void)
{
    if (kdlog_bg_trace_running == TRUE) {
        kdlog_bg_trace_running = FALSE;
        kdbg_clear();
    }
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
    kdbg_lock_init();

    if (val) {
        /* enable chudhook */
        kdebug_chudhook = fn;
        kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
    }
    else {
        /* disable chudhook */
        kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
        kdebug_chudhook = 0;
    }
}
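/*
 * kdbg_control() is the sysctl(2) entry point behind
 * { CTL_KERN, KERN_KDEBUG, <op>, <value>, 0, 0 }.  A rough, illustrative
 * sketch of the usual user-space sequence (error handling omitted; not taken
 * from the original source):
 *
 *	int mib[6] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, nbufs, 0, 0 };
 *	sysctl(mib, 4, NULL, NULL, NULL, 0);		// size the buffers
 *	mib[2] = KERN_KDSETUP;
 *	sysctl(mib, 3, NULL, NULL, NULL, 0);		// allocate them
 *	mib[2] = KERN_KDENABLE; mib[3] = KDEBUG_ENABLE_TRACE;
 *	sysctl(mib, 4, NULL, NULL, NULL, 0);		// start tracing
 *	...
 *	mib[2] = KERN_KDREADTR;
 *	sysctl(mib, 3, buf, &len, NULL, 0);		// drain events
 */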
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    int ret = 0;
    size_t size = *sizep;
    unsigned int value = 0;
    kd_regtype kd_Reg;
    kbufinfo_t kd_bufinfo;
    pid_t curpid;
    proc_t p, curproc;

    if (name[0] == KERN_KDGETENTROPY ||
        name[0] == KERN_KDWRITETR ||
        name[0] == KERN_KDWRITEMAP ||
        name[0] == KERN_KDEFLAGS ||
        name[0] == KERN_KDDFLAGS ||
        name[0] == KERN_KDENABLE ||
        name[0] == KERN_KDENABLE_BG_TRACE ||
        name[0] == KERN_KDSETBUF) {

        if ( namelen < 2 )
            return(EINVAL);
        value = name[1];
    }

    kdbg_lock_init();

    if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
        return(ENOSPC);

    lck_mtx_lock(kd_trace_mtx_sysctl);

    switch(name[0]) {

    case KERN_KDGETBUF:
        /*
         * Does not alter the global_state_pid
         * This is a passive request.
         */
        if (size < sizeof(kd_bufinfo.nkdbufs)) {
            /*
             * There is not enough room to return even
             * the first element of the info structure.
             */
            ret = EINVAL;
            goto out;
        }
        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

        if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
            kd_bufinfo.nolog = 1;
        else
            kd_bufinfo.nolog = 0;

        kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
#if defined(__LP64__)
        kd_bufinfo.flags |= KDBG_LP64;
#endif
        kd_bufinfo.bufid = global_state_pid;

        if (size >= sizeof(kd_bufinfo)) {
            /*
             * Provide all the info we have
             */
            if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
                ret = EINVAL;
        } else {
            /*
             * For backwards compatibility, only provide
             * as much info as there is room for.
             */
            if (copyout(&kd_bufinfo, where, size))
                ret = EINVAL;
        }
        goto out;

    case KERN_KDGETENTROPY:
        if (kd_entropy_buffer)
            ret = EBUSY;
        else
            ret = kdbg_getentropy(where, sizep, value);
        goto out;

    case KERN_KDENABLE_BG_TRACE:
        bg_nkdbufs = kdbg_set_nkdbufs(value);
        kdlog_bg_trace = TRUE;
        kdbg_enable_bg_trace();
        goto out;

    case KERN_KDDISABLE_BG_TRACE:
        kdlog_bg_trace = FALSE;
        kdbg_disable_bg_trace();
        goto out;
    }

    if ((curproc = current_proc()) != NULL)
        curpid = curproc->p_pid;
    else {
        ret = ESRCH;
        goto out;
    }
    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid) {
        if ((p = proc_find(global_state_pid)) == NULL) {
            /*
             * The global pid no longer exists
             */
            global_state_pid = curpid;
        } else {
            /*
             * The global pid exists, deny this request
             */
            proc_rele(p);

            ret = EBUSY;
            goto out;
        }
    }

    switch(name[0]) {
    case KERN_KDEFLAGS:
        kdbg_disable_bg_trace();

        value &= KDBG_USERFLAGS;
        kd_ctrl_page.kdebug_flags |= value;
        break;
    case KERN_KDDFLAGS:
        kdbg_disable_bg_trace();

        value &= KDBG_USERFLAGS;
        kd_ctrl_page.kdebug_flags &= ~value;
        break;
    case KERN_KDENABLE:
        /*
         * Enable tracing mechanism. Two types:
         * KDEBUG_TRACE is the standard one,
         * and KDEBUG_PPT which is a carefully
         * chosen subset to avoid performance impact.
         */
        if (value) {
            /*
             * enable only if buffer is initialized
             */
            if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
                !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
                ret = EINVAL;
                break;
            }
            kdbg_mapinit();

            kdbg_set_tracing_enabled(TRUE, value);
        }
        else
            kdbg_set_tracing_enabled(FALSE, 0);
        break;
    case KERN_KDSETBUF:
        kdbg_disable_bg_trace();

        nkdbufs = kdbg_set_nkdbufs(value);
        break;
    case KERN_KDSETUP:
        kdbg_disable_bg_trace();

        ret = kdbg_reinit(FALSE);
        break;
    case KERN_KDREMOVE:
        kdbg_clear();

        kdbg_enable_bg_trace();
        break;
    case KERN_KDSETREG:
        if(size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        kdbg_disable_bg_trace();

        ret = kdbg_setreg(&kd_Reg);
        break;
    case KERN_KDGETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
            ret = EINVAL;
        }
        kdbg_disable_bg_trace();
        break;
    case KERN_KDREADTR:
        ret = kdbg_read(where, sizep, NULL, NULL);
        break;
    case KERN_KDWRITETR:
    case KERN_KDWRITEMAP:
    {
        struct vfs_context context;
        struct fileproc *fp;
        size_t number;
        vnode_t vp;
        int fd;

        kdbg_disable_bg_trace();

        if (name[0] == KERN_KDWRITETR) {
            int s;
            int wait_result = THREAD_AWAKENED;
            u_int64_t abstime;
            u_int64_t ns;

            if (*sizep) {
                ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
                nanoseconds_to_absolutetime(ns, &abstime );
                clock_absolutetime_interval_to_deadline( abstime, &abstime );
            } else
                abstime = 0;

            s = ml_set_interrupts_enabled(FALSE);
            lck_spin_lock(kdw_spin_lock);

            while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

                kds_waiter = 1;

                if (abstime)
                    wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
                else
                    wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

                kds_waiter = 0;
            }
            lck_spin_unlock(kdw_spin_lock);
            ml_set_interrupts_enabled(s);
        }
        p = current_proc();
        fd = value;

        proc_fdlock(p);
        if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
            proc_fdunlock(p);
            break;
        }
        context.vc_thread = current_thread();
        context.vc_ucred = fp->f_fglob->fg_cred;

        if (fp->f_fglob->fg_type != DTYPE_VNODE) {
            fp_drop(p, fd, fp, 1);
            proc_fdunlock(p);

            ret = EBADF;
            break;
        }
        vp = (struct vnode *)fp->f_fglob->fg_data;
        proc_fdunlock(p);

        if ((ret = vnode_getwithref(vp)) == 0) {

            if (name[0] == KERN_KDWRITETR) {
                number = nkdbufs * sizeof(kd_buf);

                KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
                ret = kdbg_read(0, &number, vp, &context);
                KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);

                *sizep = number;
            } else {
                number = kd_mapsize;
                kdbg_readmap(0, &number, vp, &context);
            }
            vnode_put(vp);
        }
        fp_drop(p, fd, fp, 0);

        break;
    }
    case KERN_KDPIDTR:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        kdbg_disable_bg_trace();

        ret = kdbg_setpid(&kd_Reg);
        break;
    case KERN_KDPIDEX:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        kdbg_disable_bg_trace();

        ret = kdbg_setpidex(&kd_Reg);
        break;
    case KERN_KDTHRMAP:
        ret = kdbg_readmap(where, sizep, NULL, NULL);
        break;
    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        kdbg_disable_bg_trace();

        ret = kdbg_setrtcdec(&kd_Reg);
        break;
    case KERN_KDSET_TYPEFILTER:
        kdbg_disable_bg_trace();

        if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
            if ((ret = kdbg_enable_typefilter()))
                break;
        }
        if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
            ret = EINVAL;
            break;
        }
        break;
    default:
        ret = EINVAL;
    }
out:
    lck_mtx_unlock(kd_trace_mtx_sysctl);
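    return(ret);
}

/*
 * kdbg_read() merges the per-CPU storage unit lists into a single
 * time-ordered stream.  Each pass picks the CPU whose next unread record has
 * the smallest timestamp, copies it into kdcopybuf, and either copyout()s the
 * batch or vn_rdwr()s it to the trace file, flushing via cluster_push() every
 * RAW_FLUSH_SIZE bytes.
 */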
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
    unsigned int count;
    unsigned int cpu, min_cpu;
    uint64_t mintime, t;
    int error = 0;
    kd_buf *tempbuf;
    uint32_t rcursor;
    kd_buf lostevent;
    union kds_ptr kdsp;
    struct kd_storage *kdsp_actual;
    struct kd_bufinfo *kdbp;
    struct kd_bufinfo *min_kdbp;
    uint32_t tempbuf_count;
    uint32_t tempbuf_number;
    uint32_t old_kdebug_flags;
    uint32_t old_kdebug_slowcheck;
    boolean_t lostevents = FALSE;
    boolean_t out_of_events = FALSE;

    count = *number/sizeof(kd_buf);
    *number = 0;

    if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
        return EINVAL;

    memset(&lostevent, 0, sizeof(lostevent));
    lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);

    /*
     * because we hold kd_trace_mtx_sysctl, no other control threads can
     * be playing with kdebug_flags... the code that cuts new events could
     * be running, but it grabs kds_spin_lock if it needs to acquire a new
     * storage chunk which is where it examines kdebug_flags... if it's adding
     * to the same chunk we're reading from, no problem...
     */
    disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);

    if (count > nkdbufs)
        count = nkdbufs;

    if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
        tempbuf_count = KDCOPYBUF_COUNT;

    while (count) {
        tempbuf = kdcopybuf;
        tempbuf_number = 0;

        while (tempbuf_count) {
            mintime = 0xffffffffffffffffULL;
            min_kdbp = NULL;
            min_cpu = 0;

            for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

                // Find one with raw data
                if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
                    continue;

                // Get from cpu data to buffer header to buffer
                kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

                // See if there are actual data left in this buffer
                rcursor = kdsp_actual->kds_readlast;

                if (rcursor == kdsp_actual->kds_bufindx)
                    continue;

                t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

                if (t < kdsp_actual->kds_timestamp) {
                    /*
                     * indicates we've not yet completed filling
                     * in this event...
                     * this should only occur when we're looking
                     * at the buf that the record head is utilizing
                     * we'll pick these events up on the next
                     * call to kdbg_read
                     * we bail at this point so that we don't
                     * get an out-of-order timestream by continuing
                     * to read events from the other CPUs' timestream(s)
                     */
                    out_of_events = TRUE;
                    break;
                }
                if (t < mintime) {
                    mintime = t;
                    min_kdbp = kdbp;
                    min_cpu = cpu;
                }
            }
            if (min_kdbp == NULL || out_of_events == TRUE) {
                /*
                 * all buffers ran empty
                 */
                out_of_events = TRUE;
                break;
            }
            kdsp = min_kdbp->kd_list_head;
            kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

            if (kdsp_actual->kds_lostevents == TRUE) {
                kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
                *tempbuf = lostevent;

                kdsp_actual->kds_lostevents = FALSE;
                lostevents = TRUE;

                goto nextevent;
            }
            *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

            if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
                release_storage_unit(min_cpu, kdsp.raw);

            /*
             * Watch for out of order timestamps
             */
            if (mintime < min_kdbp->kd_prev_timebase) {
                /*
                 * if so, use the previous timestamp + 1 cycle
                 */
                min_kdbp->kd_prev_timebase++;
                kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
            } else
                min_kdbp->kd_prev_timebase = mintime;
nextevent:
            tempbuf_count--;
            tempbuf_number++;
            tempbuf++;

            if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
                break;
        }
        if (tempbuf_number) {

            if (vp) {
                error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
                                UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

                RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

                if (RAW_file_written >= RAW_FLUSH_SIZE) {
                    cluster_push(vp, 0);

                    RAW_file_written = 0;
                }
            } else {
                error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
                buffer += (tempbuf_number * sizeof(kd_buf));
            }
            if (error) {
                *number = 0;
                error = EINVAL;
                break;
            }
            count   -= tempbuf_number;
            *number += tempbuf_number;
        }
        if (out_of_events == TRUE)
            /*
             * all trace buffers are empty
             */
            break;

        if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
            tempbuf_count = KDCOPYBUF_COUNT;
    }
    if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
        enable_wrap(old_kdebug_slowcheck, lostevents);
    }
    return (error);
}
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

    return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
    stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

    stackshot_subsys_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
    int error = 0;

    if ((error = suser(kauth_cred_get(), &p->p_acflag)))
        return(error);

    return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
                           uap->flags, uap->dispatch_offset, retval);
}
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
    boolean_t istate;
    int error = 0;
    unsigned bytesTraced = 0;

    *retval = -1;

    /* Serialize tracing */
    STACKSHOT_SUBSYS_LOCK();

    if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
        error = EINVAL;
        goto error_exit;
    }

    assert(stackshot_snapbuf == NULL);
    if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
        error = ENOMEM;
        goto error_exit;
    }

    if (panic_active()) {
        error = ENOMEM;
        goto error_exit;
    }

    istate = ml_set_interrupts_enabled(FALSE);
    /* Preload trace parameters*/
    kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

    /* Trap to the debugger to obtain a coherent stack snapshot; this populates
     * the trace buffer
     */
    TRAP_DEBUGGER;

    ml_set_interrupts_enabled(istate);

    bytesTraced = kdp_stack_snapshot_bytes_traced();

    if (bytesTraced > 0) {
        if ((error = copyout(stackshot_snapbuf, tracebuf,
                             ((bytesTraced < tracebuf_size) ?
                              bytesTraced : tracebuf_size))))
            goto error_exit;
        *retval = bytesTraced;
    }
    else {
        error = ENOENT;
        goto error_exit;
    }

    error = kdp_stack_snapshot_geterror();
    if (error == -1) {
        error = ENOSPC;
        *retval = -1;
        goto error_exit;
    }

error_exit:
    if (stackshot_snapbuf != NULL)
        kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
    stackshot_snapbuf = NULL;
    STACKSHOT_SUBSYS_UNLOCK();
    return error;
}
void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map) {

    if (!new_nkdbufs)
        return;
    nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
    kdbg_lock_init();
    kdbg_reinit(TRUE);

    if (need_map == TRUE)
        kdbg_mapinit();
    kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);

#if defined(__i386__) || defined(__x86_64__)
    uint64_t now = mach_absolute_time();

    KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
                          (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
                          (uint32_t)(now >> 32), (uint32_t)now,
                          0);
#endif
    printf("kernel tracing started\n");
}
void
kdbg_dump_trace_to_file(const char *filename)
{
    vfs_context_t ctx;
    vnode_t       vp;
    int           error;
    size_t        number;


    if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
        return;

    if (global_state_pid != -1) {
        if ((proc_find(global_state_pid)) != NULL) {
            /*
             * The global pid exists, we're running
             * due to fs_usage, latency, etc...
             * don't cut the panic/shutdown trace file
             */
            return;
        }
    }
    KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

    kdebug_enable = 0;
    kd_ctrl_page.enabled = 0;

    ctx = vfs_context_kernel();

    if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
        return;

    number = kd_mapsize;
    kdbg_readmap(0, &number, vp, ctx);

    number = nkdbufs*sizeof(kd_buf);
    kdbg_read(0, &number, vp, ctx);

    vnode_close(vp, FWRITE, ctx);

    sync(current_proc(), (void *)NULL, (int *)NULL);
}
/* Helper function for filling in the BSD name for an address space
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
    proc_t proc;

    /* Note: we can't use thread->task (and functions that rely on it) here
     * because it hasn't been initialized yet when this function is called.
     * We use the explicitly-passed task parameter instead.
     */
    proc = get_bsdtask_info(task);
    if (proc != PROC_NULL)
        snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
    else
        snprintf(name_buf, len, "%p [!bsd]", task);
}
#if defined(NATIVE_TRACE_FACILITY)
void trace_handler_map_ctrl_page(__unused uintptr_t addr, __unused size_t ctrl_page_size, __unused size_t storage_size, __unused size_t kds_ptr_size)
{
}
void trace_handler_map_bufinfo(__unused uintptr_t addr, __unused size_t size)
{
}
void trace_handler_unmap_bufinfo(void)
{
}
void trace_handler_map_buffer(__unused int index, __unused uintptr_t addr, __unused size_t size)
{
}
void trace_handler_unmap_buffer(__unused int index)
{
}
#endif