2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @Apple_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
24 #include <machine/spl.h>
26 #include <sys/errno.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc_internal.h>
31 #include <sys/sysctl.h>
32 #include <sys/kdebug.h>
33 #include <sys/sysproto.h>
34 #include <sys/bsdtask_info.h>
37 #include <mach/clock_types.h>
38 #include <mach/mach_types.h>
39 #include <mach/mach_time.h>
40 #include <machine/machine_routines.h>
42 #if defined(__i386__) || defined(__x86_64__)
43 #include <i386/rtclock_protos.h>
45 #include <i386/machine_routines.h>
48 #include <kern/clock.h>
50 #include <kern/thread.h>
51 #include <kern/task.h>
52 #include <kern/debug.h>
53 #include <kern/kalloc.h>
54 #include <kern/cpu_data.h>
55 #include <kern/assert.h>
56 #include <vm/vm_kern.h>
59 #include <sys/malloc.h>
60 #include <sys/mcache.h>
61 #include <sys/kauth.h>
63 #include <sys/vnode.h>
64 #include <sys/vnode_internal.h>
65 #include <sys/fcntl.h>
66 #include <sys/file_internal.h>
69 #include <mach/mach_host.h> /* for host_info() */
70 #include <libkern/OSAtomic.h>
72 #include <machine/pal_routines.h>
74 /* XXX should have prototypes, but Mach does not provide one */
75 void task_act_iterate_wth_args(task_t
, void(*)(thread_t
, void *), void *);
76 int cpu_number(void); /* XXX <machine/...> include path broken */
78 /* XXX should probably be static, but it's debugging code... */
79 int kdbg_read(user_addr_t
, size_t *, vnode_t
, vfs_context_t
);
80 void kdbg_control_chud(int, void *);
81 int kdbg_control(int *, u_int
, user_addr_t
, size_t *);
82 int kdbg_getentropy (user_addr_t
, size_t *, int);
83 int kdbg_readmap(user_addr_t
, size_t *, vnode_t
, vfs_context_t
);
84 int kdbg_getreg(kd_regtype
*);
85 int kdbg_setreg(kd_regtype
*);
86 int kdbg_setrtcdec(kd_regtype
*);
87 int kdbg_setpidex(kd_regtype
*);
88 int kdbg_setpid(kd_regtype
*);
89 void kdbg_mapinit(void);
90 int kdbg_reinit(boolean_t
);
91 int kdbg_bootstrap(boolean_t
);
93 static int create_buffers(boolean_t
);
94 static void delete_buffers(void);
96 extern void IOSleep(int);
98 /* trace enable status */
99 unsigned int kdebug_enable
= 0;
101 /* track timestamps for security server's entropy needs */
102 uint64_t * kd_entropy_buffer
= 0;
103 unsigned int kd_entropy_bufsize
= 0;
104 unsigned int kd_entropy_count
= 0;
105 unsigned int kd_entropy_indx
= 0;
106 vm_offset_t kd_entropy_buftomem
= 0;
108 #define MAX_ENTROPY_COUNT (128 * 1024)
111 #define SLOW_NOLOG 0x01
112 #define SLOW_CHECKS 0x02
113 #define SLOW_ENTROPY 0x04
114 #define SLOW_CHUD 0x08
116 unsigned int kd_cpus
;
118 #define EVENTS_PER_STORAGE_UNIT 2048
119 #define MIN_STORAGE_UNITS_PER_CPU 4
121 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
123 #define NATIVE_TRACE_FACILITY
127 uint32_t buffer_index
:21;
134 union kds_ptr kds_next
;
135 uint32_t kds_bufindx
;
137 uint32_t kds_readlast
;
138 boolean_t kds_lostevents
;
139 uint64_t kds_timestamp
;
141 kd_buf kds_records
[EVENTS_PER_STORAGE_UNIT
];
144 #define MAX_BUFFER_SIZE (1024 * 1024 * 128)
145 #define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
147 struct kd_storage_buffers
{
148 struct kd_storage
*kdsb_addr
;
152 #define KDS_PTR_NULL 0xffffffff
153 struct kd_storage_buffers
*kd_bufs
= NULL
;
154 int n_storage_units
= 0;
155 int n_storage_buffers
= 0;
156 int n_storage_threshold
= 0;
162 union kds_ptr kd_list_head
;
163 union kds_ptr kd_list_tail
;
164 boolean_t kd_lostevents
;
166 uint64_t kd_prev_timebase
;
168 } __attribute__(( aligned(CPU_CACHE_SIZE
) ));
170 struct kd_ctrl_page_t
{
171 union kds_ptr kds_free_list
;
175 uint32_t kdebug_flags
;
176 uint32_t kdebug_slowcheck
;
181 } cpu_timebase
[32]; // should be max number of actual logical cpus
182 } kd_ctrl_page
= {.kds_free_list
= {.raw
= KDS_PTR_NULL
}, .enabled
= 0, .kds_inuse_count
= 0, .kdebug_flags
= 0, .kdebug_slowcheck
= SLOW_NOLOG
};
185 struct kd_bufinfo
*kdbip
= NULL
;
187 #define KDCOPYBUF_COUNT 8192
188 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
189 kd_buf
*kdcopybuf
= NULL
;
192 unsigned int nkdbufs
= 8192;
193 unsigned int kdlog_beg
=0;
194 unsigned int kdlog_end
=0;
195 unsigned int kdlog_value1
=0;
196 unsigned int kdlog_value2
=0;
197 unsigned int kdlog_value3
=0;
198 unsigned int kdlog_value4
=0;
200 static lck_spin_t
* kdw_spin_lock
;
201 static lck_spin_t
* kds_spin_lock
;
202 static lck_mtx_t
* kd_trace_mtx_sysctl
;
203 static lck_grp_t
* kd_trace_mtx_sysctl_grp
;
204 static lck_attr_t
* kd_trace_mtx_sysctl_attr
;
205 static lck_grp_attr_t
*kd_trace_mtx_sysctl_grp_attr
;
207 static lck_grp_t
*stackshot_subsys_lck_grp
;
208 static lck_grp_attr_t
*stackshot_subsys_lck_grp_attr
;
209 static lck_attr_t
*stackshot_subsys_lck_attr
;
210 static lck_mtx_t stackshot_subsys_mutex
;
212 void *stackshot_snapbuf
= NULL
;
215 stack_snapshot2(pid_t pid
, user_addr_t tracebuf
, uint32_t tracebuf_size
, uint32_t flags
, uint32_t dispatch_offset
, int32_t *retval
);
218 kdp_snapshot_preflight(int pid
, void *tracebuf
, uint32_t tracebuf_size
, uint32_t flags
, uint32_t dispatch_offset
);
221 kdp_stack_snapshot_geterror(void);
223 kdp_stack_snapshot_bytes_traced(void);
225 kd_threadmap
*kd_mapptr
= 0;
226 unsigned int kd_mapsize
= 0;
227 unsigned int kd_mapcount
= 0;
228 vm_offset_t kd_maptomem
= 0;
230 off_t RAW_file_offset
= 0;
231 int RAW_file_written
= 0;
233 #define RAW_FLUSH_SIZE (2 * 1024 * 1024)
236 pid_t global_state_pid
= -1; /* Used to control exclusive use of kd_buffer */
238 #define DBG_FUNC_MASK 0xfffffffc
240 #define INTERRUPT 0x01050000
241 #define MACH_vmfault 0x01300008
242 #define BSC_SysCall 0x040c0000
243 #define MACH_SysCall 0x010c0000
244 #define DBG_SCALL_MASK 0xffff0000
247 /* task to string structure */
250 task_t task
; /* from procs task */
251 pid_t pid
; /* from procs p_pid */
252 char task_comm
[20]; /* from procs p_comm */
255 typedef struct tts tts_t
;
259 kd_threadmap
*map
; /* pointer to the map buffer */
265 typedef struct krt krt_t
;
267 /* This is for the CHUD toolkit call */
268 typedef void (*kd_chudhook_fn
) (uint32_t debugid
, uintptr_t arg1
,
269 uintptr_t arg2
, uintptr_t arg3
,
270 uintptr_t arg4
, uintptr_t arg5
);
272 volatile kd_chudhook_fn kdebug_chudhook
= 0; /* pointer to CHUD toolkit function */
274 __private_extern__
void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
277 kdbg_set_tracing_enabled(boolean_t enabled
)
279 int s
= ml_set_interrupts_enabled(FALSE
);
280 lck_spin_lock(kds_spin_lock
);
283 kdebug_enable
|= KDEBUG_ENABLE_TRACE
;
284 kd_ctrl_page
.kdebug_slowcheck
&= ~SLOW_NOLOG
;
285 kd_ctrl_page
.enabled
= 1;
287 kdebug_enable
&= ~KDEBUG_ENABLE_TRACE
;
288 kd_ctrl_page
.kdebug_slowcheck
|= SLOW_NOLOG
;
289 kd_ctrl_page
.enabled
= 0;
291 lck_spin_unlock(kds_spin_lock
);
292 ml_set_interrupts_enabled(s
);
296 kdbg_set_flags(int slowflag
, int enableflag
, boolean_t enabled
)
298 int s
= ml_set_interrupts_enabled(FALSE
);
299 lck_spin_lock(kds_spin_lock
);
302 kd_ctrl_page
.kdebug_slowcheck
|= slowflag
;
303 kdebug_enable
|= enableflag
;
305 kd_ctrl_page
.kdebug_slowcheck
&= ~slowflag
;
306 kdebug_enable
&= ~enableflag
;
308 lck_spin_unlock(kds_spin_lock
);
309 ml_set_interrupts_enabled(s
);
313 #ifdef NATIVE_TRACE_FACILITY
315 disable_wrap(uint32_t *old_slowcheck
, uint32_t *old_flags
)
317 int s
= ml_set_interrupts_enabled(FALSE
);
318 lck_spin_lock(kds_spin_lock
);
320 *old_slowcheck
= kd_ctrl_page
.kdebug_slowcheck
;
321 *old_flags
= kd_ctrl_page
.kdebug_flags
;
323 kd_ctrl_page
.kdebug_flags
&= ~KDBG_WRAPPED
;
324 kd_ctrl_page
.kdebug_flags
|= KDBG_NOWRAP
;
326 lck_spin_unlock(kds_spin_lock
);
327 ml_set_interrupts_enabled(s
);
331 enable_wrap(uint32_t old_slowcheck
, boolean_t lostevents
)
333 int s
= ml_set_interrupts_enabled(FALSE
);
334 lck_spin_lock(kds_spin_lock
);
336 kd_ctrl_page
.kdebug_flags
&= ~KDBG_NOWRAP
;
338 if ( !(old_slowcheck
& SLOW_NOLOG
))
339 kd_ctrl_page
.kdebug_slowcheck
&= ~SLOW_NOLOG
;
341 if (lostevents
== TRUE
)
342 kd_ctrl_page
.kdebug_flags
|= KDBG_WRAPPED
;
344 lck_spin_unlock(kds_spin_lock
);
345 ml_set_interrupts_enabled(s
);
348 void trace_set_timebases(__unused
uint64_t tsc
, __unused
uint64_t ns
)
352 /* Begin functions that are defined twice */
353 void trace_set_timebases(uint64_t tsc
, uint64_t ns
)
355 int cpu
= cpu_number();
356 kd_ctrl_page
.cpu_timebase
[cpu
].tsc_base
= tsc
;
357 kd_ctrl_page
.cpu_timebase
[cpu
].ns_base
= ns
;
363 #if defined(__i386__) || defined(__x86_64__)
364 create_buffers(boolean_t early_trace
)
366 create_buffers(__unused boolean_t early_trace
)
376 * get the number of cpus and cache it
378 #if defined(__i386__) || defined(__x86_64__)
379 if (early_trace
== TRUE
) {
381 * we've started tracing before the
382 * IOKit has even started running... just
383 * use the static max value
389 host_basic_info_data_t hinfo
;
390 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
393 host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
394 kd_cpus
= hinfo
.logical_cpu_max
;
396 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&kdbip
, sizeof(struct kd_bufinfo
) * kd_cpus
) != KERN_SUCCESS
) {
401 trace_handler_map_bufinfo((uintptr_t)kdbip
, sizeof(struct kd_bufinfo
) * kd_cpus
);
403 #if !defined(NATIVE_TRACE_FACILITY)
404 for(i
=0;i
<(int)kd_cpus
;i
++) {
405 get_nanotime_timebases(i
,
406 &kd_ctrl_page
.cpu_timebase
[i
].tsc_base
,
407 &kd_ctrl_page
.cpu_timebase
[i
].ns_base
);
411 if (nkdbufs
< (kd_cpus
* EVENTS_PER_STORAGE_UNIT
* MIN_STORAGE_UNITS_PER_CPU
))
412 n_storage_units
= kd_cpus
* MIN_STORAGE_UNITS_PER_CPU
;
414 n_storage_units
= nkdbufs
/ EVENTS_PER_STORAGE_UNIT
;
416 nkdbufs
= n_storage_units
* EVENTS_PER_STORAGE_UNIT
;
418 f_buffers
= n_storage_units
/ N_STORAGE_UNITS_PER_BUFFER
;
419 n_storage_buffers
= f_buffers
;
421 f_buffer_size
= N_STORAGE_UNITS_PER_BUFFER
* sizeof(struct kd_storage
);
422 p_buffer_size
= (n_storage_units
% N_STORAGE_UNITS_PER_BUFFER
) * sizeof(struct kd_storage
);
429 if (kdcopybuf
== 0) {
430 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&kdcopybuf
, (vm_size_t
)KDCOPYBUF_SIZE
) != KERN_SUCCESS
) {
435 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&kd_bufs
, (vm_size_t
)(n_storage_buffers
* sizeof(struct kd_storage_buffers
))) != KERN_SUCCESS
) {
439 bzero(kd_bufs
, n_storage_buffers
* sizeof(struct kd_storage_buffers
));
441 for (i
= 0; i
< f_buffers
; i
++) {
442 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&kd_bufs
[i
].kdsb_addr
, (vm_size_t
)f_buffer_size
) != KERN_SUCCESS
) {
446 bzero(kd_bufs
[i
].kdsb_addr
, f_buffer_size
);
448 kd_bufs
[i
].kdsb_size
= f_buffer_size
;
451 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&kd_bufs
[i
].kdsb_addr
, (vm_size_t
)p_buffer_size
) != KERN_SUCCESS
) {
455 bzero(kd_bufs
[i
].kdsb_addr
, p_buffer_size
);
457 kd_bufs
[i
].kdsb_size
= p_buffer_size
;
461 for (i
= 0; i
< n_storage_buffers
; i
++) {
462 struct kd_storage
*kds
;
466 n_elements
= kd_bufs
[i
].kdsb_size
/ sizeof(struct kd_storage
);
467 kds
= kd_bufs
[i
].kdsb_addr
;
469 trace_handler_map_buffer(i
, (uintptr_t)kd_bufs
[i
].kdsb_addr
, kd_bufs
[i
].kdsb_size
);
471 for (n
= 0; n
< n_elements
; n
++) {
472 kds
[n
].kds_next
.buffer_index
= kd_ctrl_page
.kds_free_list
.buffer_index
;
473 kds
[n
].kds_next
.offset
= kd_ctrl_page
.kds_free_list
.offset
;
475 kd_ctrl_page
.kds_free_list
.buffer_index
= i
;
476 kd_ctrl_page
.kds_free_list
.offset
= n
;
478 n_storage_units
+= n_elements
;
481 bzero((char *)kdbip
, sizeof(struct kd_bufinfo
) * kd_cpus
);
483 for (i
= 0; i
< (int)kd_cpus
; i
++) {
484 kdbip
[i
].kd_list_head
.raw
= KDS_PTR_NULL
;
485 kdbip
[i
].kd_list_tail
.raw
= KDS_PTR_NULL
;
486 kdbip
[i
].kd_lostevents
= FALSE
;
487 kdbip
[i
].num_bufs
= 0;
490 kd_ctrl_page
.kdebug_flags
|= KDBG_BUFINIT
;
492 kd_ctrl_page
.kds_inuse_count
= 0;
493 n_storage_threshold
= n_storage_units
/ 2;
508 for (i
= 0; i
< n_storage_buffers
; i
++) {
509 if (kd_bufs
[i
].kdsb_addr
) {
510 kmem_free(kernel_map
, (vm_offset_t
)kd_bufs
[i
].kdsb_addr
, (vm_size_t
)kd_bufs
[i
].kdsb_size
);
511 trace_handler_unmap_buffer(i
);
514 kmem_free(kernel_map
, (vm_offset_t
)kd_bufs
, (vm_size_t
)(n_storage_buffers
* sizeof(struct kd_storage_buffers
)));
517 n_storage_buffers
= 0;
520 kmem_free(kernel_map
, (vm_offset_t
)kdcopybuf
, KDCOPYBUF_SIZE
);
524 kd_ctrl_page
.kds_free_list
.raw
= KDS_PTR_NULL
;
527 trace_handler_unmap_bufinfo();
529 kmem_free(kernel_map
, (vm_offset_t
)kdbip
, sizeof(struct kd_bufinfo
) * kd_cpus
);
533 kd_ctrl_page
.kdebug_flags
&= ~KDBG_BUFINIT
;
537 #ifdef NATIVE_TRACE_FACILITY
/*
 * Return the storage unit at the head of a CPU's in-use list to the
 * global free list.  Runs under kds_spin_lock with interrupts disabled,
 * and decrements kd_ctrl_page.kds_inuse_count on success.
 *
 * NOTE(review): the extraction dropped several lines of this function
 * (its return type/braces, the declarations of 's' and 'kdsp', and the
 * lookup that sets 'kdbp' from 'cpu'/'kdsp_raw') — confirm against the
 * original file before relying on this text.
 */
release_storage_unit(int cpu, uint32_t kdsp_raw)
struct kd_storage *kdsp_actual;
struct kd_bufinfo *kdbp;

/* serialize against writers and the steal path on other CPUs */
s = ml_set_interrupts_enabled(FALSE);
lck_spin_lock(kds_spin_lock);

if (kdsp.raw == kdbp->kd_list_head.raw) {
	/*
	 * it's possible for the storage unit pointed to
	 * by kdsp to have already been stolen... so
	 * check to see if it's still the head of the list
	 * now that we're behind the lock that protects
	 * adding and removing from the queue...
	 * since we only ever release and steal units from
	 * that position, if it's no longer the head
	 * we having nothing to do in this context
	 */
	kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
	/* unlink from the head of this CPU's list... */
	kdbp->kd_list_head = kdsp_actual->kds_next;
	/* ...and push onto the global free list */
	kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
	kd_ctrl_page.kds_free_list = kdsp;

	kd_ctrl_page.kds_inuse_count--;

lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
578 allocate_storage_unit(int cpu
)
581 struct kd_storage
*kdsp_actual
;
582 struct kd_bufinfo
*kdbp
, *kdbp_vict
, *kdbp_try
;
583 uint64_t oldest_ts
, ts
;
584 boolean_t retval
= TRUE
;
587 s
= ml_set_interrupts_enabled(FALSE
);
588 lck_spin_lock(kds_spin_lock
);
592 /* If someone beat us to the allocate, return success */
593 if (kdbp
->kd_list_tail
.raw
!= KDS_PTR_NULL
) {
594 kdsp_actual
= POINTER_FROM_KDS_PTR(kdbp
->kd_list_tail
);
596 if (kdsp_actual
->kds_bufindx
< EVENTS_PER_STORAGE_UNIT
)
600 if ((kdsp
= kd_ctrl_page
.kds_free_list
).raw
!= KDS_PTR_NULL
) {
601 kdsp_actual
= POINTER_FROM_KDS_PTR(kdsp
);
602 kd_ctrl_page
.kds_free_list
= kdsp_actual
->kds_next
;
604 kd_ctrl_page
.kds_inuse_count
++;
606 if (kd_ctrl_page
.kdebug_flags
& KDBG_NOWRAP
) {
607 kd_ctrl_page
.kdebug_slowcheck
|= SLOW_NOLOG
;
608 kdbp
->kd_lostevents
= TRUE
;
613 oldest_ts
= (uint64_t)-1;
615 for (kdbp_try
= &kdbip
[0]; kdbp_try
< &kdbip
[kd_cpus
]; kdbp_try
++) {
617 if (kdbp_try
->kd_list_head
.raw
== KDS_PTR_NULL
) {
619 * no storage unit to steal
624 kdsp_actual
= POINTER_FROM_KDS_PTR(kdbp_try
->kd_list_head
);
626 if (kdsp_actual
->kds_bufcnt
< EVENTS_PER_STORAGE_UNIT
) {
628 * make sure we don't steal the storage unit
629 * being actively recorded to... need to
630 * move on because we don't want an out-of-order
631 * set of events showing up later
635 ts
= kdbg_get_timestamp(&kdsp_actual
->kds_records
[0]);
637 if (ts
< oldest_ts
) {
639 * when 'wrapping', we want to steal the
640 * storage unit that has the 'earliest' time
641 * associated with it (first event time)
644 kdbp_vict
= kdbp_try
;
647 if (kdbp_vict
== NULL
) {
649 kd_ctrl_page
.enabled
= 0;
653 kdsp
= kdbp_vict
->kd_list_head
;
654 kdsp_actual
= POINTER_FROM_KDS_PTR(kdsp
);
656 kdbp_vict
->kd_list_head
= kdsp_actual
->kds_next
;
658 kd_ctrl_page
.kdebug_flags
|= KDBG_WRAPPED
;
660 kdsp_actual
->kds_timestamp
= mach_absolute_time();
661 kdsp_actual
->kds_next
.raw
= KDS_PTR_NULL
;
662 kdsp_actual
->kds_bufcnt
= 0;
663 kdsp_actual
->kds_readlast
= 0;
665 kdsp_actual
->kds_lostevents
= kdbp
->kd_lostevents
;
666 kdbp
->kd_lostevents
= FALSE
;
667 kdsp_actual
->kds_bufindx
= 0;
669 if (kdbp
->kd_list_head
.raw
== KDS_PTR_NULL
)
670 kdbp
->kd_list_head
= kdsp
;
672 POINTER_FROM_KDS_PTR(kdbp
->kd_list_tail
)->kds_next
= kdsp
;
673 kdbp
->kd_list_tail
= kdsp
;
675 lck_spin_unlock(kds_spin_lock
);
676 ml_set_interrupts_enabled(s
);
683 kernel_debug_internal(
692 __attribute__((always_inline
)) void
693 kernel_debug_internal(
702 struct proc
*curproc
;
708 struct kd_bufinfo
*kdbp
;
709 struct kd_storage
*kdsp_actual
;
712 if (kd_ctrl_page
.kdebug_slowcheck
) {
714 if (kdebug_enable
& KDEBUG_ENABLE_CHUD
) {
715 kd_chudhook_fn chudhook
;
717 * Mask interrupts to minimize the interval across
718 * which the driver providing the hook could be
721 s
= ml_set_interrupts_enabled(FALSE
);
722 chudhook
= kdebug_chudhook
;
724 chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
);
725 ml_set_interrupts_enabled(s
);
727 if ((kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) && entropy_flag
) {
729 now
= mach_absolute_time();
731 s
= ml_set_interrupts_enabled(FALSE
);
732 lck_spin_lock(kds_spin_lock
);
734 if (kdebug_enable
& KDEBUG_ENABLE_ENTROPY
) {
736 if (kd_entropy_indx
< kd_entropy_count
) {
737 kd_entropy_buffer
[kd_entropy_indx
] = now
;
740 if (kd_entropy_indx
== kd_entropy_count
) {
742 * Disable entropy collection
744 kdebug_enable
&= ~KDEBUG_ENABLE_ENTROPY
;
745 kd_ctrl_page
.kdebug_slowcheck
&= ~SLOW_ENTROPY
;
748 lck_spin_unlock(kds_spin_lock
);
749 ml_set_interrupts_enabled(s
);
751 if ( (kd_ctrl_page
.kdebug_slowcheck
& SLOW_NOLOG
) || !(kdebug_enable
& KDEBUG_ENABLE_TRACE
))
754 if ( !ml_at_interrupt_context()) {
755 if (kd_ctrl_page
.kdebug_flags
& KDBG_PIDCHECK
) {
757 * If kdebug flag is not set for current proc, return
759 curproc
= current_proc();
761 if ((curproc
&& !(curproc
->p_kdebug
)) &&
762 ((debugid
& 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
765 else if (kd_ctrl_page
.kdebug_flags
& KDBG_PIDEXCLUDE
) {
767 * If kdebug flag is set for current proc, return
769 curproc
= current_proc();
771 if ((curproc
&& curproc
->p_kdebug
) &&
772 ((debugid
& 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
776 if (kd_ctrl_page
.kdebug_flags
& KDBG_RANGECHECK
) {
777 if ((debugid
< kdlog_beg
)
778 || ((debugid
>= kdlog_end
) && (debugid
>> 24 != DBG_TRACE
)))
781 else if (kd_ctrl_page
.kdebug_flags
& KDBG_VALCHECK
) {
782 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
783 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
784 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
785 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
786 (debugid
>> 24 != DBG_TRACE
))
790 disable_preemption();
794 if (kdbp
->kd_list_tail
.raw
!= KDS_PTR_NULL
) {
795 kdsp_actual
= POINTER_FROM_KDS_PTR(kdbp
->kd_list_tail
);
796 bindx
= kdsp_actual
->kds_bufindx
;
800 if (kdsp_actual
== NULL
|| bindx
>= EVENTS_PER_STORAGE_UNIT
) {
801 if (allocate_storage_unit(cpu
) == FALSE
) {
803 * this can only happen if wrapping
810 now
= mach_absolute_time() & KDBG_TIMESTAMP_MASK
;
812 if ( !OSCompareAndSwap(bindx
, bindx
+ 1, &kdsp_actual
->kds_bufindx
))
815 kd
= &kdsp_actual
->kds_records
[bindx
];
817 kd
->debugid
= debugid
;
824 kdbg_set_timestamp_and_cpu(kd
, now
, cpu
);
826 OSAddAtomic(1, &kdsp_actual
->kds_bufcnt
);
830 if ((kds_waiter
&& kd_ctrl_page
.kds_inuse_count
>= n_storage_threshold
) ||
831 (kde_waiter
&& kd_entropy_indx
>= kd_entropy_count
)) {
835 etype
= debugid
& DBG_FUNC_MASK
;
836 stype
= debugid
& DBG_SCALL_MASK
;
838 if (etype
== INTERRUPT
|| etype
== MACH_vmfault
||
839 stype
== BSC_SysCall
|| stype
== MACH_SysCall
) {
841 boolean_t need_kds_wakeup
= FALSE
;
842 boolean_t need_kde_wakeup
= FALSE
;
845 * try to take the lock here to synchronize with the
846 * waiter entering the blocked state... use the try
847 * mode to prevent deadlocks caused by re-entering this
848 * routine due to various trace points triggered in the
849 * lck_spin_sleep_xxxx routines used to actually enter
850 * one of our 2 wait conditions... no problem if we fail,
851 * there will be lots of additional events coming in that
852 * will eventually succeed in grabbing this lock
854 s
= ml_set_interrupts_enabled(FALSE
);
856 if (lck_spin_try_lock(kdw_spin_lock
)) {
858 if (kds_waiter
&& kd_ctrl_page
.kds_inuse_count
>= n_storage_threshold
) {
860 need_kds_wakeup
= TRUE
;
862 if (kde_waiter
&& kd_entropy_indx
>= kd_entropy_count
) {
864 need_kde_wakeup
= TRUE
;
866 lck_spin_unlock(kdw_spin_lock
);
868 ml_set_interrupts_enabled(s
);
870 if (need_kds_wakeup
== TRUE
)
872 if (need_kde_wakeup
== TRUE
)
885 __unused
uintptr_t arg5
)
887 kernel_debug_internal(debugid
, arg1
, arg2
, arg3
, arg4
, (uintptr_t)thread_tid(current_thread()), 1);
899 kernel_debug_internal(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
, 1);
903 * Support syscall SYS_kdebug_trace
906 kdebug_trace(__unused
struct proc
*p
, struct kdebug_trace_args
*uap
, __unused
int32_t *retval
)
908 if ( __probable(kdebug_enable
== 0) )
911 kernel_debug_internal(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, (uintptr_t)thread_tid(current_thread()), 0);
920 if (kd_ctrl_page
.kdebug_flags
& KDBG_LOCKINIT
)
923 trace_handler_map_ctrl_page((uintptr_t)&kd_ctrl_page
, sizeof(kd_ctrl_page
), sizeof(struct kd_storage
), sizeof(union kds_ptr
));
926 * allocate lock group attribute and group
928 kd_trace_mtx_sysctl_grp_attr
= lck_grp_attr_alloc_init();
929 kd_trace_mtx_sysctl_grp
= lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr
);
932 * allocate the lock attribute
934 kd_trace_mtx_sysctl_attr
= lck_attr_alloc_init();
938 * allocate and initialize mutex's
940 kd_trace_mtx_sysctl
= lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
);
941 kds_spin_lock
= lck_spin_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
);
942 kdw_spin_lock
= lck_spin_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
);
944 kd_ctrl_page
.kdebug_flags
|= KDBG_LOCKINIT
;
949 kdbg_bootstrap(boolean_t early_trace
)
951 kd_ctrl_page
.kdebug_flags
&= ~KDBG_WRAPPED
;
953 return (create_buffers(early_trace
));
/*
 * Tear down and rebuild the trace buffers (and drop any stale thread
 * map), then bootstrap a fresh set of buffers.
 *
 * NOTE(review): the extraction dropped lines here (return type/braces,
 * the local 'ret' declaration, the buffer-teardown call between the
 * synchronization comment and the map free, and the return) — confirm
 * against the original file.
 */
kdbg_reinit(boolean_t early_trace)
/*
 * Disable trace collecting
 * First make sure we're not in
 * the middle of cutting a trace
 */
kdbg_set_tracing_enabled(FALSE);

/*
 * make sure the SLOW_NOLOG is seen
 * by everyone that might be trying
 */

/* free the existing thread map, if one was built */
if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;

	kd_mapptr = (kd_threadmap *) 0;

ret = kdbg_bootstrap(early_trace);

RAW_file_written = 0;
993 kdbg_trace_data(struct proc
*proc
, long *arg_pid
)
998 *arg_pid
= proc
->p_pid
;
1003 kdbg_trace_string(struct proc
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
)
1017 * Collect the pathname for tracing
1019 dbg_nameptr
= proc
->p_comm
;
1020 dbg_namelen
= (int)strlen(proc
->p_comm
);
1026 if(dbg_namelen
> (int)sizeof(dbg_parms
))
1027 dbg_namelen
= (int)sizeof(dbg_parms
);
1029 strncpy((char *)dbg_parms
, dbg_nameptr
, dbg_namelen
);
1038 kdbg_resolve_map(thread_t th_act
, void *opaque
)
1040 kd_threadmap
*mapptr
;
1041 krt_t
*t
= (krt_t
*)opaque
;
1043 if (t
->count
< t
->maxcount
) {
1044 mapptr
= &t
->map
[t
->count
];
1045 mapptr
->thread
= (uintptr_t)thread_tid(th_act
);
1047 (void) strlcpy (mapptr
->command
, t
->atts
->task_comm
,
1048 sizeof(t
->atts
->task_comm
));
1050 * Some kernel threads have no associated pid.
1051 * We still need to mark the entry as valid.
1054 mapptr
->valid
= t
->atts
->pid
;
1067 int tts_count
; /* number of task-to-string structures */
1068 struct tts
*tts_mapptr
;
1069 unsigned int tts_mapsize
= 0;
1070 vm_offset_t tts_maptomem
=0;
1073 if (kd_ctrl_page
.kdebug_flags
& KDBG_MAPINIT
)
1077 * need to use PROC_SCANPROCLIST with proc_iterate
1082 * Calculate the sizes of map buffers
1084 for (p
= allproc
.lh_first
, kd_mapcount
=0, tts_count
=0; p
; p
= p
->p_list
.le_next
) {
1085 kd_mapcount
+= get_task_numacts((task_t
)p
->task
);
1091 * The proc count could change during buffer allocation,
1092 * so introduce a small fudge factor to bump up the
1093 * buffer sizes. This gives new tasks some chance of
1094 * making into the tables. Bump up by 10%.
1096 kd_mapcount
+= kd_mapcount
/10;
1097 tts_count
+= tts_count
/10;
1099 kd_mapsize
= kd_mapcount
* sizeof(kd_threadmap
);
1101 if ((kmem_alloc(kernel_map
, & kd_maptomem
, (vm_size_t
)kd_mapsize
) == KERN_SUCCESS
)) {
1102 kd_mapptr
= (kd_threadmap
*) kd_maptomem
;
1103 bzero(kd_mapptr
, kd_mapsize
);
1105 kd_mapptr
= (kd_threadmap
*) 0;
1107 tts_mapsize
= tts_count
* sizeof(struct tts
);
1109 if ((kmem_alloc(kernel_map
, & tts_maptomem
, (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
)) {
1110 tts_mapptr
= (struct tts
*) tts_maptomem
;
1111 bzero(tts_mapptr
, tts_mapsize
);
1113 tts_mapptr
= (struct tts
*) 0;
1116 * We need to save the procs command string
1117 * and take a reference for each task associated
1118 * with a valid process
1122 * should use proc_iterate
1126 for (p
= allproc
.lh_first
, i
=0; p
&& i
< tts_count
; p
= p
->p_list
.le_next
) {
1127 if (p
->p_lflag
& P_LEXIT
)
1131 task_reference(p
->task
);
1132 tts_mapptr
[i
].task
= p
->task
;
1133 tts_mapptr
[i
].pid
= p
->p_pid
;
1134 (void)strlcpy(tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
));
1143 if (kd_mapptr
&& tts_mapptr
) {
1144 kd_ctrl_page
.kdebug_flags
|= KDBG_MAPINIT
;
1147 * Initialize thread map data
1149 akrt
.map
= kd_mapptr
;
1151 akrt
.maxcount
= kd_mapcount
;
1153 for (i
= 0; i
< tts_count
; i
++) {
1154 akrt
.atts
= &tts_mapptr
[i
];
1155 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
);
1156 task_deallocate((task_t
) tts_mapptr
[i
].task
);
1158 kmem_free(kernel_map
, (vm_offset_t
)tts_mapptr
, tts_mapsize
);
1166 * Clean up the trace buffer
1167 * First make sure we're not in
1168 * the middle of cutting a trace
1170 kdbg_set_tracing_enabled(FALSE
);
1173 * make sure the SLOW_NOLOG is seen
1174 * by everyone that might be trying
1179 global_state_pid
= -1;
1180 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1181 kd_ctrl_page
.kdebug_flags
&= ~(KDBG_NOWRAP
| KDBG_RANGECHECK
| KDBG_VALCHECK
);
1182 kd_ctrl_page
.kdebug_flags
&= ~(KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
);
1186 /* Clean up the thread map buffer */
1187 kd_ctrl_page
.kdebug_flags
&= ~KDBG_MAPINIT
;
1189 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
1190 kd_mapptr
= (kd_threadmap
*) 0;
1195 RAW_file_offset
= 0;
1196 RAW_file_written
= 0;
/*
 * Restrict tracing to (or stop restricting for) a single pid.
 * kdr->value1 carries the pid, kdr->value2 the on/off flag.
 *
 * NOTE(review): the extraction dropped lines of this function (return
 * type/braces, local declarations of 'pid'/'flag'/'p', the pid>0 guard,
 * the per-proc p_kdebug assignment, the proc_rele() and the return) —
 * confirm against the original file.
 */
kdbg_setpid(kd_regtype *kdr)
pid = (pid_t)kdr->value1;
flag = (int)kdr->value2;

/* look up the target process; presumably ESRCH if not found — verify */
if ((p = proc_find(pid)) == NULL)

/*
 * turn on pid check for this and all pids
 */
kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

/*
 * turn off pid check for this pid value
 * Don't turn off all pid checking though
 *
 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
 */
/* This is for pid exclusion in the trace buffer */
/*
 * Exclude (or stop excluding) a single pid from tracing — the mirror
 * image of kdbg_setpid().  kdr->value1 carries the pid, kdr->value2 the
 * on/off flag.
 *
 * NOTE(review): the extraction dropped lines of this function (return
 * type/braces, local declarations, the pid>0 guard, the per-proc
 * p_kdebug assignment, proc_rele() and the return) — confirm against
 * the original file.
 */
kdbg_setpidex(kd_regtype *kdr)
pid = (pid_t)kdr->value1;
flag = (int)kdr->value2;

/* look up the target process; presumably ESRCH if not found — verify */
if ((p = proc_find(pid)) == NULL)

/*
 * turn on pid exclusion
 */
kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

/*
 * turn off pid exclusion for this pid value
 * Don't turn off all pid exclusion though
 *
 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
 */
/*
 * This is for setting a maximum decrementer value
 */
/*
 * Validate a requested RTC decrementer value from kdr->value1.
 * The visible check rejects nonzero values below KDBG_MINRTCDEC.
 *
 * NOTE(review): the extraction dropped this function's return
 * type/braces, the 'decval' declaration, and the branch bodies / return
 * — confirm the error codes against the original file.
 */
kdbg_setrtcdec(kd_regtype *kdr)
decval = (natural_t)kdr->value1;

/* a nonzero value below the minimum is invalid */
if (decval && decval < KDBG_MINRTCDEC)
1303 kdbg_setreg(kd_regtype
* kdr
)
1306 unsigned int val_1
, val_2
, val
;
1307 switch (kdr
->type
) {
1309 case KDBG_CLASSTYPE
:
1310 val_1
= (kdr
->value1
& 0xff);
1311 val_2
= (kdr
->value2
& 0xff);
1312 kdlog_beg
= (val_1
<<24);
1313 kdlog_end
= (val_2
<<24);
1314 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1315 kd_ctrl_page
.kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
1316 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
1317 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
);
1319 case KDBG_SUBCLSTYPE
:
1320 val_1
= (kdr
->value1
& 0xff);
1321 val_2
= (kdr
->value2
& 0xff);
1323 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
1324 kdlog_end
= ((val_1
<<24) | (val
<< 16));
1325 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1326 kd_ctrl_page
.kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
1327 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
1328 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
);
1330 case KDBG_RANGETYPE
:
1331 kdlog_beg
= (kdr
->value1
);
1332 kdlog_end
= (kdr
->value2
);
1333 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1334 kd_ctrl_page
.kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
1335 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
1336 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
);
1339 kdlog_value1
= (kdr
->value1
);
1340 kdlog_value2
= (kdr
->value2
);
1341 kdlog_value3
= (kdr
->value3
);
1342 kdlog_value4
= (kdr
->value4
);
1343 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1344 kd_ctrl_page
.kdebug_flags
&= ~KDBG_RANGECHECK
; /* Turn off range check */
1345 kd_ctrl_page
.kdebug_flags
|= KDBG_VALCHECK
; /* Turn on specific value check */
1346 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
);
1348 case KDBG_TYPENONE
:
1349 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1351 if ( (kd_ctrl_page
.kdebug_flags
& (KDBG_RANGECHECK
| KDBG_VALCHECK
| KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
)) )
1352 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
);
1354 kdbg_set_flags(SLOW_CHECKS
, 0, FALSE
);
1367 kdbg_getreg(__unused kd_regtype
* kdr
)
1371 unsigned int val_1
, val_2
, val
;
1373 switch (kdr
->type
) {
1374 case KDBG_CLASSTYPE
:
1375 val_1
= (kdr
->value1
& 0xff);
1377 kdlog_beg
= (val_1
<<24);
1378 kdlog_end
= (val_2
<<24);
1379 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1380 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
1382 case KDBG_SUBCLSTYPE
:
1383 val_1
= (kdr
->value1
& 0xff);
1384 val_2
= (kdr
->value2
& 0xff);
1386 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
1387 kdlog_end
= ((val_1
<<24) | (val
<< 16));
1388 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1389 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
1391 case KDBG_RANGETYPE
:
1392 kdlog_beg
= (kdr
->value1
);
1393 kdlog_end
= (kdr
->value2
);
1394 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1395 kd_ctrl_page
.kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
1397 case KDBG_TYPENONE
:
1398 kd_ctrl_page
.kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
1412 kdbg_readmap(user_addr_t buffer
, size_t *number
, vnode_t vp
, vfs_context_t ctx
)
1414 int avail
= *number
;
1418 count
= avail
/sizeof (kd_threadmap
);
1420 if (count
&& (count
<= kd_mapcount
))
1422 if ((kd_ctrl_page
.kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
1424 if (*number
< kd_mapsize
)
1436 header
.version_no
= RAW_VERSION1
;
1437 header
.thread_count
= count
;
1439 clock_get_calendar_microtime(&secs
, &usecs
);
1440 header
.TOD_secs
= secs
;
1441 header
.TOD_usecs
= usecs
;
1443 ret
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)&header
, sizeof(RAW_header
), RAW_file_offset
,
1444 UIO_SYSSPACE
, IO_NODELOCKED
|IO_UNIT
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
1447 RAW_file_offset
+= sizeof(RAW_header
);
1449 ret
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)kd_mapptr
, kd_mapsize
, RAW_file_offset
,
1450 UIO_SYSSPACE
, IO_NODELOCKED
|IO_UNIT
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
1453 RAW_file_offset
+= kd_mapsize
;
1455 pad_size
= PAGE_SIZE
- (RAW_file_offset
& PAGE_MASK_64
);
1459 pad_buf
= (char *)kalloc(pad_size
);
1460 memset(pad_buf
, 0, pad_size
);
1462 ret
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)pad_buf
, pad_size
, RAW_file_offset
,
1463 UIO_SYSSPACE
, IO_NODELOCKED
|IO_UNIT
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
1464 kfree(pad_buf
, pad_size
);
1468 RAW_file_offset
+= pad_size
;
1470 RAW_file_written
+= sizeof(RAW_header
) + kd_mapsize
+ pad_size
;
1473 if (copyout(kd_mapptr
, buffer
, kd_mapsize
))
1488 vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)&count
, sizeof(uint32_t), RAW_file_offset
,
1489 UIO_SYSSPACE
, IO_NODELOCKED
|IO_UNIT
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
1490 RAW_file_offset
+= sizeof(uint32_t);
1491 RAW_file_written
+= sizeof(uint32_t);
1494 if ((kd_ctrl_page
.kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
1496 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
);
1497 kd_ctrl_page
.kdebug_flags
&= ~KDBG_MAPINIT
;
1499 kd_mapptr
= (kd_threadmap
*) 0;
1506 kdbg_getentropy (user_addr_t buffer
, size_t *number
, int ms_timeout
)
1508 int avail
= *number
;
1513 int wait_result
= THREAD_AWAKENED
;
1516 if (kd_entropy_buffer
)
1522 kd_entropy_count
= avail
/sizeof(uint64_t);
1524 if (kd_entropy_count
> MAX_ENTROPY_COUNT
|| kd_entropy_count
== 0) {
1526 * Enforce maximum entropy entries
1530 kd_entropy_bufsize
= kd_entropy_count
* sizeof(uint64_t);
1533 * allocate entropy buffer
1535 if (kmem_alloc(kernel_map
, &kd_entropy_buftomem
, (vm_size_t
)kd_entropy_bufsize
) == KERN_SUCCESS
) {
1536 kd_entropy_buffer
= (uint64_t *) kd_entropy_buftomem
;
1538 kd_entropy_buffer
= (uint64_t *) 0;
1539 kd_entropy_count
= 0;
1543 kd_entropy_indx
= 0;
1545 KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START
, ms_timeout
, kd_entropy_count
, 0, 0, 0);
1548 * Enable entropy sampling
1550 kdbg_set_flags(SLOW_ENTROPY
, KDEBUG_ENABLE_ENTROPY
, TRUE
);
1553 ns
= (u_int64_t
)ms_timeout
* (u_int64_t
)(1000 * 1000);
1554 nanoseconds_to_absolutetime(ns
, &abstime
);
1555 clock_absolutetime_interval_to_deadline( abstime
, &abstime
);
1559 s
= ml_set_interrupts_enabled(FALSE
);
1560 lck_spin_lock(kdw_spin_lock
);
1562 while (wait_result
== THREAD_AWAKENED
&& kd_entropy_indx
< kd_entropy_count
) {
1568 * wait for the specified timeout or
1569 * until we've hit our sample limit
1571 wait_result
= lck_spin_sleep_deadline(kdw_spin_lock
, 0, &kde_waiter
, THREAD_ABORTSAFE
, abstime
);
1574 * wait until we've hit our sample limit
1576 wait_result
= lck_spin_sleep(kdw_spin_lock
, 0, &kde_waiter
, THREAD_ABORTSAFE
);
1580 lck_spin_unlock(kdw_spin_lock
);
1581 ml_set_interrupts_enabled(s
);
1584 * Disable entropy sampling
1586 kdbg_set_flags(SLOW_ENTROPY
, KDEBUG_ENABLE_ENTROPY
, FALSE
);
1588 KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END
, ms_timeout
, kd_entropy_indx
, 0, 0, 0);
1593 if (kd_entropy_indx
> 0) {
1595 * copyout the buffer
1597 if (copyout(kd_entropy_buffer
, buffer
, kd_entropy_indx
* sizeof(uint64_t)))
1600 *number
= kd_entropy_indx
* sizeof(uint64_t);
1605 kd_entropy_count
= 0;
1606 kd_entropy_indx
= 0;
1607 kd_entropy_buftomem
= 0;
1608 kmem_free(kernel_map
, (vm_offset_t
)kd_entropy_buffer
, kd_entropy_bufsize
);
1609 kd_entropy_buffer
= (uint64_t *) 0;
1616 kdbg_set_nkdbufs(unsigned int value
)
1619 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
1620 * 'value' is the desired number of trace entries
1622 unsigned int max_entries
= (sane_size
/2) / sizeof(kd_buf
);
1624 if (value
<= max_entries
)
1627 nkdbufs
= max_entries
;
1632 * This function is provided for the CHUD toolkit only.
1634 * zero disables kdebug_chudhook function call
1635 * non-zero enables kdebug_chudhook function call
1637 * address of the enabled kdebug_chudhook function
1641 kdbg_control_chud(int val
, void *fn
)
1646 /* enable chudhook */
1647 kdebug_chudhook
= fn
;
1648 kdbg_set_flags(SLOW_CHUD
, KDEBUG_ENABLE_CHUD
, TRUE
);
1651 /* disable chudhook */
1652 kdbg_set_flags(SLOW_CHUD
, KDEBUG_ENABLE_CHUD
, FALSE
);
1653 kdebug_chudhook
= 0;
1659 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
1662 size_t size
= *sizep
;
1663 unsigned int value
= 0;
1665 kbufinfo_t kd_bufinfo
;
1669 if (name
[0] == KERN_KDGETENTROPY
||
1670 name
[0] == KERN_KDWRITETR
||
1671 name
[0] == KERN_KDWRITEMAP
||
1672 name
[0] == KERN_KDEFLAGS
||
1673 name
[0] == KERN_KDDFLAGS
||
1674 name
[0] == KERN_KDENABLE
||
1675 name
[0] == KERN_KDSETBUF
) {
1684 if ( !(kd_ctrl_page
.kdebug_flags
& KDBG_LOCKINIT
))
1687 lck_mtx_lock(kd_trace_mtx_sysctl
);
1689 if (name
[0] == KERN_KDGETBUF
) {
1691 * Does not alter the global_state_pid
1692 * This is a passive request.
1694 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
1696 * There is not enough room to return even
1697 * the first element of the info structure.
1702 kd_bufinfo
.nkdbufs
= nkdbufs
;
1703 kd_bufinfo
.nkdthreads
= kd_mapsize
/ sizeof(kd_threadmap
);
1705 if ( (kd_ctrl_page
.kdebug_slowcheck
& SLOW_NOLOG
) )
1706 kd_bufinfo
.nolog
= 1;
1708 kd_bufinfo
.nolog
= 0;
1710 kd_bufinfo
.flags
= kd_ctrl_page
.kdebug_flags
;
1711 #if defined(__LP64__)
1712 kd_bufinfo
.flags
|= KDBG_LP64
;
1714 kd_bufinfo
.bufid
= global_state_pid
;
1716 if (size
>= sizeof(kd_bufinfo
)) {
1718 * Provide all the info we have
1720 if (copyout(&kd_bufinfo
, where
, sizeof(kd_bufinfo
)))
1724 * For backwards compatibility, only provide
1725 * as much info as there is room for.
1727 if (copyout(&kd_bufinfo
, where
, size
))
1732 } else if (name
[0] == KERN_KDGETENTROPY
) {
1733 if (kd_entropy_buffer
)
1736 ret
= kdbg_getentropy(where
, sizep
, value
);
1740 if ((curproc
= current_proc()) != NULL
)
1741 curpid
= curproc
->p_pid
;
1746 if (global_state_pid
== -1)
1747 global_state_pid
= curpid
;
1748 else if (global_state_pid
!= curpid
) {
1749 if ((p
= proc_find(global_state_pid
)) == NULL
) {
1751 * The global pid no longer exists
1753 global_state_pid
= curpid
;
1756 * The global pid exists, deny this request
1767 value
&= KDBG_USERFLAGS
;
1768 kd_ctrl_page
.kdebug_flags
|= value
;
1771 value
&= KDBG_USERFLAGS
;
1772 kd_ctrl_page
.kdebug_flags
&= ~value
;
1776 * used to enable or disable
1780 * enable only if buffer is initialized
1782 if (!(kd_ctrl_page
.kdebug_flags
& KDBG_BUFINIT
)) {
1788 kdbg_set_tracing_enabled(TRUE
);
1791 kdbg_set_tracing_enabled(FALSE
);
1794 kdbg_set_nkdbufs(value
);
1797 ret
= kdbg_reinit(FALSE
);
1803 if(size
< sizeof(kd_regtype
)) {
1807 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1811 ret
= kdbg_setreg(&kd_Reg
);
1814 if (size
< sizeof(kd_regtype
)) {
1818 ret
= kdbg_getreg(&kd_Reg
);
1819 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))) {
1824 ret
= kdbg_read(where
, sizep
, NULL
, NULL
);
1826 case KERN_KDWRITETR
:
1827 case KERN_KDWRITEMAP
:
1829 struct vfs_context context
;
1830 struct fileproc
*fp
;
1835 if (name
[0] == KERN_KDWRITETR
) {
1837 int wait_result
= THREAD_AWAKENED
;
1842 ns
= ((u_int64_t
)*sizep
) * (u_int64_t
)(1000 * 1000);
1843 nanoseconds_to_absolutetime(ns
, &abstime
);
1844 clock_absolutetime_interval_to_deadline( abstime
, &abstime
);
1848 s
= ml_set_interrupts_enabled(FALSE
);
1849 lck_spin_lock(kdw_spin_lock
);
1851 while (wait_result
== THREAD_AWAKENED
&& kd_ctrl_page
.kds_inuse_count
< n_storage_threshold
) {
1856 wait_result
= lck_spin_sleep_deadline(kdw_spin_lock
, 0, &kds_waiter
, THREAD_ABORTSAFE
, abstime
);
1858 wait_result
= lck_spin_sleep(kdw_spin_lock
, 0, &kds_waiter
, THREAD_ABORTSAFE
);
1862 lck_spin_unlock(kdw_spin_lock
);
1863 ml_set_interrupts_enabled(s
);
1869 if ( (ret
= fp_lookup(p
, fd
, &fp
, 1)) ) {
1873 context
.vc_thread
= current_thread();
1874 context
.vc_ucred
= fp
->f_fglob
->fg_cred
;
1876 if (fp
->f_fglob
->fg_type
!= DTYPE_VNODE
) {
1877 fp_drop(p
, fd
, fp
, 1);
1883 vp
= (struct vnode
*)fp
->f_fglob
->fg_data
;
1886 if ((ret
= vnode_getwithref(vp
)) == 0) {
1888 if (name
[0] == KERN_KDWRITETR
) {
1889 number
= nkdbufs
* sizeof(kd_buf
);
1891 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO
, 3)) | DBG_FUNC_START
, 0, 0, 0, 0, 0);
1892 ret
= kdbg_read(0, &number
, vp
, &context
);
1893 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO
, 3)) | DBG_FUNC_END
, number
, 0, 0, 0, 0);
1897 number
= kd_mapsize
;
1898 kdbg_readmap(0, &number
, vp
, &context
);
1902 fp_drop(p
, fd
, fp
, 0);
1907 if (size
< sizeof(kd_regtype
)) {
1911 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1915 ret
= kdbg_setpid(&kd_Reg
);
1918 if (size
< sizeof(kd_regtype
)) {
1922 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1926 ret
= kdbg_setpidex(&kd_Reg
);
1929 ret
= kdbg_readmap(where
, sizep
, NULL
, NULL
);
1931 case KERN_KDSETRTCDEC
:
1932 if (size
< sizeof(kd_regtype
)) {
1936 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
1940 ret
= kdbg_setrtcdec(&kd_Reg
);
1947 lck_mtx_unlock(kd_trace_mtx_sysctl
);
1954 * This code can run for the most part concurrently with kernel_debug_internal()...
1955 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
1956 * synchronize with the recording side of this puzzle... otherwise, we are able to
1957 * move through the lists w/o use of any locks
1960 kdbg_read(user_addr_t buffer
, size_t *number
, vnode_t vp
, vfs_context_t ctx
)
1963 unsigned int cpu
, min_cpu
;
1964 uint64_t mintime
, t
;
1970 struct kd_storage
*kdsp_actual
;
1971 struct kd_bufinfo
*kdbp
;
1972 struct kd_bufinfo
*min_kdbp
;
1973 uint32_t tempbuf_count
;
1974 uint32_t tempbuf_number
;
1975 uint32_t old_kdebug_flags
;
1976 uint32_t old_kdebug_slowcheck
;
1977 boolean_t lostevents
= FALSE
;
1978 boolean_t out_of_events
= FALSE
;
1980 count
= *number
/sizeof(kd_buf
);
1983 if (count
== 0 || !(kd_ctrl_page
.kdebug_flags
& KDBG_BUFINIT
) || kdcopybuf
== 0)
1986 memset(&lostevent
, 0, sizeof(lostevent
));
1987 lostevent
.debugid
= TRACEDBG_CODE(DBG_TRACE_INFO
, 2);
1990 * because we hold kd_trace_mtx_sysctl, no other control threads can
1991 * be playing with kdebug_flags... the code that cuts new events could
1992 * be running, but it grabs kds_spin_lock if it needs to acquire a new
1993 * storage chunk which is where it examines kdebug_flags... it its adding
1994 * to the same chunk we're reading from, no problem...
1997 disable_wrap(&old_kdebug_slowcheck
, &old_kdebug_flags
);
1999 if (count
> nkdbufs
)
2002 if ((tempbuf_count
= count
) > KDCOPYBUF_COUNT
)
2003 tempbuf_count
= KDCOPYBUF_COUNT
;
2006 tempbuf
= kdcopybuf
;
2009 while (tempbuf_count
) {
2010 mintime
= 0xffffffffffffffffULL
;
2014 for (cpu
= 0, kdbp
= &kdbip
[0]; cpu
< kd_cpus
; cpu
++, kdbp
++) {
2016 if ((kdsp
= kdbp
->kd_list_head
).raw
== KDS_PTR_NULL
)
2018 kdsp_actual
= POINTER_FROM_KDS_PTR(kdsp
);
2020 rcursor
= kdsp_actual
->kds_readlast
;
2022 if (rcursor
== kdsp_actual
->kds_bufindx
)
2025 t
= kdbg_get_timestamp(&kdsp_actual
->kds_records
[rcursor
]);
2027 if (t
< kdsp_actual
->kds_timestamp
) {
2029 * indicates we've not yet completed filling
2031 * this should only occur when we're looking
2032 * at the buf that the record head is utilizing
2033 * we'll pick these events up on the next
2035 * we bail at this point so that we don't
2036 * get an out-of-order timestream by continuing
2037 * to read events from the other CPUs' timestream(s)
2039 out_of_events
= TRUE
;
2048 if (min_kdbp
== NULL
|| out_of_events
== TRUE
) {
2050 * all buffers ran empty
2052 out_of_events
= TRUE
;
2055 kdsp
= min_kdbp
->kd_list_head
;
2056 kdsp_actual
= POINTER_FROM_KDS_PTR(kdsp
);
2058 if (kdsp_actual
->kds_lostevents
== TRUE
) {
2059 lostevent
.timestamp
= kdsp_actual
->kds_records
[kdsp_actual
->kds_readlast
].timestamp
;
2060 *tempbuf
= lostevent
;
2062 kdsp_actual
->kds_lostevents
= FALSE
;
2067 *tempbuf
= kdsp_actual
->kds_records
[kdsp_actual
->kds_readlast
++];
2069 if (kdsp_actual
->kds_readlast
== EVENTS_PER_STORAGE_UNIT
)
2070 release_storage_unit(min_cpu
, kdsp
.raw
);
2073 * Watch for out of order timestamps
2075 if (mintime
< min_kdbp
->kd_prev_timebase
) {
2077 * if so, use the previous timestamp + 1 cycle
2079 min_kdbp
->kd_prev_timebase
++;
2080 kdbg_set_timestamp_and_cpu(tempbuf
, min_kdbp
->kd_prev_timebase
, kdbg_get_cpu(tempbuf
));
2082 min_kdbp
->kd_prev_timebase
= mintime
;
2088 if ((RAW_file_written
+= sizeof(kd_buf
)) >= RAW_FLUSH_SIZE
)
2091 if (tempbuf_number
) {
2094 error
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)kdcopybuf
, tempbuf_number
* sizeof(kd_buf
), RAW_file_offset
,
2095 UIO_SYSSPACE
, IO_NODELOCKED
|IO_UNIT
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
2097 RAW_file_offset
+= (tempbuf_number
* sizeof(kd_buf
));
2099 if (RAW_file_written
>= RAW_FLUSH_SIZE
) {
2100 cluster_push(vp
, 0);
2102 RAW_file_written
= 0;
2105 error
= copyout(kdcopybuf
, buffer
, tempbuf_number
* sizeof(kd_buf
));
2106 buffer
+= (tempbuf_number
* sizeof(kd_buf
));
2113 count
-= tempbuf_number
;
2114 *number
+= tempbuf_number
;
2116 if (out_of_events
== TRUE
)
2118 * all trace buffers are empty
2122 if ((tempbuf_count
= count
) > KDCOPYBUF_COUNT
)
2123 tempbuf_count
= KDCOPYBUF_COUNT
;
2125 if ( !(old_kdebug_flags
& KDBG_NOWRAP
)) {
2126 enable_wrap(old_kdebug_slowcheck
, lostevents
);
2132 unsigned char *getProcName(struct proc
*proc
);
2133 unsigned char *getProcName(struct proc
*proc
) {
2135 return (unsigned char *) &proc
->p_comm
; /* Return pointer to the proc name */
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif

/* Upper bound on a user-supplied stackshot buffer size. */
#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
2147 /* Initialize the mutex governing access to the stack snapshot subsystem */
2148 __private_extern__
void
2149 stackshot_lock_init( void )
2151 stackshot_subsys_lck_grp_attr
= lck_grp_attr_alloc_init();
2153 stackshot_subsys_lck_grp
= lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr
);
2155 stackshot_subsys_lck_attr
= lck_attr_alloc_init();
2157 lck_mtx_init(&stackshot_subsys_mutex
, stackshot_subsys_lck_grp
, stackshot_subsys_lck_attr
);
2161 * stack_snapshot: Obtains a coherent set of stack traces for all threads
2162 * on the system, tracing both kernel and user stacks
2163 * where available. Uses machine specific trace routines
2164 * for ppc, ppc64 and x86.
2165 * Inputs: uap->pid - process id of process to be traced, or -1
2166 * for the entire system
2167 * uap->tracebuf - address of the user space destination
2169 * uap->tracebuf_size - size of the user space trace buffer
2170 * uap->options - various options, including the maximum
2171 * number of frames to trace.
2172 * Outputs: EPERM if the caller is not privileged
2173 * EINVAL if the supplied trace buffer isn't sanely sized
2174 * ENOMEM if we don't have enough memory to satisfy the
2176 * ENOENT if the target pid isn't found
2177 * ENOSPC if the supplied buffer is insufficient
2178 * *retval contains the number of bytes traced, if successful
2179 * and -1 otherwise. If the request failed due to
2180 * tracebuffer exhaustion, we copyout as much as possible.
2183 stack_snapshot(struct proc
*p
, register struct stack_snapshot_args
*uap
, int32_t *retval
) {
2186 if ((error
= suser(kauth_cred_get(), &p
->p_acflag
)))
2189 return stack_snapshot2(uap
->pid
, uap
->tracebuf
, uap
->tracebuf_size
,
2190 uap
->flags
, uap
->dispatch_offset
, retval
);
2194 stack_snapshot2(pid_t pid
, user_addr_t tracebuf
, uint32_t tracebuf_size
, uint32_t flags
, uint32_t dispatch_offset
, int32_t *retval
)
2197 unsigned bytesTraced
= 0;
2201 /* Serialize tracing */
2202 STACKSHOT_SUBSYS_LOCK();
2204 if ((tracebuf_size
<= 0) || (tracebuf_size
> SANE_TRACEBUF_SIZE
)) {
2209 assert(stackshot_snapbuf
== NULL
);
2210 if (kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&stackshot_snapbuf
, tracebuf_size
) != KERN_SUCCESS
) {
2215 if (panic_active()) {
2220 istate
= ml_set_interrupts_enabled(FALSE
);
2221 /* Preload trace parameters*/
2222 kdp_snapshot_preflight(pid
, stackshot_snapbuf
, tracebuf_size
, flags
, dispatch_offset
);
2224 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
2230 ml_set_interrupts_enabled(istate
);
2232 bytesTraced
= kdp_stack_snapshot_bytes_traced();
2234 if (bytesTraced
> 0) {
2235 if ((error
= copyout(stackshot_snapbuf
, tracebuf
,
2236 ((bytesTraced
< tracebuf_size
) ?
2237 bytesTraced
: tracebuf_size
))))
2239 *retval
= bytesTraced
;
2246 error
= kdp_stack_snapshot_geterror();
2254 if (stackshot_snapbuf
!= NULL
)
2255 kmem_free(kernel_map
, (vm_offset_t
) stackshot_snapbuf
, tracebuf_size
);
2256 stackshot_snapbuf
= NULL
;
2257 STACKSHOT_SUBSYS_UNLOCK();
2262 start_kern_tracing(unsigned int new_nkdbufs
) {
2266 kdbg_set_nkdbufs(new_nkdbufs
);
2269 kdbg_set_tracing_enabled(TRUE
);
2271 #if defined(__i386__) || defined(__x86_64__)
2272 uint64_t now
= mach_absolute_time();
2274 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO
, 1)) | DBG_FUNC_NONE
,
2275 (uint32_t)(tsc_rebase_abs_time
>> 32), (uint32_t)tsc_rebase_abs_time
,
2276 (uint32_t)(now
>> 32), (uint32_t)now
,
2279 printf("kernel tracing started\n");
2283 kdbg_dump_trace_to_file(const char *filename
)
2291 if ( !(kdebug_enable
& KDEBUG_ENABLE_TRACE
))
2294 if (global_state_pid
!= -1) {
2295 if ((proc_find(global_state_pid
)) != NULL
) {
2297 * The global pid exists, we're running
2298 * due to fs_usage, latency, etc...
2299 * don't cut the panic/shutdown trace file
2304 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO
, 0)) | DBG_FUNC_NONE
, 0, 0, 0, 0, 0);
2307 kd_ctrl_page
.enabled
= 0;
2309 ctx
= vfs_context_kernel();
2311 if ((error
= vnode_open(filename
, (O_CREAT
| FWRITE
| O_NOFOLLOW
), 0600, 0, &vp
, ctx
)))
2314 number
= kd_mapsize
;
2315 kdbg_readmap(0, &number
, vp
, ctx
);
2317 number
= nkdbufs
*sizeof(kd_buf
);
2318 kdbg_read(0, &number
, vp
, ctx
);
2320 vnode_close(vp
, FWRITE
, ctx
);
2322 sync(current_proc(), (void *)NULL
, (int *)NULL
);
2325 /* Helper function for filling in the BSD name for an address space
2326 * Defined here because the machine bindings know only Mach threads
2327 * and nothing about BSD processes.
2329 * FIXME: need to grab a lock during this?
2331 void kdbg_get_task_name(char* name_buf
, int len
, task_t task
)
2335 /* Note: we can't use thread->task (and functions that rely on it) here
2336 * because it hasn't been initialized yet when this function is called.
2337 * We use the explicitly-passed task parameter instead.
2339 proc
= get_bsdtask_info(task
);
2340 if (proc
!= PROC_NULL
)
2341 snprintf(name_buf
, len
, "%s/%d", proc
->p_comm
, proc
->p_pid
);
2343 snprintf(name_buf
, len
, "%p [!bsd]", task
);
2348 #if defined(NATIVE_TRACE_FACILITY)
2349 void trace_handler_map_ctrl_page(__unused
uintptr_t addr
, __unused
size_t ctrl_page_size
, __unused
size_t storage_size
, __unused
size_t kds_ptr_size
)
2352 void trace_handler_map_bufinfo(__unused
uintptr_t addr
, __unused
size_t size
)
2355 void trace_handler_unmap_bufinfo(void)
2358 void trace_handler_map_buffer(__unused
int index
, __unused
uintptr_t addr
, __unused
size_t size
)
2361 void trace_handler_unmap_buffer(__unused
int index
)