/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>
#include <sys/random.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/machine_routines.h>
#endif

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/telemetry.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/param.h>			/* for isset() */

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>
extern boolean_t kdebug_serial;

#include <sys/kdebugevents.h>
static void kdebug_serial_print(	/* forward */
	uint32_t, uint32_t, uint64_t,
	uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
/*
 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
 *
 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
 * They are registered dynamically. Each is assigned a cpu_id at registration.
 *
 * NOTE: IOP trace events may not use the same clock hardware as "normal"
 * cpus. There is an effort made to synchronize the IOP timebase with the
 * AP, but it should be understood that there may be discrepancies.
 *
 * Once registered, an IOP is permanent; it cannot be unloaded/unregistered.
 * The current implementation depends on this for thread safety.
 *
 * New registrations occur by allocating a kd_iop struct and assigning
 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
 * list_head pointer resolves any races.
 *
 * You may safely walk the kd_iops list at any time, without holding locks.
 *
 * When allocating buffers, the current kd_iops head is captured. Any operations
 * that depend on the buffer state (such as flushing IOP traces on reads,
 * etc.) should use the captured list head. This will allow registrations to
 * take place while trace is in use.
 */
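/*
 * Minimal registration sketch, assuming a hypothetical IOP driver callback
 * (my_iop_callback / my_driver_state are not part of this file):
 *
 *	static void my_iop_callback(void *context, kd_callback_type type, void *arg)
 *	{
 *		// react to KD_CALLBACK_KDEBUG_ENABLED / _DISABLED,
 *		// KD_CALLBACK_SYNC_FLUSH and KD_CALLBACK_TYPEFILTER_CHANGED
 *	}
 *
 *	kd_callback_t cb = {
 *		.func     = my_iop_callback,
 *		.context  = my_driver_state,
 *		.iop_name = "MYIOP"
 *	};
 *	int cpu_id = kernel_debug_register_callback(cb);
 *
 *	// events are then injected against the assigned cpu_id:
 *	kernel_debug_enter(cpu_id, debugid, timestamp, arg1, arg2, arg3, arg4, threadid);
 */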
typedef struct kd_iop {
	kd_callback_t	callback;
	uint32_t	cpu_id;
	uint64_t	last_timestamp; /* Prevent timer rollback */
	struct kd_iop*	next;
} kd_iop_t;

static kd_iop_t* kd_iops = NULL;
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */
void commpage_update_kdebug_enable(void); /* XXX sign */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_readcpumap(user_addr_t, size_t *);
int kdbg_readcurcpumap(user_addr_t, size_t *);
int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_readcurthrmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_thrmap_init(void);
int kdbg_reinit(boolean_t);
int kdbg_bootstrap(boolean_t);

int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);

static int kdbg_enable_typefilter(void);
static int kdbg_disable_typefilter(void);

static int create_buffers(boolean_t);
static void delete_buffers(void);

extern void IOSleep(int);
/* trace enable status */
unsigned int kdebug_enable = 0;

/* A static buffer to record events prior to the start of regular logging */
#define	KD_EARLY_BUFFER_MAX	64
static kd_buf		kd_early_buffer[KD_EARLY_BUFFER_MAX];
static int		kd_early_index = 0;
static boolean_t	kd_early_overflow = FALSE;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04	/* Obsolescent */
#define SLOW_CHUD	0x08
#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])

union kds_ptr {
	struct {
		uint32_t buffer_index:21;
		uint16_t offset:11;
	};
	uint32_t raw;
};

struct kd_storage {
	union	kds_ptr kds_next;
	uint32_t kds_bufindx;
	uint32_t kds_bufcnt;
	uint32_t kds_readlast;
	boolean_t kds_lostevents;
	uint64_t  kds_timestamp;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};

#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))
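/*
 * A minimal sketch of how the packed kds_ptr resolves (given the bitfield
 * layout above): buffer_index selects a kd_storage_buffers entry and offset
 * selects a kd_storage slot within it.
 *
 *	union kds_ptr p;
 *	p.buffer_index = 2;
 *	p.offset = 5;
 *	struct kd_storage *kds = POINTER_FROM_KDS_PTR(p);
 *	// kds == &kd_bufs[2].kdsb_addr[5]
 */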
struct kd_storage_buffers {
	struct	kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct	kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
int	n_storage_buffers = 0;
int	n_storage_threshold = 0;
int	kds_waiter = 0;
struct kd_bufinfo {
	union  kds_ptr kd_list_head;
	union  kds_ptr kd_list_tail;
	boolean_t kd_lostevents;
	uint32_t num_bufs;
	uint64_t kd_prev_timebase;
} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
struct kd_ctrl_page_t {
	union kds_ptr kds_free_list;
	uint32_t enabled	:1;
	int	kds_inuse_count;
	uint32_t kdebug_flags;
	uint32_t kdebug_slowcheck;
	/*
	 * The number of kd_bufinfo structs allocated may not match the current
	 * number of active cpus. We capture the iops list head at initialization
	 * which we could use to calculate the number of cpus we allocated data for,
	 * unless it happens to be null. To avoid that case, we explicitly also
	 * capture a cpu count.
	 */
	kd_iop_t* kdebug_iops;
	uint32_t kdebug_cpus;
} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

boolean_t kdlog_bg_trace = FALSE;
boolean_t kdlog_bg_trace_running = FALSE;
unsigned int bg_nkdbufs = 0;

unsigned int nkdbufs = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t      *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t     *stackshot_subsys_lck_attr;
static lck_mtx_t       stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);
kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;

off_t	RAW_file_offset = 0;
int	RAW_file_written = 0;

#define	RAW_FLUSH_SIZE	(2 * 1024 * 1024)

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */
#define DBG_FUNC_MASK	0xfffffffc

/* TODO: move to kdebug.h */
#define CLASS_MASK      0xff000000
#define CLASS_OFFSET    24
#define SUBCLASS_MASK   0x00ff0000
#define SUBCLASS_OFFSET 16
#define CSC_MASK        0xffff0000	/* class and subclass mask */
#define CSC_OFFSET      SUBCLASS_OFFSET

#define EXTRACT_CLASS(debugid)    ( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET   ) )
#define EXTRACT_SUBCLASS(debugid) ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET) )
#define EXTRACT_CSC(debugid)      ( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET     ) )

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000
#define DBG_SCALL_MASK	0xffff0000
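/*
 * Worked example of the debugid layout (class in the top byte, subclass in
 * the next byte): for MACH_vmfault == 0x01300008,
 *
 *	EXTRACT_CLASS(0x01300008)    == 0x01   (DBG_MACH in <sys/kdebug.h>)
 *	EXTRACT_SUBCLASS(0x01300008) == 0x30   (DBG_MACH_VM)
 *	EXTRACT_CSC(0x01300008)      == 0x0130
 *	0x01300008 & DBG_FUNC_MASK   == 0x01300008
 */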
/* task to string structure */
struct tts
{
	task_t    task;            /* from procs task */
	pid_t     pid;             /* from procs p_pid  */
	char      task_comm[20];   /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;    /* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void );

static uint8_t *type_filter_bitmap;
/*
 * This allows kperf to swap out the global state pid when kperf ownership is
 * passed from one process to another. It checks the old global state pid so
 * that kperf can't accidentally steal control of trace when a non-kperf trace user has
 * control of trace.
 */
void
kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);

void
kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
{
	if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
		return;

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (old_pid == global_state_pid)
		global_state_pid = new_pid;

	lck_mtx_unlock(kd_trace_mtx_sysctl);
}
static uint32_t
kdbg_cpu_count(boolean_t early_trace)
{
	/*
	 * When early_trace is set we've started tracing before the IOKit has
	 * even started running... just use the static max value.
	 */
	(void)early_trace;

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	assert(hinfo.logical_cpu_max > 0);
	return hinfo.logical_cpu_max;
}
static void
kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
{
	while (iop) {
		iop->callback.func(iop->callback.context, type, arg);
		iop = iop->next;
	}
}
static void
kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kdebug_enable |= trace_type;
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
		kd_ctrl_page.enabled = 1;
		commpage_update_kdebug_enable();
	} else {
		kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
		kd_ctrl_page.enabled = 0;
		commpage_update_kdebug_enable();
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (enabled) {
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
	} else {
		/*
		 * If you do not flush the IOP trace buffers, they can linger
		 * for a considerable period; consider code which disables and
		 * deallocates without a final sync flush.
		 */
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
	}
}
static void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
void
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
	*old_flags = kd_ctrl_page.kdebug_flags;

	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

	if ( !(old_slowcheck & SLOW_NOLOG))
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

	if (lostevents == TRUE)
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
static int
create_buffers(boolean_t early_trace)
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	/*
	 * For the duration of this allocation, trace code will only reference
	 * kdebug_iops. Any iops registered after this enabling will not be
	 * messaged until the buffers are reallocated.
	 *
	 * TLDR; Must read kd_iops once and only once!
	 */
	kd_ctrl_page.kdebug_iops = kd_iops;

	/*
	 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
	 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
	 * be the list head + 1.
	 */
	kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}

	if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	kd_bufs = NULL;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	n_storage_units = 0;

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

			kd_ctrl_page.kds_free_list.buffer_index = i;
			kd_ctrl_page.kds_free_list.offset = n;
		}
		n_storage_units += n_elements;
	}
	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

	for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = FALSE;
		kdbip[i].num_bufs = 0;
	}
	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

	kd_ctrl_page.kds_inuse_count = 0;
	n_storage_threshold = n_storage_units / 2;
out:
	if (error)
		delete_buffers();

	return(error);
}
static void
delete_buffers(void)
{
	int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
			}
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

		kdbip = NULL;
	}
	kd_ctrl_page.kdebug_iops = NULL;
	kd_ctrl_page.kdebug_cpus = 0;
	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}
static void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
	int s = 0;
	struct	kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	union kds_ptr kdsp;

	kdsp.raw = kdsp_raw;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
		kd_ctrl_page.kds_free_list = kdsp;

		kd_ctrl_page.kds_inuse_count--;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
boolean_t
allocate_storage_unit(int cpu)
{
	union	kds_ptr kdsp;
	struct	kd_storage *kdsp_actual, *kdsp_next_actual;
	struct  kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t	oldest_ts, ts;
	boolean_t	retval = TRUE;
	int		s = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
			goto out;
	}

	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page.kds_inuse_count++;
	} else {
		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
			kdbp->kd_lostevents = TRUE;
			retval = FALSE;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {

			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			commpage_update_kdebug_enable();
			retval = FALSE;
			goto out;
		}
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
			kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
			kdsp_next_actual->kds_lostevents = TRUE;
		} else
			kdbp_vict->kd_lostevents = TRUE;

		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp_actual->kds_timestamp = mach_absolute_time();
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt	  = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = FALSE;
	kdsp_actual->kds_bufindx  = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
		kdbp->kd_list_head = kdsp;
	else
		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return (retval);
}
int
kernel_debug_register_callback(kd_callback_t callback)
{
	kd_iop_t* iop;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
		memcpy(&iop->callback, &callback, sizeof(kd_callback_t));

		/*
		 * <rdar://problem/13351477> Some IOP clients are not providing a name.
		 */
		{
			boolean_t is_valid_name = FALSE;
			for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
				/* This is roughly isprintable(c) */
				if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
					continue;
				if (callback.iop_name[length] == 0) {
					if (length)
						is_valid_name = TRUE;
					break;
				}
			}

			if (!is_valid_name) {
				strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
			}
		}

		iop->last_timestamp = 0;

		do {
			/*
			 * We use two pieces of state, the old list head
			 * pointer, and the value of old_list_head->cpu_id.
			 * If we read kd_iops more than once, it can change
			 * between reads.
			 *
			 * TLDR; Must not read kd_iops more than once per loop.
			 */
			iop->next = kd_iops;
			iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);

			/*
			 * Header says OSCompareAndSwapPtr has a memory barrier
			 */
		} while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));

		return iop->cpu_id;
	}

	return 0;
}
void
kernel_debug_enter(
	uint32_t	coreid,
	uint32_t	debugid,
	uint64_t	timestamp,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	threadid
	)
{
	uint32_t	bindx;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			if (debugid >= kdlog_beg && debugid <= kdlog_end)
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:

	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	kdbp = &kdbip[coreid];
	timestamp &= KDBG_TIMESTAMP_MASK;

#if KDEBUG_MOJO_TRACE
	if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
		kdebug_serial_print(coreid, debugid, timestamp,
				    arg1, arg2, arg3, arg4, threadid);
#endif

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(coreid) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	// IOP entries can be allocated before xnu allocates and inits the buffer
	if (timestamp < kdsp_actual->kds_timestamp)
		kdsp_actual->kds_timestamp = timestamp;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = threadid;

	kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
		boolean_t need_kds_wakeup = FALSE;
		int	s;

		/*
		 * try to take the lock here to synchronize with the
		 * waiter entering the blocked state... use the try
		 * mode to prevent deadlocks caused by re-entering this
		 * routine due to various trace points triggered in the
		 * lck_spin_sleep_xxxx routines used to actually enter
		 * our wait condition... no problem if we fail,
		 * there will be lots of additional events coming in that
		 * will eventually succeed in grabbing this lock
		 */
		s = ml_set_interrupts_enabled(FALSE);

		if (lck_spin_try_lock(kdw_spin_lock)) {

			if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
				kds_waiter = 0;
				need_kds_wakeup = TRUE;
			}
			lck_spin_unlock(kdw_spin_lock);
		}
		ml_set_interrupts_enabled(s);

		if (need_kds_wakeup == TRUE)
			wakeup(&kds_waiter);
	}
}
static void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	struct proc 	*curproc;
	uint64_t 	now;
	uint32_t	bindx;
	int		s;
	kd_buf		*kd;
	int		cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
			kd_chudhook_fn chudhook;
			/*
			 * Mask interrupts to minimize the interval across
			 * which the driver providing the hook could be
			 * unloaded.
			 */
			s = ml_set_interrupts_enabled(FALSE);
			chudhook = kdebug_chudhook;
			if (chudhook)
				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
			ml_set_interrupts_enabled(s);
		}
		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if ( !ml_at_interrupt_context()) {
			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
				/*
				 * If kdebug flag is not set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && !(curproc->p_kdebug)) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
				/*
				 * If kdebug flag is set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && curproc->p_kdebug) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
		}
		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (debugid < kdlog_beg || debugid > kdlog_end)
				goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:
	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	cpu = cpu_number();
	kdbp = &kdbip[cpu];

#if KDEBUG_MOJO_TRACE
	if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
		kdebug_serial_print(cpu, debugid,
				    mach_absolute_time() & KDBG_TIMESTAMP_MASK,
				    arg1, arg2, arg3, arg4, arg5);
#endif

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(cpu) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
		uint32_t	etype;
		uint32_t	stype;

		etype = debugid & DBG_FUNC_MASK;
		stype = debugid & DBG_SCALL_MASK;

		if (etype == INTERRUPT || etype == MACH_vmfault ||
		    stype == BSC_SysCall || stype == MACH_SysCall) {

			boolean_t need_kds_wakeup = FALSE;

			/*
			 * try to take the lock here to synchronize with the
			 * waiter entering the blocked state... use the try
			 * mode to prevent deadlocks caused by re-entering this
			 * routine due to various trace points triggered in the
			 * lck_spin_sleep_xxxx routines used to actually enter
			 * one of our 2 wait conditions... no problem if we fail,
			 * there will be lots of additional events coming in that
			 * will eventually succeed in grabbing this lock
			 */
			s = ml_set_interrupts_enabled(FALSE);

			if (lck_spin_try_lock(kdw_spin_lock)) {

				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
					kds_waiter = 0;
					need_kds_wakeup = TRUE;
				}
				lck_spin_unlock(kdw_spin_lock);
			}
			ml_set_interrupts_enabled(s);

			if (need_kds_wakeup == TRUE)
				wakeup(&kds_waiter);
		}
	}
}
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
}
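/*
 * Minimal usage sketch: kernel code normally reaches kernel_debug()/
 * kernel_debug1() through the KERNEL_DEBUG_CONSTANT()/KERNEL_DEBUG() macros
 * in <sys/kdebug.h>, typically bracketing an operation with DBG_FUNC_START
 * and DBG_FUNC_END. MY_SUBSYS_CODE below is a hypothetical debugid.
 *
 *	KERNEL_DEBUG_CONSTANT(MY_SUBSYS_CODE | DBG_FUNC_START, arg1, arg2, 0, 0, 0);
 *	// ... the operation being traced ...
 *	KERNEL_DEBUG_CONSTANT(MY_SUBSYS_CODE | DBG_FUNC_END, result, 0, 0, 0, 0);
 */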
void
kernel_debug_string(const char *message)
{
	uintptr_t arg[4] = {0, 0, 0, 0};

	/* Stuff the message string in the args and log it. */
	strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
	KERNEL_DEBUG_EARLY(TRACE_INFO_STRING,
		arg[0], arg[1], arg[2], arg[3]);
}
extern int	master_cpu;	/* MACH_KERNEL_PRIVATE */
/*
 * Used prior to start_kern_tracing() being called.
 * Log temporarily into a static buffer.
 */
void
kernel_debug_early(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4)
{
	/* If tracing is already initialized, use it */
	if (nkdbufs) {
		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
		return;
	}

	/* Do nothing if the buffer is full or we're not on the boot cpu */
	kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
	if (kd_early_overflow ||
	    cpu_number() != master_cpu)
		return;

	kd_early_buffer[kd_early_index].debugid = debugid;
	kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
	kd_early_buffer[kd_early_index].arg1 = arg1;
	kd_early_buffer[kd_early_index].arg2 = arg2;
	kd_early_buffer[kd_early_index].arg3 = arg3;
	kd_early_buffer[kd_early_index].arg4 = arg4;
	kd_early_buffer[kd_early_index].arg5 = 0;
	kd_early_index++;
}
/*
 * Transfer the contents of the temporary buffer into the trace buffers.
 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
 * when mach_absolute_time is set to 0.
 */
void
kernel_debug_early_end(void)
{
	int	i;

	if (cpu_number() != master_cpu)
		panic("kernel_debug_early_end() not call on boot processor");

	/* Fake sentinel marking the start of kernel time relative to TSC */
	kernel_debug_enter(
		0,
		TRACE_TIMESTAMPS,
		0,
		(uint32_t)(tsc_rebase_abs_time >> 32),
		(uint32_t)tsc_rebase_abs_time,
		0,
		0,
		0);
	for (i = 0; i < kd_early_index; i++) {
		kernel_debug_enter(
			0,
			kd_early_buffer[i].debugid,
			kd_early_buffer[i].timestamp,
			kd_early_buffer[i].arg1,
			kd_early_buffer[i].arg2,
			kd_early_buffer[i].arg3,
			kd_early_buffer[i].arg4,
			0);
	}

	/* Cut events-lost event on overflow */
	if (kd_early_overflow)
		KERNEL_DEBUG_CONSTANT(
			TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);

	/* This trace marks the start of kernel tracing */
	kernel_debug_string("early trace done");
}
/*
 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
 */
int
kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
{
	struct kdebug_trace64_args uap64;

	uap64.code = uap->code;
	uap64.arg1 = uap->arg1;
	uap64.arg2 = uap->arg2;
	uap64.arg3 = uap->arg3;
	uap64.arg4 = uap->arg4;

	return kdebug_trace64(p, &uap64, retval);
}
/*
 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
 */
int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
{
	uint8_t code_class;

	/*
	 * Not all classes are supported for injection from userspace, especially ones used by the core
	 * kernel tracing infrastructure.
	 */
	code_class = EXTRACT_CLASS(uap->code);

	switch (code_class) {
		case DBG_TRACE:
			return EPERM;
	}

	if ( __probable(kdebug_enable == 0) )
		return(0);

	kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));

	return(0);
}
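/*
 * Sketch of the matching userspace entry (assuming the generated syscall
 * stub and the userspace <sys/kdebug.h> macros): DBG_TRACE-class codes are
 * rejected with EPERM by the check above.
 *
 *	uint32_t code = KDBG_CODE(DBG_APPS, 0x01, 0x02) | DBG_FUNC_NONE;
 *	syscall(SYS_kdebug_trace, code, 1, 2, 3, 4);
 */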
static void
kdbg_lock_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutexes
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}
int
kdbg_bootstrap(boolean_t early_trace)
{
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers(early_trace));
}
int
kdbg_reinit(boolean_t early_trace)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap(early_trace);

	RAW_file_offset = 0;
	RAW_file_written = 0;

	return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4] = { 0L, 0L, 0L, 0L };

	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread  = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
/*
 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
 *
 * You may provide a buffer and size, or if you set the buffer to NULL, a
 * buffer of sufficient size will be allocated.
 *
 * If you provide a buffer and it is too small, sets cpumap_size to the number
 * of bytes required and returns EINVAL.
 *
 * On success, if you provided a buffer, cpumap_size is set to the number of
 * bytes written. If you did not provide a buffer, cpumap is set to the newly
 * allocated buffer and cpumap_size is set to the number of bytes allocated.
 *
 * NOTE: It may seem redundant to pass both iops and a cpu_count.
 *
 * We may be reporting data from "now", or from the "past".
 *
 * The "now" data would be for something like kdbg_readcurcpumap().
 * The "past" data would be for kdbg_readcpumap().
 *
 * If we do not pass both iops and cpu_count, and iops is NULL, this function
 * will need to read "now" state to get the number of cpus, which would be in
 * error if we were reporting "past" state.
 */
int
kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
{
	assert(cpumap);
	assert(cpumap_size);
	assert(!iops || iops->cpu_id + 1 == cpu_count);

	uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
	uint32_t bytes_available = *cpumap_size;
	*cpumap_size = bytes_needed;

	if (*cpumap == NULL) {
		if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
			return ENOMEM;
		}
	} else if (bytes_available < bytes_needed) {
		return EINVAL;
	}

	kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;

	header->version_no = RAW_VERSION1;
	header->cpu_count = cpu_count;

	kd_cpumap* cpus = (kd_cpumap*)&header[1];

	int32_t index = cpu_count - 1;
	while (iops) {
		cpus[index].cpu_id = iops->cpu_id;
		cpus[index].flags = KDBG_CPUMAP_IS_IOP;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));

		iops = iops->next;
		index--;
	}

	while (index >= 0) {
		cpus[index].cpu_id = index;
		cpus[index].flags = 0;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, "AP", sizeof(cpus->name));

		index--;
	}

	return KERN_SUCCESS;
}
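/*
 * Minimal usage sketch of the allocation path described above: pass a NULL
 * buffer and zero size and the routine allocates the cpumap and reports its
 * size (mirrors kdbg_readcpumap() below).
 *
 *	uint8_t  *cpumap = NULL;
 *	uint32_t  cpumap_size = 0;
 *	if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops,
 *	                              kd_ctrl_page.kdebug_cpus,
 *	                              &cpumap, &cpumap_size) == KERN_SUCCESS) {
 *		// ... use cpumap ...
 *		kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
 *	}
 */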
void
kdbg_thrmap_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
		return;

	kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);

	if (kd_mapptr)
		kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
}
* kdbg_thrmap_init_internal(unsigned int count
, unsigned int *mapsize
, unsigned int *mapcount
)
1551 kd_threadmap
*mapptr
;
1554 int tts_count
; /* number of task-to-string structures */
1555 struct tts
*tts_mapptr
;
1556 unsigned int tts_mapsize
= 0;
1561 * need to use PROC_SCANPROCLIST with proc_iterate
1566 * Calculate the sizes of map buffers
1568 for (p
= allproc
.lh_first
, *mapcount
=0, tts_count
=0; p
; p
= p
->p_list
.le_next
) {
1569 *mapcount
+= get_task_numacts((task_t
)p
->task
);
1575 * The proc count could change during buffer allocation,
1576 * so introduce a small fudge factor to bump up the
1577 * buffer sizes. This gives new tasks some chance of
1578 * making into the tables. Bump up by 25%.
1580 *mapcount
+= *mapcount
/4;
1581 tts_count
+= tts_count
/4;
1583 *mapsize
= *mapcount
* sizeof(kd_threadmap
);
1585 if (count
&& count
< *mapcount
)
1588 if ((kmem_alloc(kernel_map
, &kaddr
, (vm_size_t
)*mapsize
) == KERN_SUCCESS
)) {
1589 bzero((void *)kaddr
, *mapsize
);
1590 mapptr
= (kd_threadmap
*)kaddr
;
1594 tts_mapsize
= tts_count
* sizeof(struct tts
);
1596 if ((kmem_alloc(kernel_map
, &kaddr
, (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
)) {
1597 bzero((void *)kaddr
, tts_mapsize
);
1598 tts_mapptr
= (struct tts
*)kaddr
;
1600 kmem_free(kernel_map
, (vm_offset_t
)mapptr
, *mapsize
);
1605 * We need to save the procs command string
1606 * and take a reference for each task associated
1607 * with a valid process
1613 * should use proc_iterate
1615 for (p
= allproc
.lh_first
, i
=0; p
&& i
< tts_count
; p
= p
->p_list
.le_next
) {
1616 if (p
->p_lflag
& P_LEXIT
)
1620 task_reference(p
->task
);
1621 tts_mapptr
[i
].task
= p
->task
;
1622 tts_mapptr
[i
].pid
= p
->p_pid
;
1623 (void)strlcpy(tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
));
1632 * Initialize thread map data
1636 akrt
.maxcount
= *mapcount
;
1638 for (i
= 0; i
< tts_count
; i
++) {
1639 akrt
.atts
= &tts_mapptr
[i
];
1640 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
);
1641 task_deallocate((task_t
) tts_mapptr
[i
].task
);
1643 kmem_free(kernel_map
, (vm_offset_t
)tts_mapptr
, tts_mapsize
);
1645 *mapcount
= akrt
.count
;
static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	global_state_pid = -1;
	kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	kdbg_disable_typefilter();

	delete_buffers();
	nkdbufs	= 0;

	/* Clean up the thread map buffer */
	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
	if (kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_mapptr = (kd_threadmap *) 0;
	}
	kd_mapsize = 0;
	kd_mapcount = 0;

	RAW_file_offset = 0;
	RAW_file_written = 0;
}
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret=0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret=0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}
/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		ret = ENOTSUP;

	return(ret);
}
static int
kdbg_enable_typefilter(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
		/* free the old filter */
		kdbg_disable_typefilter();
	}

	if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
		return ENOSPC;
	}

	bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);

	/* Turn off range and value checks */
	kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);

	/* Enable filter checking */
	kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
	kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	return 0;
}
static int
kdbg_disable_typefilter(void)
{
	/* Disable filter checking */
	kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;

	/* Turn off slow checks unless pid checks are using them */
	if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	else
		kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

	if(type_filter_bitmap == NULL)
		return 0;

	vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
	type_filter_bitmap = NULL;

	kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
	return 0;
}
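/*
 * Sketch of how a caller populates the typefilter (one bit per class/subclass
 * pair, indexed like EXTRACT_CSC()): to let only DBG_MACH/DBG_MACH_VM events
 * through the KDBG_TYPEFILTER_CHECK path, the KDBG_TYPEFILTER_BITMAP_SIZE-byte
 * buffer handed to the KERN_KDSET_TYPEFILTER sysctl would have bit 0x0130 set.
 *
 *	setbit(bitmap, 0x0130);	// same index isset(type_filter_bitmap, EXTRACT_CSC(debugid)) tests
 */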
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;     /* Turn off range check */
		kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;        /* Turn on specific value check  */
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK   |
						   KDBG_PIDCHECK   | KDBG_PIDEXCLUDE |
						   KDBG_TYPEFILTER_CHECK)) )
			kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		else
			kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
{
	uint8_t* cpumap = NULL;
	uint32_t cpumap_size = 0;
	int ret = KERN_SUCCESS;

	if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
		if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
			if (user_cpumap) {
				size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
				if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
					ret = EFAULT;
				}
			}
			*user_cpumap_size = cpumap_size;
			kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
{
	kd_threadmap *mapptr;
	unsigned int mapsize;
	unsigned int mapcount;
	unsigned int count = 0;
	int ret = 0;

	count = *bufsize/sizeof(kd_threadmap);
	*bufsize = 0;

	if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
		if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
			ret = EFAULT;
		else
			*bufsize = (mapcount * sizeof(kd_threadmap));

		kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;
	unsigned int mapsize;

	count = avail/sizeof (kd_threadmap);

	mapsize = kd_mapcount * sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < mapsize)
				ret = EINVAL;
			else
			{
				if (vp)
				{
					RAW_header	header;
					clock_sec_t	secs;
					clock_usec_t	usecs;
					char		*pad_buf;
					uint32_t	pad_size;
					uint32_t	extra_thread_count = 0;
					uint32_t	cpumap_size;

					/*
					 * To write a RAW_VERSION1+ file, we
					 * must embed a cpumap in the "padding"
					 * used to page align the events following
					 * the threadmap. If the threadmap happens
					 * to not require enough padding, we
					 * artificially increase its footprint
					 * until it needs enough padding.
					 */

					pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
					cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);

					if (cpumap_size > pad_size) {
						/* Force an overflow onto the next page, we get a full page of padding */
						extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
					}

					header.version_no = RAW_VERSION1;
					header.thread_count = count + extra_thread_count;

					clock_get_calendar_microtime(&secs, &usecs);
					header.TOD_secs = secs;
					header.TOD_usecs = usecs;

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += sizeof(RAW_header);

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += mapsize;

					if (extra_thread_count) {
						pad_size = extra_thread_count * sizeof(kd_threadmap);
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);
						if (ret)
							goto write_error;

						RAW_file_offset += pad_size;
					}

					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
					if (pad_size) {
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						/*
						 * embed a cpumap in the padding bytes.
						 * older code will skip this.
						 * newer code will know how to read it.
						 */
						uint32_t temp = pad_size;
						if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
							memset(pad_buf, 0, pad_size);
						}

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);
						if (ret)
							goto write_error;
						RAW_file_offset += pad_size;
					}
					RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;

				} else {
					if (copyout(kd_mapptr, buffer, mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp)
	{
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
		RAW_file_written += sizeof(uint32_t);
	}
write_error:
	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
static unsigned int
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		return (value);
	else
		return (max_entries);
}
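/*
 * Worked example of the cap (sizes illustrative): with sane_size of 8GB and a
 * 64-byte kd_buf, max_entries is (8GB / 2) / 64 = 67,108,864 events, so any
 * larger request is silently clamped to that value.
 */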
static int
kdbg_enable_bg_trace(void)
{
	int ret = 0;

	if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
		nkdbufs = bg_nkdbufs;
		ret = kdbg_reinit(FALSE);
		if (0 == ret) {
			kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
			kdlog_bg_trace_running = TRUE;
		}
	}
	return ret;
}
static void
kdbg_disable_bg_trace(void)
{
	if (kdlog_bg_trace_running == TRUE) {
		kdlog_bg_trace_running = FALSE;
		kdbg_clear();
	}
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
	} else {
		/* disable chudhook */
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
		kdebug_chudhook = 0;
	}
}
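/*
 * Rough sketch of the sysctl sequence a tracing tool drives through
 * kdbg_control() below (selector names from <sys/kdebug.h>; the exact mib
 * lengths and error handling are omitted / assumed):
 *
 *	int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, 1000000 };
 *	size_t needed = 0;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);		// request ~1M events
 *
 *	mib[2] = KERN_KDSETUP;
 *	sysctl(mib, 3, NULL, &needed, NULL, 0);		// allocate the buffers
 *
 *	mib[2] = KERN_KDENABLE; mib[3] = KDEBUG_ENABLE_TRACE;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);		// start tracing
 */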
2210 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
2213 size_t size
= *sizep
;
2214 unsigned int value
= 0;
2216 kbufinfo_t kd_bufinfo
;
2220 if (name
[0] == KERN_KDGETENTROPY
||
2221 name
[0] == KERN_KDWRITETR
||
2222 name
[0] == KERN_KDWRITEMAP
||
2223 name
[0] == KERN_KDEFLAGS
||
2224 name
[0] == KERN_KDDFLAGS
||
2225 name
[0] == KERN_KDENABLE
||
2226 name
[0] == KERN_KDENABLE_BG_TRACE
||
2227 name
[0] == KERN_KDSETBUF
) {
2236 if ( !(kd_ctrl_page
.kdebug_flags
& KDBG_LOCKINIT
))
2239 lck_mtx_lock(kd_trace_mtx_sysctl
);
2244 * Does not alter the global_state_pid
2245 * This is a passive request.
2247 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
2249 * There is not enough room to return even
2250 * the first element of the info structure.
2255 kd_bufinfo
.nkdbufs
= nkdbufs
;
2256 kd_bufinfo
.nkdthreads
= kd_mapcount
;
2258 if ( (kd_ctrl_page
.kdebug_slowcheck
& SLOW_NOLOG
) )
2259 kd_bufinfo
.nolog
= 1;
2261 kd_bufinfo
.nolog
= 0;
2263 kd_bufinfo
.flags
= kd_ctrl_page
.kdebug_flags
;
2264 #if defined(__LP64__)
2265 kd_bufinfo
.flags
|= KDBG_LP64
;
2267 kd_bufinfo
.bufid
= global_state_pid
;
2269 if (size
>= sizeof(kd_bufinfo
)) {
2271 * Provide all the info we have
2273 if (copyout(&kd_bufinfo
, where
, sizeof(kd_bufinfo
)))
2277 * For backwards compatibility, only provide
2278 * as much info as there is room for.
2280 if (copyout(&kd_bufinfo
, where
, size
))
2285 case KERN_KDGETENTROPY
: {
2286 /* Obsolescent - just fake with a random buffer */
2287 char *buffer
= (char *) kalloc(size
);
2288 read_frandom((void *) buffer
, size
);
2289 ret
= copyout(buffer
, where
, size
);
2290 kfree(buffer
, size
);
2294 case KERN_KDENABLE_BG_TRACE
:
2295 bg_nkdbufs
= kdbg_set_nkdbufs(value
);
2296 kdlog_bg_trace
= TRUE
;
2297 ret
= kdbg_enable_bg_trace();
2300 case KERN_KDDISABLE_BG_TRACE
:
2301 kdlog_bg_trace
= FALSE
;
2302 kdbg_disable_bg_trace();
2306 if ((curproc
= current_proc()) != NULL
)
2307 curpid
= curproc
->p_pid
;
2312 if (global_state_pid
== -1)
2313 global_state_pid
= curpid
;
2314 else if (global_state_pid
!= curpid
) {
2315 if ((p
= proc_find(global_state_pid
)) == NULL
) {
2317 * The global pid no longer exists
2319 global_state_pid
= curpid
;
2322 * The global pid exists, deny this request
2333 kdbg_disable_bg_trace();
2335 value
&= KDBG_USERFLAGS
;
2336 kd_ctrl_page
.kdebug_flags
|= value
;
2339 kdbg_disable_bg_trace();
2341 value
&= KDBG_USERFLAGS
;
2342 kd_ctrl_page
.kdebug_flags
&= ~value
;
2346 * Enable tracing mechanism. Two types:
2347 * KDEBUG_TRACE is the standard one,
2348 * and KDEBUG_PPT which is a carefully
2349 * chosen subset to avoid performance impact.
2353 * enable only if buffer is initialized
2355 if (!(kd_ctrl_page
.kdebug_flags
& KDBG_BUFINIT
) ||
2356 !(value
== KDEBUG_ENABLE_TRACE
|| value
== KDEBUG_ENABLE_PPT
)) {
2362 kdbg_set_tracing_enabled(TRUE
, value
);
2366 kdbg_set_tracing_enabled(FALSE
, 0);
2370 kdbg_disable_bg_trace();
2372 nkdbufs
= kdbg_set_nkdbufs(value
);
2375 kdbg_disable_bg_trace();
2377 ret
= kdbg_reinit(FALSE
);
2381 ret
= kdbg_enable_bg_trace();
2384 if(size
< sizeof(kd_regtype
)) {
2388 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
2392 kdbg_disable_bg_trace();
2394 ret
= kdbg_setreg(&kd_Reg
);
2397 if (size
< sizeof(kd_regtype
)) {
2401 ret
= kdbg_getreg(&kd_Reg
);
2402 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))) {
2405 kdbg_disable_bg_trace();
2409 ret
= kdbg_read(where
, sizep
, NULL
, NULL
);
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	{
		struct vfs_context context;
		struct fileproc *fp;
		size_t number;
		vnode_t vp;
		int fd;

		kdbg_disable_bg_trace();

		if (name[0] == KERN_KDWRITETR) {
			int s;
			int wait_result = THREAD_AWAKENED;
			u_int64_t abstime;
			u_int64_t ns;

			if (*sizep) {
				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
				nanoseconds_to_absolutetime(ns, &abstime);
				clock_absolutetime_interval_to_deadline(abstime, &abstime);
			} else
				abstime = 0;

			s = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(kdw_spin_lock);

			while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

				kds_waiter = 1;

				if (abstime)
					wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
				else
					wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

				kds_waiter = 0;
			}
			lck_spin_unlock(kdw_spin_lock);
			ml_set_interrupts_enabled(s);
		}
		p = current_proc();
		fd = value;

		proc_fdlock(p);
		if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
			proc_fdunlock(p);
			break;
		}
		context.vc_thread = current_thread();
		context.vc_ucred = fp->f_fglob->fg_cred;

		if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
			fp_drop(p, fd, fp, 1);
			proc_fdunlock(p);

			ret = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		proc_fdunlock(p);

		if ((ret = vnode_getwithref(vp)) == 0) {
			RAW_file_offset = fp->f_fglob->fg_offset;
			if (name[0] == KERN_KDWRITETR) {
				number = nkdbufs * sizeof(kd_buf);

				KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_START, 0, 0, 0, 0, 0);
				ret = kdbg_read(0, &number, vp, &context);
				KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_END, number, 0, 0, 0, 0);

				*sizep = number;
			} else {
				number = kd_mapcount * sizeof(kd_threadmap);
				kdbg_readthrmap(0, &number, vp, &context);
			}
			fp->f_fglob->fg_offset = RAW_file_offset;
			vnode_put(vp);
		}
		fp_drop(p, fd, fp, 0);

		break;
	}
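	/*
	 * Illustrative sketch (not compiled into the build): the KERN_KDWRITETR path
	 * above converts a caller-supplied millisecond count (*sizep) into a Mach
	 * absolute-time deadline before it blocks waiting for the storage threshold.
	 * The helper name below is hypothetical; it only restates that conversion
	 * using the same kernel routines used above.
	 */
#if 0
	static uint64_t
	kdbg_ms_to_deadline(uint64_t milliseconds)
	{
		uint64_t ns, abstime;

		if (milliseconds == 0)
			return 0;	/* 0 means "no deadline" in the code above */

		ns = milliseconds * (uint64_t)(1000 * 1000);		/* ms -> ns */
		nanoseconds_to_absolutetime(ns, &abstime);		/* ns -> mach ticks */
		clock_absolutetime_interval_to_deadline(abstime, &abstime); /* interval -> deadline */

		return abstime;
	}
#endif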
	case KERN_KDBUFWAIT:
	{
		/* WRITETR lite -- just block until there's data */
		int s;
		int wait_result = THREAD_AWAKENED;
		u_int64_t abstime;
		u_int64_t ns;
		size_t number = 0;

		kdbg_disable_bg_trace();

		if (*sizep) {
			ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
			nanoseconds_to_absolutetime(ns, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		} else
			abstime = 0;

		s = ml_set_interrupts_enabled(FALSE);
		if (!s)
			panic("trying to wait with interrupts off");
		lck_spin_lock(kdw_spin_lock);

		/* drop the mutex so we don't exclude others from
		 * accessing the trace buffers
		 */
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		while (wait_result == THREAD_AWAKENED &&
			kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

			kds_waiter = 1;

			if (abstime)
				wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
			else
				wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

			kds_waiter = 0;
		}

		/* check the count under the spinlock */
		number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);

		lck_spin_unlock(kdw_spin_lock);
		ml_set_interrupts_enabled(s);

		/* pick the mutex back up again */
		lck_mtx_lock(kd_trace_mtx_sysctl);

		/* write out whether we've exceeded the threshold */
		*sizep = number;
		break;
	}
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setpid(&kd_Reg);
		break;

	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setpidex(&kd_Reg);
		break;

	case KERN_KDCPUMAP:
		ret = kdbg_readcpumap(where, sizep);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readthrmap(where, sizep, NULL, NULL);
		break;

	case KERN_KDREADCURTHRMAP:
		ret = kdbg_readcurthrmap(where, sizep);
		break;

	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setrtcdec(&kd_Reg);
		break;
	case KERN_KDSET_TYPEFILTER:
		kdbg_disable_bg_trace();

		if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0) {
			if ((ret = kdbg_enable_typefilter()))
				break;
		}
		if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
			ret = EINVAL;
			break;
		}
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
		break;

	default:
		ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
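
/*
 * Illustrative sketch (not compiled into the build): the typefilter installed
 * by KERN_KDSET_TYPEFILTER above is a bitmap with one bit per class/subclass
 * pair, which the recording path consults before keeping an event.  The
 * standalone helpers below only illustrate that kind of bitmap test; the 8 KB
 * size and the ">> 16" class/subclass extraction are assumptions made for this
 * sketch, not values taken verbatim from this file.
 */
#if 0
#include <stdint.h>

#define EXAMPLE_TYPEFILTER_BYTES	((256 * 256) / 8)	/* one bit per class/subclass pair */

static uint8_t example_typefilter[EXAMPLE_TYPEFILTER_BYTES];

static void
example_typefilter_allow(uint32_t debugid)
{
	uint32_t csc = debugid >> 16;			/* assumed class/subclass code */

	example_typefilter[csc >> 3] |= (uint8_t)(1 << (csc & 7));
}

static int
example_typefilter_match(uint32_t debugid)
{
	uint32_t csc = debugid >> 16;

	return (example_typefilter[csc >> 3] >> (csc & 7)) & 1;
}
#endif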
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, min_cpu;
	uint64_t mintime, t, barrier = 0;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t rcursor;
	kd_buf lostevent;
	union kds_ptr kdsp;
	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	struct kd_bufinfo *min_kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;
	boolean_t lostevents = FALSE;
	boolean_t out_of_events = FALSE;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	memset(&lostevent, 0, sizeof(lostevent));
	lostevent.debugid = TRACE_LOST_EVENTS;
	/*
	 * Capture timestamp. Only sort events that have occurred before the timestamp.
	 * Since the IOP is being flushed here, it's possible that events occur on the AP
	 * while running live tracing. If we are disabled, no new events should
	 * occur on the AP.
	 */
	if (kd_ctrl_page.enabled)
	{
		// timestamp is non-zero value
		barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	}

	// Request each IOP to provide us with up to date entries before merging buffers together.
	kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */
	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
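
	/*
	 * Note (added for clarity): disable_wrap() above records the previous
	 * slowcheck/flags state and keeps the buffers from wrapping (recycling
	 * storage units) while this routine walks them; the matching
	 * enable_wrap() at the bottom of this function restores wrapping unless
	 * the caller had already requested KDBG_NOWRAP.
	 */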
	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		// While space
		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;
			min_kdbp = NULL;
			min_cpu = 0;
			// Check all CPUs
			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {

				// Find one with raw data
				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
					continue;

				/* Debugging aid: maintain a shadow copy of the "kdsp"
				 * value being examined.
				 */
				volatile union kds_ptr kdsp_shadow;

				kdsp_shadow = kdsp;

				// Get from cpu data to buffer header to buffer
				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

				volatile struct kd_storage *kdsp_actual_shadow;

				kdsp_actual_shadow = kdsp_actual;

				// See if there are actual data left in this buffer
				rcursor = kdsp_actual->kds_readlast;

				if (rcursor == kdsp_actual->kds_bufindx)
					continue;

				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

				if ((t > barrier) && (barrier > 0)) {
					/*
					 * Need to wait to flush iop again before we
					 * sort any more data from the buffers
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < kdsp_actual->kds_timestamp) {
					/*
					 * indicates we've not yet completed filling
					 * in this event...
					 * this should only occur when we're looking
					 * at the buf that the record head is utilizing
					 * we'll pick these events up on the next
					 * call to kdbg_read...
					 * we bail at this point so that we don't
					 * get an out-of-order timestream by continuing
					 * to read events from the other CPUs' timestream(s)
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < mintime) {
					mintime = t;
					min_kdbp = kdbp;
					min_cpu = cpu;
				}
			}
			if (min_kdbp == NULL || out_of_events == TRUE) {
				/*
				 * all buffers ran empty
				 */
				out_of_events = TRUE;
				break;
			}
			kdsp = min_kdbp->kd_list_head;
			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

			if (kdsp_actual->kds_lostevents == TRUE) {
				kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
				*tempbuf = lostevent;

				kdsp_actual->kds_lostevents = FALSE;
				lostevents = TRUE;

				goto nextevent;
			}

			// Copy into buffer
			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
				release_storage_unit(min_cpu, kdsp.raw);

			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < min_kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				min_kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
			} else
				min_kdbp->kd_prev_timebase = mintime;
nextevent:
			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;

			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
				break;
		}
		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

				if (RAW_file_written >= RAW_FLUSH_SIZE) {
					cluster_push(vp, 0);

					RAW_file_written = 0;
				}
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (out_of_events == TRUE)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		enable_wrap(old_kdebug_slowcheck, lostevents);
	}
	return (error);
}
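
/*
 * Illustrative sketch (not compiled into the build): kdbg_read() above merges
 * the per-CPU storage lists by repeatedly taking the event with the smallest
 * timestamp across all CPUs, and stops once a timestamp crosses the flush
 * barrier.  The standalone version below shows the same min-timestamp merge
 * over plain arrays; all names and types here are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct example_event {
	uint64_t	timestamp;
	uint32_t	debugid;
};

struct example_cpu_buf {
	const struct example_event	*events;
	size_t				count;
	size_t				readlast;	/* next unread index */
};

/* Copy up to 'max' events, merged in timestamp order, into 'out'. */
static size_t
example_merge(struct example_cpu_buf *bufs, int ncpus,
	      struct example_event *out, size_t max, uint64_t barrier)
{
	size_t n = 0;

	while (n < max) {
		int	min_cpu = -1;
		uint64_t mintime = UINT64_MAX;

		for (int cpu = 0; cpu < ncpus; cpu++) {
			struct example_cpu_buf *b = &bufs[cpu];

			if (b->readlast == b->count)
				continue;		/* this CPU is drained */

			uint64_t t = b->events[b->readlast].timestamp;

			if (barrier && t > barrier) {
				min_cpu = -1;		/* need another flush first; stop merging */
				break;
			}
			if (t < mintime) {
				mintime = t;
				min_cpu = cpu;
			}
		}
		if (min_cpu == -1)
			break;				/* all buffers ran empty (or hit the barrier) */

		out[n++] = bufs[min_cpu].events[bufs[min_cpu].readlast++];
	}
	return n;
}
#endif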
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */
}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#else
#error No TRAP_DEBUGGER definition for this architecture
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
{
	int error = 0;
	boolean_t istate;

	if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
		return -1;
	}

	/* cap an individual stackshot to SANE_TRACEBUF_SIZE */
	if (size > SANE_TRACEBUF_SIZE) {
		size = SANE_TRACEBUF_SIZE;
	}

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();
	istate = ml_set_interrupts_enabled(FALSE);

	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, buf, size, flags, 0);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	*bytesTraced = kdp_stack_snapshot_bytes_traced();

	error = kdp_stack_snapshot_geterror();

	STACKSHOT_SUBSYS_UNLOCK();

	return error;
}
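
/*
 * Illustrative sketch (not compiled into the build): an in-kernel caller of
 * stack_snapshot_from_kernel() supplies its own buffer and receives the number
 * of bytes captured.  The buffer size and the zero flags value below are
 * arbitrary choices for this sketch, and a zero return is treated as success
 * here; neither is mandated by this file.
 */
#if 0
static void
example_take_stackshot(pid_t target_pid)
{
	uint32_t	size = 512 * 1024;	/* example buffer size */
	unsigned	bytes_traced = 0;
	void		*buf = kalloc(size);

	if (buf == NULL)
		return;

	if (stack_snapshot_from_kernel(target_pid, buf, size, 0 /* flags */, &bytes_traced) == 0) {
		/* the first 'bytes_traced' bytes of 'buf' now hold the snapshot */
	}
	kfree(buf, size);
}
#endif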
static int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	boolean_t istate;
	int error = 0;
	unsigned bytesTraced = 0;

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
		telemetry_global_ctl(1);
		*retval = 0;
		return (0);
	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
		telemetry_global_ctl(0);
		*retval = 0;
		return (0);
	}

	if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
		error = telemetry_enable_window();

		if (error != KERN_SUCCESS) {
			/* We are probably out of memory */
			*retval = -1;
			return ENOMEM;
		}

		*retval = 0;
		return (0);
	} else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
		telemetry_disable_window();
		*retval = 0;
		return (0);
	}
#endif
	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if (tracebuf_size <= 0) {
		error = EINVAL;
		goto error_exit;
	}

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = telemetry_gather(tracebuf, &bytesTraced,
					 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}

	if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = telemetry_gather_windowed(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}
	if (flags & STACKSHOT_GET_BOOT_PROFILE) {

		if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = bootprofile_gather(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}
#endif

	if (tracebuf_size > SANE_TRACEBUF_SIZE) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}
	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
			((bytesTraced < tracebuf_size) ?
			    bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}

	error = kdp_stack_snapshot_geterror();

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
{
	if (!new_nkdbufs)
		return;
	nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();

	kernel_debug_string("start_kern_tracing");

	if (0 == kdbg_reinit(TRUE)) {

		if (need_map == TRUE) {
			uint32_t old1, old2;

			kdbg_thrmap_init();

			disable_wrap(&old1, &old2);
		}

		/* Hold off interrupts until the early traces are cut */
		boolean_t s = ml_set_interrupts_enabled(FALSE);

		kdbg_set_tracing_enabled(
			TRUE,
			kdebug_serial ?
				(KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_SERIAL) :
				KDEBUG_ENABLE_TRACE);

		/*
		 * Transfer all very early events from the static buffer
		 * into the real buffers.
		 */
		kernel_debug_early_end();

		ml_set_interrupts_enabled(s);

		printf("kernel tracing started\n");
#if KDEBUG_MOJO_TRACE
		if (kdebug_serial) {
			printf("serial output enabled with %lu named events\n",
			       sizeof(kd_events)/sizeof(kd_event_t));
		}
#endif
	} else {
		printf("error from kdbg_reinit, kernel tracing not started\n");
	}
}
void
start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
				   boolean_t need_map,
				   unsigned int typefilter)
{
	/* startup tracing */
	start_kern_tracing(new_nkdbufs, need_map);

	/* check that tracing was actually enabled */
	if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	/* setup the typefiltering */
	if (0 == kdbg_enable_typefilter())
		setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
}
void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t ctx;
	vnode_t vp;
	int error;
	size_t number;

	if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 * Disable tracing from this point to avoid
			 * perturbing state.
			 */
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			commpage_update_kdebug_enable();
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT(TRACE_PANIC | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;
	kd_ctrl_page.enabled = 0;
	commpage_update_kdebug_enable();

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapcount * sizeof(kd_threadmap);
	kdbg_readthrmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}
/* Helper function for filling in the BSD name for an address space
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
	proc_t proc;

	/* Note: we can't use thread->task (and functions that rely on it) here
	 * because it hasn't been initialized yet when this function is called.
	 * We use the explicitly-passed task parameter instead.
	 */
	proc = get_bsdtask_info(task);
	if (proc != PROC_NULL)
		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
	else
		snprintf(name_buf, len, "%p [!bsd]", task);
}
#if KDEBUG_MOJO_TRACE
static kd_event_t *
binary_search(uint32_t id)
{
	int low, high, mid;

	low = 0;
	high = sizeof(kd_events)/sizeof(kd_event_t) - 1;

	while (TRUE)
	{
		mid = (low + high) / 2;

		if (low > high)
			return NULL; /* failed */
		else if (low + 1 >= high) {
			/* We have a match */
			if (kd_events[high].id == id)
				return &kd_events[high];
			else if (kd_events[low].id == id)
				return &kd_events[low];
			else
				return NULL; /* search failed */
		}
		else if (id < kd_events[mid].id)
			high = mid;
		else
			low = mid;
	}
}
/*
 * Look up event id to get name string.
 * Using a per-cpu cache of a single entry
 * before resorting to a binary search of the full table.
 */
static kd_event_t *last_hit[MAX_CPUS];
static kd_event_t *
event_lookup_cache(uint32_t cpu, uint32_t id)
{
	if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
		last_hit[cpu] = binary_search(id);
	return last_hit[cpu];
}
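
/*
 * Illustrative sketch (not compiled into the build): the same "remember the
 * last hit before falling back to a full binary search" idea as
 * event_lookup_cache() above, written as a standalone lookup over any table
 * sorted by id.  All names below are illustrative.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct example_entry {
	uint32_t	id;
	const char	*name;
};

/* 'table' must be sorted by id; '*last' caches the previous hit for this caller. */
static const struct example_entry *
example_cached_lookup(const struct example_entry *table, size_t count,
		      const struct example_entry **last, uint32_t id)
{
	if (*last == NULL || (*last)->id != id) {
		size_t lo = 0, hi = count;

		*last = NULL;
		while (lo < hi) {
			size_t mid = lo + (hi - lo) / 2;

			if (table[mid].id == id) {
				*last = &table[mid];
				break;
			} else if (table[mid].id < id)
				lo = mid + 1;
			else
				hi = mid;
		}
	}
	return *last;
}
#endif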
static uint64_t kd_last_timstamp;
static void
kdebug_serial_print(
	uint32_t	cpunum,
	uint32_t	debugid,
	uint64_t	timestamp,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	threadid
	)
{
	char		kprintf_line[192];
	char		event[40];
	uint64_t	us = timestamp / NSEC_PER_USEC;
	uint64_t	us_tenth = (timestamp % NSEC_PER_USEC) / 100;
	uint64_t	delta = timestamp - kd_last_timstamp;
	uint64_t	delta_us = delta / NSEC_PER_USEC;
	uint64_t	delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
	uint32_t	event_id = debugid & DBG_FUNC_MASK;
	const char	*command;
	const char	*bra;
	const char	*ket;
	kd_event_t	*ep;

	/* event time and delta from last */
	snprintf(kprintf_line, sizeof(kprintf_line),
		"%11llu.%1llu %8llu.%1llu ",
		us, us_tenth, delta_us, delta_us_tenth);
	/* event (id or name) - start prefixed by "[", end postfixed by "]" */
	bra = (debugid & DBG_FUNC_START) ? "[" : " ";
	ket = (debugid & DBG_FUNC_END)   ? "]" : " ";
	ep = event_lookup_cache(cpunum, event_id);

	if (ep) {
		if (strlen(ep->name) < sizeof(event) - 3)
			snprintf(event, sizeof(event), "%s%s%s",
				 bra, ep->name, ket);
		else
			snprintf(event, sizeof(event), "%s%x(name too long)%s",
				 bra, event_id, ket);
	} else {
		snprintf(event, sizeof(event), "%s%x%s",
			 bra, event_id, ket);
	}
	snprintf(kprintf_line + strlen(kprintf_line),
		 sizeof(kprintf_line) - strlen(kprintf_line),
		 "%-40s ", event);
	/* arg1 .. arg4 with special cases for strings */
	switch (event_id) {
	    case VFS_LOOKUP_DONE:
		if (debugid & DBG_FUNC_START) {
			/* arg1 hex then arg2..arg4 chars */
			snprintf(kprintf_line + strlen(kprintf_line),
				sizeof(kprintf_line) - strlen(kprintf_line),
				"%-16lx %-8s%-8s%-8s ",
				arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
			break;
		}
		/* else fall through for arg1..arg4 chars */
	    case TRACE_STRING_EXEC:
	    case TRACE_STRING_NEWTHREAD:
	    case TRACE_INFO_STRING:
		snprintf(kprintf_line + strlen(kprintf_line),
			sizeof(kprintf_line) - strlen(kprintf_line),
			"%-8s%-8s%-8s%-8s ",
			(char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
		break;
	    default:
		snprintf(kprintf_line + strlen(kprintf_line),
			sizeof(kprintf_line) - strlen(kprintf_line),
			"%-16lx %-16lx %-16lx %-16lx",
			arg1, arg2, arg3, arg4);
	}
	/* threadid, cpu and command name */
	if (threadid == (uintptr_t)thread_tid(current_thread()) &&
	    current_proc() &&
	    current_proc()->p_comm)
		command = current_proc()->p_comm;
	else
		command = "-";

	snprintf(kprintf_line + strlen(kprintf_line),
		sizeof(kprintf_line) - strlen(kprintf_line),
		" %-16lx %-2d %s\n",
		threadid, cpunum, command);

	kprintf("%s", kprintf_line);
	kd_last_timstamp = timestamp;
}
#endif
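
/*
 * Illustrative sketch (not compiled into the build): kdebug_serial_print()
 * above renders a nanosecond timestamp as microseconds with one decimal digit
 * (tenths of a microsecond); for example, 1234567 ns prints as "1234.5".  The
 * helper below only restates that arithmetic; its name and constant are
 * illustrative.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_NSEC_PER_USEC	1000ULL

static void
example_format_us(uint64_t timestamp_ns, char *buf, size_t buflen)
{
	uint64_t us       = timestamp_ns / EXAMPLE_NSEC_PER_USEC;
	uint64_t us_tenth = (timestamp_ns % EXAMPLE_NSEC_PER_USEC) / 100;

	snprintf(buf, buflen, "%llu.%1llu",
	    (unsigned long long)us, (unsigned long long)us_tenth);
}
#endif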