/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>
#include <sys/random.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/machine_routines.h>
#endif

#include <kern/clock.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/telemetry.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>

#include <sys/param.h>			/* for isset() */

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>
/*
 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
 *
 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
 * They are registered dynamically. Each is assigned a cpu_id at registration.
 *
 * NOTE: IOP trace events may not use the same clock hardware as "normal"
 * cpus. There is an effort made to synchronize the IOP timebase with the
 * AP, but it should be understood that there may be discrepancies.
 *
 * Once registered, an IOP is permanent; it cannot be unloaded or unregistered.
 * The current implementation depends on this for thread safety.
 *
 * New registrations occur by allocating a kd_iop struct and assigning a
 * provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
 * list_head pointer resolves any races.
 *
 * You may safely walk the kd_iops list at any time, without holding locks.
 *
 * When allocating buffers, the current kd_iops head is captured. Any operations
 * that depend on the buffer state (such as flushing IOP traces on reads,
 * etc.) should use the captured list head. This allows registrations to
 * take place while trace is in use.
 */
typedef struct kd_iop {
	kd_callback_t	callback;
	uint32_t	cpu_id;
	uint64_t	last_timestamp; /* Prevent timer rollback */
	struct kd_iop*	next;
} kd_iop_t;

static kd_iop_t* kd_iops = NULL;
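/*
 * Illustrative example (not part of the original source): with two AP cores
 * and two registered IOPs, the cpu_id assignment described above works out to
 *
 *     AP cores:               0, 1    (counted by kdbg_cpu_count())
 *     first IOP to register:  2       (list head was NULL, so kdbg_cpu_count(FALSE))
 *     second IOP to register: 3       (list_head->cpu_id + 1)
 *
 * so once buffers are allocated, kd_ctrl_page.kdebug_cpus == kd_iops->cpu_id + 1 == 4.
 * The exact numbers depend on the machine; this is only a worked example.
 */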
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */
void commpage_update_kdebug_enable(void); /* XXX sign */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_readcpumap(user_addr_t, size_t *);
int kdbg_readcurcpumap(user_addr_t, size_t *);
int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_readcurthrmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_thrmap_init(void);
int kdbg_reinit(boolean_t);
int kdbg_bootstrap(boolean_t);

int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);

static int kdbg_enable_typefilter(void);
static int kdbg_disable_typefilter(void);

static int create_buffers(boolean_t);
static void delete_buffers(void);

extern void IOSleep(int);
/* trace enable status */
unsigned int kdebug_enable = 0;

/* A static buffer to record events prior to the start of regular logging */
#define KD_EARLY_BUFFER_MAX	64
static kd_buf kd_early_buffer[KD_EARLY_BUFFER_MAX];
static int kd_early_index = 0;
static boolean_t kd_early_overflow = FALSE;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04	/* Obsolescent */
#define SLOW_CHUD	0x08

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
union kds_ptr {
	struct {
		uint32_t buffer_index:21;
		uint16_t offset:11;
	};
	uint32_t raw;
};

struct kd_storage {
	union	kds_ptr kds_next;
	uint32_t kds_bufindx;
	uint32_t kds_bufcnt;
	uint32_t kds_readlast;
	boolean_t kds_lostevents;
	uint64_t  kds_timestamp;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};
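/*
 * Worked example (illustrative, not from the original source): a kds_ptr packs
 * a storage unit location into 32 bits so it can be read and swapped as a
 * single word. Assuming buffer_index == 3 and offset == 5:
 *
 *     union kds_ptr p;
 *     p.buffer_index = 3;
 *     p.offset       = 5;
 *     struct kd_storage *kds = POINTER_FROM_KDS_PTR(p);   // &kd_bufs[3].kdsb_addr[5]
 *
 * The raw value 0xffffffff (KDS_PTR_NULL, defined below) is reserved to mean
 * "no storage unit".
 */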
#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
	struct	kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct	kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
int	n_storage_buffers = 0;
int	n_storage_threshold = 0;
int	kds_waiter = 0;

struct kd_bufinfo {
	union  kds_ptr kd_list_head;
	union  kds_ptr kd_list_tail;
	boolean_t kd_lostevents;
	uint64_t kd_prev_timebase;
	uint32_t num_bufs;
} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
struct kd_ctrl_page_t {
	union kds_ptr kds_free_list;
	uint32_t enabled	:1;
	int	 kds_inuse_count;
	uint32_t kdebug_flags;
	uint32_t kdebug_slowcheck;
	/*
	 * The number of kd_bufinfo structs allocated may not match the current
	 * number of active cpus. We capture the iops list head at initialization
	 * which we could use to calculate the number of cpus we allocated data for,
	 * unless it happens to be null. To avoid that case, we explicitly also
	 * capture a cpu count.
	 */
	kd_iop_t* kdebug_iops;
	uint32_t kdebug_cpus;
} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };

struct kd_bufinfo *kdbip = NULL;
#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

boolean_t kdlog_bg_trace = FALSE;
boolean_t kdlog_bg_trace_running = FALSE;
unsigned int bg_nkdbufs = 0;

unsigned int nkdbufs = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t   *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t       *stackshot_subsys_lck_grp;
static lck_grp_attr_t  *stackshot_subsys_lck_grp_attr;
static lck_attr_t      *stackshot_subsys_lck_attr;
static lck_mtx_t        stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;

off_t	RAW_file_offset = 0;
int	RAW_file_written = 0;
#define	RAW_FLUSH_SIZE	(2 * 1024 * 1024)

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK	0xfffffffc

/* TODO: move to kdebug.h */
#define CLASS_MASK      0xff000000
#define CLASS_OFFSET    24
#define SUBCLASS_MASK   0x00ff0000
#define SUBCLASS_OFFSET 16
#define CSC_MASK        0xffff0000	/* class and subclass mask */
#define CSC_OFFSET      SUBCLASS_OFFSET

#define EXTRACT_CLASS(debugid)		( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET    ) )
#define EXTRACT_SUBCLASS(debugid)	( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
#define EXTRACT_CSC(debugid)		( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET      ) )

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define	BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000
#define DBG_SCALL_MASK	0xffff0000
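/*
 * Worked example (illustrative, not from the original source): a debugid is a
 * 32-bit code laid out as class(8) | subclass(8) | code(14) | func_qualifier(2).
 * Taking MACH_vmfault == 0x01300008:
 *
 *     EXTRACT_CLASS(0x01300008)    == 0x01      (DBG_MACH)
 *     EXTRACT_SUBCLASS(0x01300008) == 0x30      (DBG_MACH_VM)
 *     EXTRACT_CSC(0x01300008)      == 0x0130
 *     0x01300008 & DBG_FUNC_MASK   == 0x01300008  (no DBG_FUNC_START/END bits set)
 *
 * The class/subclass names are the conventional kdebug ones from <sys/kdebug.h>;
 * the arithmetic is exactly what the macros above compute.
 */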
/* task to string structure */
struct tts
{
	task_t    task;            /* from procs task */
	pid_t     pid;             /* from procs p_pid  */
	char      task_comm[20];   /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;    /* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void );

static uint8_t *type_filter_bitmap;
/*
 * This allows kperf to swap out the global state pid when kperf ownership is
 * passed from one process to another. It checks the old global state pid so
 * that kperf can't accidentally steal control of trace when a non-kperf trace
 * user has control of trace.
 */
void
kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);

void
kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
{
	if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
		return;

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (old_pid == global_state_pid)
		global_state_pid = new_pid;

	lck_mtx_unlock(kd_trace_mtx_sysctl);
}
static uint32_t
kdbg_cpu_count(boolean_t early_trace)
{
	if (early_trace) {
		/*
		 * we've started tracing before the IOKit has even
		 * started running... just use the static max value
		 */
		return max_ncpus;
	}

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	assert(hinfo.logical_cpu_max > 0);
	return hinfo.logical_cpu_max;
}

#if MACH_ASSERT
#endif /* MACH_ASSERT */
static void
kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
{
	while (iop) {
		iop->callback.func(iop->callback.context, type, arg);
		iop = iop->next;
	}
}
static void
kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kdebug_enable |= trace_type;
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
		kd_ctrl_page.enabled = 1;
		commpage_update_kdebug_enable();
	} else {
		kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
		kd_ctrl_page.enabled = 0;
		commpage_update_kdebug_enable();
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (enabled) {
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
	} else {
		/*
		 * If you do not flush the IOP trace buffers, they can linger
		 * for a considerable period; consider code which disables and
		 * deallocates without a final sync flush.
		 */
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
	}
}
static void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
void
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
	*old_flags = kd_ctrl_page.kdebug_flags;

	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

	if ( !(old_slowcheck & SLOW_NOLOG))
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

	if (lostevents == TRUE)
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
static int
create_buffers(boolean_t early_trace)
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	/*
	 * For the duration of this allocation, trace code will only reference
	 * kdebug_iops. Any iops registered after this enabling will not be
	 * messaged until the buffers are reallocated.
	 *
	 * TLDR; Must read kd_iops once and only once!
	 */
	kd_ctrl_page.kdebug_iops = kd_iops;

	/*
	 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
	 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
	 * be the list head + 1.
	 */
	kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}

	if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	kd_bufs = NULL;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	n_storage_units = 0;

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

			kd_ctrl_page.kds_free_list.buffer_index = i;
			kd_ctrl_page.kds_free_list.offset = n;
		}
		n_storage_units += n_elements;
	}

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

	for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = FALSE;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

	kd_ctrl_page.kds_inuse_count = 0;
	n_storage_threshold = n_storage_units / 2;
out:
	if (error)
		delete_buffers();

	return(error);
}
static void
delete_buffers(void)
{
	int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
			}
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

		kdbip = NULL;
	}
	kd_ctrl_page.kdebug_iops = NULL;
	kd_ctrl_page.kdebug_cpus = 0;
	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}
void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
	int s = 0;
	struct	kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	union kds_ptr kdsp;

	kdsp.raw = kdsp_raw;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
		kd_ctrl_page.kds_free_list = kdsp;

		kd_ctrl_page.kds_inuse_count--;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
boolean_t
allocate_storage_unit(int cpu)
{
	union	kds_ptr kdsp;
	struct	kd_storage *kdsp_actual, *kdsp_next_actual;
	struct	kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t	oldest_ts, ts;
	boolean_t	retval = TRUE;
	int	s = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
			goto out;
	}

	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page.kds_inuse_count++;
	} else {
		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
			kdbp->kd_lostevents = TRUE;
			retval = FALSE;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {

			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			commpage_update_kdebug_enable();
			retval = FALSE;
			goto out;
		}
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
			kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
			kdsp_next_actual->kds_lostevents = TRUE;
		} else
			kdbp_vict->kd_lostevents = TRUE;

		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp_actual->kds_timestamp = mach_absolute_time();
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt	  = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = FALSE;
	kdsp_actual->kds_bufindx  = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
		kdbp->kd_list_head = kdsp;
	else
		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return (retval);
}
uint32_t
kernel_debug_register_callback(kd_callback_t callback)
{
	kd_iop_t* iop;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
		memcpy(&iop->callback, &callback, sizeof(kd_callback_t));

		/*
		 * <rdar://problem/13351477> Some IOP clients are not providing a name.
		 */
		{
			boolean_t is_valid_name = FALSE;
			for (uint32_t length = 0; length < sizeof(callback.iop_name); ++length) {
				/* This is roughly isprintable(c) */
				if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
					continue;
				if (callback.iop_name[length] == 0) {
					if (length)
						is_valid_name = TRUE;
					break;
				}
			}

			if (!is_valid_name) {
				strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
			}
		}

		iop->last_timestamp = 0;

		do {
			/*
			 * We use two pieces of state, the old list head
			 * pointer, and the value of old_list_head->cpu_id.
			 * If we read kd_iops more than once, it can change
			 * between reads.
			 *
			 * TLDR; Must not read kd_iops more than once per loop.
			 */
			iop->next = kd_iops;
			iop->cpu_id = iop->next ? (iop->next->cpu_id + 1) : kdbg_cpu_count(FALSE);

			/*
			 * Header says OSCompareAndSwapPtr has a memory barrier
			 */
		} while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));

		return iop->cpu_id;
	}

	return 0;
}
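/*
 * Usage sketch (illustrative, not from the original source): an IOP driver
 * registers once and then feeds its events in with the cpu_id it was handed
 * back. The kd_callback_t shape comes from <sys/kdebug.h>; the "my_iop_*"
 * names are hypothetical.
 *
 *     static void my_iop_callback(void* context, kd_callback_type type, void* arg)
 *     {
 *         // react to KD_CALLBACK_KDEBUG_ENABLED / _DISABLED / _SYNC_FLUSH / ...
 *     }
 *
 *     kd_callback_t cb = { .func = my_iop_callback, .context = NULL, .iop_name = "MYIOP" };
 *     uint32_t coreid = kernel_debug_register_callback(cb);
 *     ...
 *     kernel_debug_enter(coreid, debugid, timestamp, arg1, arg2, arg3, arg4, threadid);
 */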
void
kernel_debug_enter(
	uint32_t	coreid,
	uint32_t	debugid,
	uint64_t	timestamp,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	threadid
	)
{
	uint32_t	bindx;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			if (debugid >= kdlog_beg && debugid <= kdlog_end)
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:

	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	kdbp = &kdbip[coreid];
	timestamp &= KDBG_TIMESTAMP_MASK;

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(coreid) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	// IOP entries can be allocated before xnu allocates and inits the buffer
	if (timestamp < kdsp_actual->kds_timestamp)
		kdsp_actual->kds_timestamp = timestamp;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = threadid;

	kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
		boolean_t need_kds_wakeup = FALSE;
		int	s;

		/*
		 * try to take the lock here to synchronize with the
		 * waiter entering the blocked state... use the try
		 * mode to prevent deadlocks caused by re-entering this
		 * routine due to various trace points triggered in the
		 * lck_spin_sleep_xxxx routines used to actually enter
		 * our wait condition... no problem if we fail,
		 * there will be lots of additional events coming in that
		 * will eventually succeed in grabbing this lock
		 */
		s = ml_set_interrupts_enabled(FALSE);

		if (lck_spin_try_lock(kdw_spin_lock)) {

			if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
				kds_waiter = 0;
				need_kds_wakeup = TRUE;
			}
			lck_spin_unlock(kdw_spin_lock);
		}
		ml_set_interrupts_enabled(s);

		if (need_kds_wakeup == TRUE)
			wakeup(&kds_waiter);
	}
}
static void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	struct proc	*curproc;
	uint64_t	now;
	uint32_t	bindx;
	int		s;
	kd_buf		*kd;
	int		cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
			kd_chudhook_fn chudhook;
			/*
			 * Mask interrupts to minimize the interval across
			 * which the driver providing the hook could be
			 * unloaded.
			 */
			s = ml_set_interrupts_enabled(FALSE);
			chudhook = kdebug_chudhook;
			if (chudhook)
				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
			ml_set_interrupts_enabled(s);
		}
		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if ( !ml_at_interrupt_context()) {
			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
				/*
				 * If kdebug flag is not set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && !(curproc->p_kdebug)) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
				/*
				 * If kdebug flag is set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && curproc->p_kdebug) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
		}

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (debugid < kdlog_beg || debugid > kdlog_end)
				goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:
	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	cpu = cpu_number();
	kdbp = &kdbip[cpu];
retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(cpu) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
		uint32_t	etype;
		uint32_t	stype;

		etype = debugid & DBG_FUNC_MASK;
		stype = debugid & DBG_SCALL_MASK;

		if (etype == INTERRUPT || etype == MACH_vmfault ||
		    stype == BSC_SysCall || stype == MACH_SysCall) {

			boolean_t need_kds_wakeup = FALSE;

			/*
			 * try to take the lock here to synchronize with the
			 * waiter entering the blocked state... use the try
			 * mode to prevent deadlocks caused by re-entering this
			 * routine due to various trace points triggered in the
			 * lck_spin_sleep_xxxx routines used to actually enter
			 * one of our 2 wait conditions... no problem if we fail,
			 * there will be lots of additional events coming in that
			 * will eventually succeed in grabbing this lock
			 */
			s = ml_set_interrupts_enabled(FALSE);

			if (lck_spin_try_lock(kdw_spin_lock)) {

				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
					kds_waiter = 0;
					need_kds_wakeup = TRUE;
				}
				lck_spin_unlock(kdw_spin_lock);
			}
			ml_set_interrupts_enabled(s);

			if (need_kds_wakeup == TRUE)
				wakeup(&kds_waiter);
		}
	}
}
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
}
void
kernel_debug_string(const char *message)
{
	uintptr_t arg[4] = {0, 0, 0, 0};

	/* Stuff the message string in the args and log it. */
	strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
	KERNEL_DEBUG_EARLY(
		(TRACEDBG_CODE(DBG_TRACE_INFO, 4)) | DBG_FUNC_NONE,
		arg[0], arg[1], arg[2], arg[3]);
}
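/*
 * Worked example (illustrative, not from the original source): on LP64 the four
 * uintptr_t args hold 4 * 8 = 32 bytes, so
 *
 *     kernel_debug_string("early trace done");
 *
 * copies the 16 characters of the message into arg[0..3] (the array was
 * zero-filled, so shorter strings stay NUL padded) and emits them as a single
 * DBG_TRACE_INFO event; messages longer than 32 bytes are silently truncated.
 */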
extern int master_cpu;		/* MACH_KERNEL_PRIVATE */
/*
 * Used prior to start_kern_tracing() being called.
 * Log temporarily into a static buffer.
 */
void
kernel_debug_early(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4)
{
	/* If tracing is already initialized, use it */
	if (nkdbufs) {
		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
		return;
	}

	/* Do nothing if the buffer is full or we're not on the boot cpu */
	kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
	if (kd_early_overflow ||
	    cpu_number() != master_cpu)
		return;

	kd_early_buffer[kd_early_index].debugid = debugid;
	kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
	kd_early_buffer[kd_early_index].arg1 = arg1;
	kd_early_buffer[kd_early_index].arg2 = arg2;
	kd_early_buffer[kd_early_index].arg3 = arg3;
	kd_early_buffer[kd_early_index].arg4 = arg4;
	kd_early_buffer[kd_early_index].arg5 = 0;
	kd_early_index++;
}
/*
 * Transfer the contents of the temporary buffer into the trace buffers.
 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
 * when mach_absolute_time is set to 0.
 */
static void
kernel_debug_early_end(void)
{
	int	i;

	if (cpu_number() != master_cpu)
		panic("kernel_debug_early_end() not called on boot processor");

	/* Fake sentinel marking the start of kernel time relative to TSC */
	kernel_debug_enter(
		0,
		(TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
		0,
		(uint32_t)(tsc_rebase_abs_time >> 32),
		(uint32_t)tsc_rebase_abs_time,
		0,
		0,
		0);
	for (i = 0; i < kd_early_index; i++) {
		kernel_debug_enter(
			0,
			kd_early_buffer[i].debugid,
			kd_early_buffer[i].timestamp,
			kd_early_buffer[i].arg1,
			kd_early_buffer[i].arg2,
			kd_early_buffer[i].arg3,
			kd_early_buffer[i].arg4,
			0);
	}

	/* Cut events-lost event on overflow */
	if (kd_early_overflow)
		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_INFO, 2), 0, 0, 0, 0, 0);

	/* This trace marks the start of kernel tracing */
	kernel_debug_string("early trace done");
}
/*
 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
 */
int
kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
{
	struct kdebug_trace64_args uap64;

	uap64.code = uap->code;
	uap64.arg1 = uap->arg1;
	uap64.arg2 = uap->arg2;
	uap64.arg3 = uap->arg3;
	uap64.arg4 = uap->arg4;

	return kdebug_trace64(p, &uap64, retval);
}
/*
 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
 */
int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
{
	uint8_t code_class;

	/*
	 * Not all classes are supported for injection from userspace, especially ones used by the core
	 * kernel tracing infrastructure.
	 */
	code_class = EXTRACT_CLASS(uap->code);

	switch (code_class) {
		case DBG_TRACE:
			return EPERM;
	}

	if ( __probable(kdebug_enable == 0) )
		return(0);

	kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));

	return(0);
}
static void
kdbg_lock_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutexes
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}
int
kdbg_bootstrap(boolean_t early_trace)
{
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers(early_trace));
}
int
kdbg_reinit(boolean_t early_trace)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap(early_trace);

	RAW_file_offset = 0;
	RAW_file_written = 0;

	return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if(dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread  = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
/*
 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
 *
 * You may provide a buffer and size, or if you set the buffer to NULL, a
 * buffer of sufficient size will be allocated.
 *
 * If you provide a buffer and it is too small, sets cpumap_size to the number
 * of bytes required and returns EINVAL.
 *
 * On success, if you provided a buffer, cpumap_size is set to the number of
 * bytes written. If you did not provide a buffer, cpumap is set to the newly
 * allocated buffer and cpumap_size is set to the number of bytes allocated.
 *
 * NOTE: It may seem redundant to pass both iops and a cpu_count.
 *
 * We may be reporting data from "now", or from the "past".
 *
 * The "now" data would be for something like kdbg_readcurcpumap().
 * The "past" data would be for kdbg_readcpumap().
 *
 * If we do not pass both iops and cpu_count, and iops is NULL, this function
 * will need to read "now" state to get the number of cpus, which would be in
 * error if we were reporting "past" state.
 */
int
kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
{
	assert(cpumap);
	assert(cpumap_size);
	assert(cpu_count);
	assert(!iops || iops->cpu_id + 1 == cpu_count);

	uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
	uint32_t bytes_available = *cpumap_size;
	*cpumap_size = bytes_needed;

	if (*cpumap == NULL) {
		if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
			return ENOMEM;
		}
	} else if (bytes_available < bytes_needed) {
		return EINVAL;
	}

	kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;

	header->version_no = RAW_VERSION1;
	header->cpu_count = cpu_count;

	kd_cpumap* cpus = (kd_cpumap*)&header[1];

	int32_t index = cpu_count - 1;
	while (iops) {
		cpus[index].cpu_id = iops->cpu_id;
		cpus[index].flags = KDBG_CPUMAP_IS_IOP;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));

		iops = iops->next;
		index--;
	}

	while (index >= 0) {
		cpus[index].cpu_id = index;
		cpus[index].flags = 0;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, "AP", sizeof(cpus->name));

		index--;
	}

	return KERN_SUCCESS;
}
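/*
 * Sizing example (illustrative, not from the original source): for 2 AP cores
 * plus 1 registered IOP, cpu_count == 3 and the layout built above is
 *
 *     bytes_needed = sizeof(kd_cpumap_header) + 3 * sizeof(kd_cpumap)
 *
 * with cpus[2] describing the IOP (flags == KDBG_CPUMAP_IS_IOP, its iop_name)
 * and cpus[0..1] describing the APs (name "AP", cpu_id == index). A caller
 * that passes *cpumap == NULL gets back a kmem_alloc'd buffer of exactly that
 * size.
 */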
void
kdbg_thrmap_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
		return;

	kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);

	if (kd_mapptr)
		kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
}
kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
{
	kd_threadmap	*mapptr;
	struct proc	*p;
	struct krt	akrt;
	int		tts_count;    /* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	int		i;
	vm_offset_t	kaddr;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */
	proc_list_lock();

	/*
	 * Calculate the sizes of map buffers
	 */
	for (p = allproc.lh_first, *mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
		*mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 25%.
	 */
	*mapcount += *mapcount/4;
	tts_count += tts_count/4;

	*mapsize = *mapcount * sizeof(kd_threadmap);

	if (count && count < *mapcount)
		return (0);

	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize) == KERN_SUCCESS)) {
		bzero((void *)kaddr, *mapsize);
		mapptr = (kd_threadmap *)kaddr;
	} else
		return (0);

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		bzero((void *)kaddr, tts_mapsize);
		tts_mapptr = (struct tts *)kaddr;
	} else {
		kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);

		return (0);
	}
	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	proc_list_lock();

	/*
	 * should use proc_iterate
	 */
	for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
		if (p->p_lflag & P_LEXIT)
			continue;

		if (p->task) {
			task_reference(p->task);
			tts_mapptr[i].task = p->task;
			tts_mapptr[i].pid  = p->p_pid;
			(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
			i++;
		}
	}
	tts_count = i;

	proc_list_unlock();

	/*
	 * Initialize thread map data
	 */
	akrt.map = mapptr;
	akrt.count = 0;
	akrt.maxcount = *mapcount;

	for (i = 0; i < tts_count; i++) {
		akrt.atts = &tts_mapptr[i];
		task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
		task_deallocate((task_t) tts_mapptr[i].task);
	}
	kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);

	*mapcount = akrt.count;

	return (mapptr);
}
static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	global_state_pid = -1;
	kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	kdbg_disable_typefilter();

	delete_buffers();
	nkdbufs	= 0;

	/* Clean up the thread map buffer */
	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
	if (kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_mapptr = (kd_threadmap *) 0;
	}
	kd_mapsize = 0;
	kd_mapcount = 0;

	RAW_file_offset = 0;
	RAW_file_written = 0;
}
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}
/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}
/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		ret = ENOTSUP;

	return(ret);
}
static int
kdbg_enable_typefilter(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
		/* free the old filter */
		kdbg_disable_typefilter();
	}

	if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
		return ENOSPC;
	}

	bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);

	/* Turn off range and value checks */
	kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);

	/* Enable filter checking */
	kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
	kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	return 0;
}
static int
kdbg_disable_typefilter(void)
{
	/* Disable filter checking */
	kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;

	/* Turn off slow checks unless pid checks are using them */
	if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	else
		kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

	if(type_filter_bitmap == NULL)
		return 0;

	vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
	type_filter_bitmap = NULL;

	kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
	return 0;
}
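/*
 * Worked example (illustrative, not from the original source): the typefilter
 * holds one bit per class/subclass pair, i.e. 256 * 256 = 65536 bits in a
 * 65536 / 8 == 8192-byte bitmap (KDBG_TYPEFILTER_BITMAP_SIZE). A userspace
 * tool that only wants scheduler events would hand in a bitmap with a single
 * bit set:
 *
 *     uint8_t bitmap[65536 / 8] = { 0 };
 *     uint16_t csc = (DBG_MACH << 8) | DBG_MACH_SCHED;   // matches EXTRACT_CSC() of those events
 *     setbit(bitmap, csc);
 *     // hand the bitmap to the kernel via the KERN_KDSET_TYPEFILTER control below
 *
 * kernel_debug_internal() then records only events whose EXTRACT_CSC() bit is
 * set, plus DBG_TRACE events, which are always recorded.
 */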
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;
	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK   |
						   KDBG_PIDCHECK   | KDBG_PIDEXCLUDE |
						   KDBG_TYPEFILTER_CHECK)) )
			kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		else
			kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
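/*
 * Worked example (illustrative, not from the original source): a
 * KDBG_SUBCLSTYPE request with value1 == DBG_MACH (0x01) and
 * value2 == DBG_MACH_SCHED (0x40) yields
 *
 *     kdlog_beg = (0x01 << 24) | (0x40 << 16) = 0x01400000
 *     kdlog_end = (0x01 << 24) | (0x41 << 16) = 0x01410000
 *
 * so the KDBG_RANGECHECK test admits exactly the events of that one
 * class/subclass pair.
 */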
int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		break;
	}
#endif /* 0 */
	return(EINVAL);
}
int
kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
{
	uint8_t* cpumap = NULL;
	uint32_t cpumap_size = 0;
	int ret = KERN_SUCCESS;

	if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
		if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
			if (user_cpumap) {
				size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
				if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
					ret = EFAULT;
				}
			}
			*user_cpumap_size = cpumap_size;
			kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
{
	kd_threadmap *mapptr;
	unsigned int mapsize;
	unsigned int mapcount;
	unsigned int count = 0;
	int ret = 0;

	count = *bufsize/sizeof(kd_threadmap);
	*bufsize = 0;

	if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
		if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
			ret = EFAULT;
		else
			*bufsize = (mapcount * sizeof(kd_threadmap));

		kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;
	unsigned int mapsize;

	count = avail/sizeof (kd_threadmap);

	mapsize = kd_mapcount * sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < mapsize)
				ret = EINVAL;
			else
			{
				if (vp)
				{
					RAW_header	header;
					clock_sec_t	secs;
					clock_usec_t	usecs;
					char	*pad_buf;
					uint32_t pad_size;
					uint32_t extra_thread_count = 0;
					uint32_t cpumap_size;

					/*
					 * To write a RAW_VERSION1+ file, we
					 * must embed a cpumap in the "padding"
					 * used to page align the events following
					 * the threadmap. If the threadmap happens
					 * to not require enough padding, we
					 * artificially increase its footprint
					 * until it needs enough padding.
					 */
					pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
					cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);

					if (cpumap_size > pad_size) {
						/* Force an overflow onto the next page, we get a full page of padding */
						extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
					}

					header.version_no = RAW_VERSION1;
					header.thread_count = count + extra_thread_count;

					clock_get_calendar_microtime(&secs, &usecs);
					header.TOD_secs = secs;
					header.TOD_usecs = usecs;

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += sizeof(RAW_header);

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += mapsize;

					if (extra_thread_count) {
						pad_size = extra_thread_count * sizeof(kd_threadmap);
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);

						if (ret)
							goto write_error;
						RAW_file_offset += pad_size;
					}

					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
					if (pad_size) {
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						/*
						 * embed a cpumap in the padding bytes.
						 * older code will skip this.
						 * newer code will know how to read it.
						 */
						uint32_t temp = pad_size;
						if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
							memset(pad_buf, 0, pad_size);
						}

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);

						if (ret)
							goto write_error;
						RAW_file_offset += pad_size;
					}
					RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;

				} else {
					if (copyout(kd_mapptr, buffer, mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp)
	{
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
		RAW_file_written += sizeof(uint32_t);
	}
write_error:
	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
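/*
 * Padding example (illustrative, not from the original source): assume a
 * 4096-byte page and suppose sizeof(RAW_header) + count * sizeof(kd_threadmap)
 * comes to 2816 bytes for a given capture. Then
 *
 *     pad_size = PAGE_SIZE - (2816 & PAGE_MASK_64) = 4096 - 2816 = 1280
 *
 * and the cpumap is embedded in those 1280 padding bytes. If the cpumap needs
 * more than that, extra zeroed kd_threadmap entries are written first so the
 * padding grows to a full page (the "force an overflow" case above). The 2816
 * figure is just an assumed example value.
 */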
unsigned int
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		return (value);
	else
		return (max_entries);
}
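/*
 * Worked example (illustrative, not from the original source): on a machine
 * where sane_size is 8 GiB and sizeof(kd_buf) is 64 bytes (the LP64 layout),
 *
 *     max_entries = (8 GiB / 2) / 64 = 67,108,864 events
 *
 * so a request for 100 million events is clamped to ~67.1 million while a
 * request for 1 million is honored as-is. The 64-byte figure is an assumption
 * for the arithmetic; the clamping rule is what the code above implements.
 */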
static int
kdbg_enable_bg_trace(void)
{
	int ret = 0;

	if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
		nkdbufs = bg_nkdbufs;
		ret = kdbg_reinit(FALSE);
		if (0 == ret) {
			kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
			kdlog_bg_trace_running = TRUE;
		}
	}
	return ret;
}

static void
kdbg_disable_bg_trace(void)
{
	if (kdlog_bg_trace_running == TRUE) {
		kdlog_bg_trace_running = FALSE;
		kdbg_clear();
	}
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    void *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	kdbg_lock_init();

	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
	}
	else {
		/* disable chudhook */
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
		kdebug_chudhook = 0;
	}
}
2188 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
2191 size_t size
= *sizep
;
2192 unsigned int value
= 0;
2194 kbufinfo_t kd_bufinfo
;
2198 if (name
[0] == KERN_KDGETENTROPY
||
2199 name
[0] == KERN_KDWRITETR
||
2200 name
[0] == KERN_KDWRITEMAP
||
2201 name
[0] == KERN_KDEFLAGS
||
2202 name
[0] == KERN_KDDFLAGS
||
2203 name
[0] == KERN_KDENABLE
||
2204 name
[0] == KERN_KDENABLE_BG_TRACE
||
2205 name
[0] == KERN_KDSETBUF
) {
2214 if ( !(kd_ctrl_page
.kdebug_flags
& KDBG_LOCKINIT
))
2217 lck_mtx_lock(kd_trace_mtx_sysctl
);
2222 * Does not alter the global_state_pid
2223 * This is a passive request.
2225 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
2227 * There is not enough room to return even
2228 * the first element of the info structure.
2233 kd_bufinfo
.nkdbufs
= nkdbufs
;
2234 kd_bufinfo
.nkdthreads
= kd_mapcount
;
2236 if ( (kd_ctrl_page
.kdebug_slowcheck
& SLOW_NOLOG
) )
2237 kd_bufinfo
.nolog
= 1;
2239 kd_bufinfo
.nolog
= 0;
2241 kd_bufinfo
.flags
= kd_ctrl_page
.kdebug_flags
;
2242 #if defined(__LP64__)
2243 kd_bufinfo
.flags
|= KDBG_LP64
;
2245 kd_bufinfo
.bufid
= global_state_pid
;
2247 if (size
>= sizeof(kd_bufinfo
)) {
2249 * Provide all the info we have
2251 if (copyout(&kd_bufinfo
, where
, sizeof(kd_bufinfo
)))
2255 * For backwards compatibility, only provide
2256 * as much info as there is room for.
2258 if (copyout(&kd_bufinfo
, where
, size
))
2263 case KERN_KDGETENTROPY
: {
2264 /* Obsolescent - just fake with a random buffer */
2265 char *buffer
= (char *) kalloc(size
);
2266 read_frandom((void *) buffer
, size
);
2267 ret
= copyout(buffer
, where
, size
);
2268 kfree(buffer
, size
);
2272 case KERN_KDENABLE_BG_TRACE
:
2273 bg_nkdbufs
= kdbg_set_nkdbufs(value
);
2274 kdlog_bg_trace
= TRUE
;
2275 ret
= kdbg_enable_bg_trace();
2278 case KERN_KDDISABLE_BG_TRACE
:
2279 kdlog_bg_trace
= FALSE
;
2280 kdbg_disable_bg_trace();
2284 if ((curproc
= current_proc()) != NULL
)
2285 curpid
= curproc
->p_pid
;
2290 if (global_state_pid
== -1)
2291 global_state_pid
= curpid
;
2292 else if (global_state_pid
!= curpid
) {
2293 if ((p
= proc_find(global_state_pid
)) == NULL
) {
2295 * The global pid no longer exists
2297 global_state_pid
= curpid
;
2300 * The global pid exists, deny this request
2311 kdbg_disable_bg_trace();
2313 value
&= KDBG_USERFLAGS
;
2314 kd_ctrl_page
.kdebug_flags
|= value
;
2317 kdbg_disable_bg_trace();
2319 value
&= KDBG_USERFLAGS
;
2320 kd_ctrl_page
.kdebug_flags
&= ~value
;
2324 * Enable tracing mechanism. Two types:
2325 * KDEBUG_TRACE is the standard one,
2326 * and KDEBUG_PPT which is a carefully
2327 * chosen subset to avoid performance impact.
2331 * enable only if buffer is initialized
2333 if (!(kd_ctrl_page
.kdebug_flags
& KDBG_BUFINIT
) ||
2334 !(value
== KDEBUG_ENABLE_TRACE
|| value
== KDEBUG_ENABLE_PPT
)) {
2340 kdbg_set_tracing_enabled(TRUE
, value
);
2344 kdbg_set_tracing_enabled(FALSE
, 0);
2348 kdbg_disable_bg_trace();
2350 nkdbufs
= kdbg_set_nkdbufs(value
);
2353 kdbg_disable_bg_trace();
2355 ret
= kdbg_reinit(FALSE
);
2359 ret
= kdbg_enable_bg_trace();
2362 if(size
< sizeof(kd_regtype
)) {
2366 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
2370 kdbg_disable_bg_trace();
2372 ret
= kdbg_setreg(&kd_Reg
);
2375 if (size
< sizeof(kd_regtype
)) {
2379 ret
= kdbg_getreg(&kd_Reg
);
2380 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))) {
2383 kdbg_disable_bg_trace();
2387 ret
= kdbg_read(where
, sizep
, NULL
, NULL
);
		case KERN_KDWRITETR:
		case KERN_KDWRITEMAP:
		{
			struct vfs_context context;
			struct fileproc *fp;

			kdbg_disable_bg_trace();

			if (name[0] == KERN_KDWRITETR) {

				int wait_result = THREAD_AWAKENED;

				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
				nanoseconds_to_absolutetime(ns, &abstime);
				clock_absolutetime_interval_to_deadline(abstime, &abstime);

				s = ml_set_interrupts_enabled(FALSE);
				lck_spin_lock(kdw_spin_lock);

				while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

					wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);

					wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

				lck_spin_unlock(kdw_spin_lock);
				ml_set_interrupts_enabled(s);

			if ((ret = fp_lookup(p, fd, &fp, 1))) {

			context.vc_thread = current_thread();
			context.vc_ucred = fp->f_fglob->fg_cred;

			if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
				fp_drop(p, fd, fp, 1);

			vp = (struct vnode *)fp->f_fglob->fg_data;

			if ((ret = vnode_getwithref(vp)) == 0) {
				RAW_file_offset = fp->f_fglob->fg_offset;
				if (name[0] == KERN_KDWRITETR) {
					number = nkdbufs * sizeof(kd_buf);

					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
					ret = kdbg_read(0, &number, vp, &context);
					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);

					number = kd_mapcount * sizeof(kd_threadmap);
					kdbg_readthrmap(0, &number, vp, &context);

				fp->f_fglob->fg_offset = RAW_file_offset;

			fp_drop(p, fd, fp, 0);
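
			/*
			 * Illustrative sketch (not part of the original source): the timeout handling
			 * above treats *sizep as a millisecond count and converts it into an absolute
			 * Mach-time deadline before sleeping on kdw_spin_lock. A minimal standalone
			 * version of that conversion, using only the kern/clock.h routines seen above,
			 * would look like this:
			 */
#if 0	/* example only -- not compiled */
			{
				uint64_t timeout_ms = 100;	/* hypothetical 100 ms timeout */
				uint64_t ns, deadline;

				/* milliseconds -> nanoseconds */
				ns = timeout_ms * (uint64_t)(1000 * 1000);
				/* nanoseconds -> Mach absolute-time ticks */
				nanoseconds_to_absolutetime(ns, &deadline);
				/* interval -> absolute deadline (now + interval) */
				clock_absolutetime_interval_to_deadline(deadline, &deadline);
				/* 'deadline' can now be handed to lck_spin_sleep_deadline() */
			}
#endif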
		case KERN_KDBUFWAIT:
		{
			/* WRITETR lite -- just block until there's data */
			int wait_result = THREAD_AWAKENED;

			kdbg_disable_bg_trace();

			ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
			nanoseconds_to_absolutetime(ns, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);

			s = ml_set_interrupts_enabled(FALSE);

				panic("trying to wait with interrupts off");
			lck_spin_lock(kdw_spin_lock);

			/*
			 * drop the mutex so we don't exclude others from
			 * accessing trace
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			while (wait_result == THREAD_AWAKENED &&
				kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

				wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);

				wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

			/* check the count under the spinlock */
			number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);

			lck_spin_unlock(kdw_spin_lock);
			ml_set_interrupts_enabled(s);

			/* pick the mutex back up again */
			lck_mtx_lock(kd_trace_mtx_sysctl);

			/* write out whether we've exceeded the threshold */
			if (size < sizeof(kd_regtype)) {

			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

			kdbg_disable_bg_trace();

			ret = kdbg_setpid(&kd_Reg);

			if (size < sizeof(kd_regtype)) {

			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

			kdbg_disable_bg_trace();

			ret = kdbg_setpidex(&kd_Reg);

			ret = kdbg_readcpumap(where, sizep);

			ret = kdbg_readthrmap(where, sizep, NULL, NULL);

		case KERN_KDREADCURTHRMAP:
			ret = kdbg_readcurthrmap(where, sizep);

		case KERN_KDSETRTCDEC:
			if (size < sizeof(kd_regtype)) {

			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {

			kdbg_disable_bg_trace();

			ret = kdbg_setrtcdec(&kd_Reg);

		case KERN_KDSET_TYPEFILTER:
			kdbg_disable_bg_trace();

			if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0) {
				if ((ret = kdbg_enable_typefilter()))

			if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {

			if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {

			kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);

	lck_mtx_unlock(kd_trace_mtx_sysctl);
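
/*
 * Illustrative sketch (not part of the original source): KERN_KDSET_TYPEFILTER above
 * copies in a bitmap with one bit per class/subclass pair.  Assuming the usual
 * <sys/kdebug.h> layout (class in bits 31-24, subclass in bits 23-16, i.e.
 * CSC_MASK == 0xffff0000 and CSC_OFFSET == 16), the bit that governs a given
 * debugid would be located roughly like this:
 */
#if 0	/* example only -- not compiled */
	{
		/* hypothetical debugid: class 0x03 (DBG_FS), subclass 0x01, code 0x02 */
		uint32_t debugid = (0x03 << 24) | (0x01 << 16) | (0x02 << 2);
		/* index of the class/subclass pair within the bitmap */
		uint32_t csc = (debugid & CSC_MASK) >> CSC_OFFSET;
		boolean_t allowed;

		/* isset() is the bitmap helper pulled in via <sys/param.h> above */
		allowed = isset(type_filter_bitmap, csc) ? TRUE : FALSE;
	}
#endif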
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int cpu, min_cpu;
	uint64_t mintime, t, barrier = 0;

	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	struct kd_bufinfo *min_kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;
	boolean_t lostevents = FALSE;
	boolean_t out_of_events = FALSE;

	count = *number / sizeof(kd_buf);
	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)

	memset(&lostevent, 0, sizeof(lostevent));
	lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);

	/*
	 * Capture timestamp. Only sort events that have occurred before the timestamp.
	 * Since the iop is being flushed here, it's possible that events occur on the AP
	 * while running live tracing. If we are disabled, no new events should
	 * occur on the AP.
	 */
	if (kd_ctrl_page.enabled)
	{
		// timestamp is non-zero value
		barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	}

	// Request each IOP to provide us with up to date entries before merging buffers together.
	kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */
	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);

	if (count > nkdbufs)

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

		tempbuf = kdcopybuf;
		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;

			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {

				// Find one with raw data
				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)

				/* Debugging aid: maintain a copy of the "kdsp" */
				volatile union kds_ptr kdsp_shadow;

				// Get from cpu data to buffer header to buffer
				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

				volatile struct kd_storage *kdsp_actual_shadow;

				kdsp_actual_shadow = kdsp_actual;

				// See if there is actual data left in this buffer
				rcursor = kdsp_actual->kds_readlast;

				if (rcursor == kdsp_actual->kds_bufindx)

				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

				if ((t > barrier) && (barrier > 0)) {
					/*
					 * Need to wait to flush iop again before we
					 * sort any more data from the buffers
					 */
					out_of_events = TRUE;

				if (t < kdsp_actual->kds_timestamp) {
					/*
					 * indicates we've not yet completed filling in this event...
					 * this should only occur when we're looking
					 * at the buf that the record head is utilizing;
					 * we'll pick these events up on the next pass.
					 * we bail at this point so that we don't
					 * get an out-of-order timestream by continuing
					 * to read events from the other CPUs' timestream(s)
					 */
					out_of_events = TRUE;

			if (min_kdbp == NULL || out_of_events == TRUE) {
				/*
				 * all buffers ran empty
				 */
				out_of_events = TRUE;
			kdsp = min_kdbp->kd_list_head;
			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

			if (kdsp_actual->kds_lostevents == TRUE) {
				kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
				*tempbuf = lostevent;

				kdsp_actual->kds_lostevents = FALSE;

			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
				release_storage_unit(min_cpu, kdsp.raw);

			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < min_kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				min_kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));

				min_kdbp->kd_prev_timebase = mintime;

			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
		if (tempbuf_number) {

			error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
					UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

			RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

			if (RAW_file_written >= RAW_FLUSH_SIZE) {
				cluster_push(vp, 0);

				RAW_file_written = 0;
			}

			error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
			buffer += (tempbuf_number * sizeof(kd_buf));

			count   -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (out_of_events == TRUE)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if (!(old_kdebug_flags & KDBG_NOWRAP)) {
		enable_wrap(old_kdebug_slowcheck, lostevents);
	}
	return (error);
}
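
/*
 * Illustrative sketch (not part of the original source): the out-of-order timestamp
 * handling in kdbg_read() above keeps each buffer's output monotonic by replaying a
 * stale timestamp as "previous + 1 tick".  A minimal standalone version of that rule,
 * with a hypothetical emit_timestamp() helper in place of the kd_buf bookkeeping:
 */
#if 0	/* example only -- not compiled */
static uint64_t
emit_timestamp(uint64_t *prev_timebase, uint64_t t)
{
	if (t < *prev_timebase) {
		/* stale: force a strictly increasing output, one tick past the last value */
		*prev_timebase += 1;
	} else {
		/* in order: remember it as the new high-water mark */
		*prev_timebase = t;
	}
	return *prev_timebase;
}
#endif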
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#else
#error No TRAP_DEBUGGER definition for this architecture
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *                   on the system, tracing both kernel and user stacks
 *                   where available. Uses machine specific trace routines
 *                   for ppc, ppc64 and x86.
 * Inputs:           uap->pid - process id of process to be traced, or -1
 *                   for the entire system
 *                   uap->tracebuf - address of the user space destination
 *                   buffer
 *                   uap->tracebuf_size - size of the user space trace buffer
 *                   uap->options - various options, including the maximum
 *                   number of frames to trace.
 * Outputs:          EPERM if the caller is not privileged
 *                   EINVAL if the supplied trace buffer isn't sanely sized
 *                   ENOMEM if we don't have enough memory to satisfy the
 *                   request
 *                   ENOENT if the target pid isn't found
 *                   ENOSPC if the supplied buffer is insufficient
 *                   *retval contains the number of bytes traced, if successful
 *                   and -1 otherwise. If the request failed due to
 *                   tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
{
	if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {

	/* cap an individual stackshot to SANE_TRACEBUF_SIZE */
	if (size > SANE_TRACEBUF_SIZE) {
		size = SANE_TRACEBUF_SIZE;
	}

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();
	istate = ml_set_interrupts_enabled(FALSE);

	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, buf, size, flags, 0);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer.
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	*bytesTraced = kdp_stack_snapshot_bytes_traced();

	error = kdp_stack_snapshot_geterror();

	STACKSHOT_SUBSYS_UNLOCK();

	return error;
}
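
/*
 * Illustrative sketch (not part of the original source): a kernel caller could use
 * stack_snapshot_from_kernel() along the following lines.  The buffer size and the
 * pid of -1 (whole system, per the stack_snapshot() comment above) are illustrative;
 * flags are left at 0, and the 0-means-success check is an assumption.
 */
#if 0	/* example only -- not compiled */
	{
		uint32_t snap_size = 512 * 1024;	/* hypothetical 512 KB buffer */
		void *snap_buf = kalloc(snap_size);
		unsigned bytes = 0;

		if (snap_buf != NULL) {
			if (stack_snapshot_from_kernel(-1, snap_buf, snap_size, 0, &bytes) == 0) {
				/* 'bytes' bytes of snapshot data are now in snap_buf */
			}
			kfree(snap_buf, snap_size);
		}
	}
#endif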
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	unsigned bytesTraced = 0;

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
		telemetry_global_ctl(1);

	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
		telemetry_global_ctl(0);

	if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
		error = telemetry_enable_window();

		if (error != KERN_SUCCESS) {
			/* We are probably out of memory */

	} else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
		telemetry_disable_window();

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();
	if (tracebuf_size <= 0) {

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {

		bytesTraced = tracebuf_size;
		error = telemetry_gather(tracebuf, &bytesTraced,
		                         (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
		if (error == KERN_NO_SPACE) {

		*retval = (int)bytesTraced;

	if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {

		bytesTraced = tracebuf_size;
		error = telemetry_gather_windowed(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {

		*retval = (int)bytesTraced;

	if (flags & STACKSHOT_GET_BOOT_PROFILE) {

		if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {

		bytesTraced = tracebuf_size;
		error = bootprofile_gather(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {

		*retval = (int)bytesTraced;
	if (tracebuf_size > SANE_TRACEBUF_SIZE) {

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {

	if (panic_active()) {

	istate = ml_set_interrupts_enabled(FALSE);
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer.
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
			((bytesTraced < tracebuf_size) ?
			    bytesTraced : tracebuf_size))))

		*retval = bytesTraced;

	error = kdp_stack_snapshot_geterror();

	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
{

	nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);

	kernel_debug_string("start_kern_tracing");

	if (0 == kdbg_reinit(TRUE)) {

		if (need_map == TRUE) {
			uint32_t old1, old2;

			disable_wrap(&old1, &old2);
		}

		/* Hold off interrupts until the early traces are cut */
		boolean_t s = ml_set_interrupts_enabled(FALSE);

		kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);

		/*
		 * Transfer all very early events from the static buffer
		 * into the real buffers.
		 */
		kernel_debug_early_end();

		ml_set_interrupts_enabled(s);

		printf("kernel tracing started\n");
	} else {
		printf("error from kdbg_reinit, kernel tracing not started\n");
	}
}
void
start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
				   boolean_t need_map,
				   unsigned int typefilter)
{
	/* startup tracing */
	start_kern_tracing(new_nkdbufs, need_map);

	/* check that tracing was actually enabled */
	if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	/* setup the typefiltering */
	if (0 == kdbg_enable_typefilter())
		setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
}
void
kdbg_dump_trace_to_file(const char *filename)
{

	if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file.
			 * Disable tracing from this point to avoid
			 * perturbing the system.
			 */

			kd_ctrl_page.enabled = 0;
			commpage_update_kdebug_enable();
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kd_ctrl_page.enabled = 0;
	commpage_update_kdebug_enable();

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapcount * sizeof(kd_threadmap);
	kdbg_readthrmap(0, &number, vp, ctx);

	number = nkdbufs * sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}
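
/*
 * Illustrative sketch (not part of the original source): kdbg_dump_trace_to_file() is
 * intended for the panic/shutdown path; a caller simply hands it a destination path
 * and it writes the thread map followed by the event buffers.  The path below is
 * hypothetical.
 */
#if 0	/* example only -- not compiled */
	kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");
#endif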
/* Helper function for filling in the BSD name for an address space
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
	proc_t proc;

	/* Note: we can't use thread->task (and functions that rely on it) here
	 * because it hasn't been initialized yet when this function is called.
	 * We use the explicitly-passed task parameter instead.
	 */
	proc = get_bsdtask_info(task);
	if (proc != PROC_NULL)
		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
	else
		snprintf(name_buf, len, "%p [!bsd]", task);
}
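
/*
 * Illustrative sketch (not part of the original source): kdbg_get_task_name() fills a
 * caller-supplied buffer with either "procname/pid" or "<task pointer> [!bsd]" when no
 * BSD process is attached.  A minimal caller, with a hypothetical buffer size:
 */
#if 0	/* example only -- not compiled */
	{
		char task_name[64];	/* hypothetical buffer size */

		kdbg_get_task_name(task_name, sizeof(task_name), current_task());
		/* task_name now holds e.g. "kernel_task/0" or "0x... [!bsd]" */
	}
#endif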