/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/machine_routines.h>
#endif

#include <kern/clock.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/telemetry.h>
#include <vm/vm_kern.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/param.h>			/* for isset() */

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>
/*
 * IOP(s)
 *
 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
 *
 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
 * They are registered dynamically. Each is assigned a cpu_id at registration.
 *
 * NOTE: IOP trace events may not use the same clock hardware as "normal"
 * cpus. There is an effort made to synchronize the IOP timebase with the
 * AP, but it should be understood that there may be discrepancies.
 *
 * Once registered, an IOP is permanent, it cannot be unloaded/unregistered.
 * The current implementation depends on this for thread safety.
 *
 * New registrations occur by allocating a kd_iop struct and assigning
 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
 * list_head pointer resolves any races.
 *
 * You may safely walk the kd_iops list at any time, without holding locks.
 *
 * When allocating buffers, the current kd_iops head is captured. Any operations
 * that depend on the buffer state (such as flushing IOP traces on reads,
 * etc.) should use the captured list head. This will allow registrations to
 * take place while trace is in use.
 */
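/*
 * Illustrative sketch only (not part of the original source): how an IOP
 * driver might fill out a kd_callback_t and register it as described above.
 * The names "sample_iop_callback", "sample_iop_context" and "SampleIOP" are
 * hypothetical; the callback type, the KD_CALLBACK_* notifications and
 * kernel_debug_register_callback() are the interfaces this file provides.
 */
#if 0
static void
sample_iop_callback(void *context, kd_callback_type type, void *arg)
{
	/* react to KD_CALLBACK_KDEBUG_ENABLED / _DISABLED / _SYNC_FLUSH / _TYPEFILTER_CHANGED */
	(void)context; (void)type; (void)arg;
}

static void
sample_iop_register(void *sample_iop_context)
{
	kd_callback_t cb = {
		.func     = sample_iop_callback,
		.context  = sample_iop_context,
		.iop_name = "SampleIOP"
	};
	kernel_debug_register_callback(cb);
}
#endif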
typedef struct kd_iop {
	kd_callback_t	callback;
	uint32_t	cpu_id;
	uint64_t	last_timestamp; /* Prevent timer rollback */
	struct kd_iop*	next;
} kd_iop_t;

static kd_iop_t* kd_iops = NULL;
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readcpumap(user_addr_t, size_t *);
int kdbg_readcurcpumap(user_addr_t, size_t *);
int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_readcurthrmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_thrmap_init(void);
int kdbg_reinit(boolean_t);
int kdbg_bootstrap(boolean_t);

int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);

static int kdbg_enable_typefilter(void);
static int kdbg_disable_typefilter(void);

static int create_buffers(boolean_t);
static void delete_buffers(void);
extern void IOSleep(int);

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t * kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count  = 0;
unsigned int kd_entropy_indx   = 0;
vm_offset_t kd_entropy_buftomem = 0;
#define MAX_ENTROPY_COUNT	(128 * 1024)

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04
#define SLOW_CHUD	0x08

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
union kds_ptr {
	struct {
		uint32_t buffer_index:21;
		uint16_t offset:11;
	};
	uint32_t raw;
};

struct kd_storage {
	union	kds_ptr kds_next;
	uint32_t kds_bufindx;
	uint32_t kds_bufcnt;
	uint32_t kds_readlast;
	boolean_t kds_lostevents;
	uint64_t  kds_timestamp;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};
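/*
 * A kds_ptr names a single storage unit in one 32-bit word: 'buffer_index'
 * selects an entry in the kd_bufs[] array and 'offset' selects a kd_storage
 * within that buffer's kdsb_addr array (see POINTER_FROM_KDS_PTR() above).
 * Records inside a unit are tracked separately: kds_bufindx is the next free
 * slot, kds_bufcnt counts committed records, and kds_readlast is the read
 * cursor.
 */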
#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
	struct	kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
int	n_storage_buffers = 0;
int	n_storage_threshold = 0;
int	kds_waiter = 0;
int	kde_waiter = 0;
struct kd_bufinfo {
	union  kds_ptr kd_list_head;
	union  kds_ptr kd_list_tail;
	boolean_t kd_lostevents;
	uint32_t num_bufs;
	uint64_t kd_prev_timebase;
} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
struct kd_ctrl_page_t {
	union kds_ptr kds_free_list;
	uint32_t enabled;
	int	kds_inuse_count;
	uint32_t kdebug_flags;
	uint32_t kdebug_slowcheck;
	/*
	 * The number of kd_bufinfo structs allocated may not match the current
	 * number of active cpus. We capture the iops list head at initialization
	 * which we could use to calculate the number of cpus we allocated data for,
	 * unless it happens to be null. To avoid that case, we explicitly also
	 * capture a cpu count.
	 */
	kd_iop_t* kdebug_iops;
	uint32_t kdebug_cpus;
} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };

struct kd_bufinfo *kdbip = NULL;
#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

boolean_t kdlog_bg_trace = FALSE;
boolean_t kdlog_bg_trace_running = FALSE;
unsigned int bg_nkdbufs = 0;

unsigned int nkdbufs = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;
static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;
static lck_mtx_t  * kd_trace_mtx_sysctl;
static lck_grp_t  * kd_trace_mtx_sysctl_grp;
static lck_attr_t * kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t      *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t     *stackshot_subsys_lck_attr;
static lck_mtx_t       stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;
static int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

static int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);

extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;

off_t	RAW_file_offset = 0;
int	RAW_file_written = 0;

#define	RAW_FLUSH_SIZE	(2 * 1024 * 1024)

pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */
#define DBG_FUNC_MASK	0xfffffffc

/* TODO: move to kdebug.h */
#define CLASS_MASK      0xff000000
#define CLASS_OFFSET    24
#define SUBCLASS_MASK   0x00ff0000
#define SUBCLASS_OFFSET 16
#define CSC_MASK        0xffff0000	/* class and subclass mask */
#define CSC_OFFSET      SUBCLASS_OFFSET

#define EXTRACT_CLASS(debugid)    ( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET    ) )
#define EXTRACT_SUBCLASS(debugid) ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
#define EXTRACT_CSC(debugid)      ( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET      ) )

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000
#define DBG_SCALL_MASK	0xffff0000
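/*
 * Worked example: for MACH_vmfault (debugid 0x01300008),
 *   EXTRACT_CLASS()    -> 0x01   (the class byte, bits 31-24)
 *   EXTRACT_SUBCLASS() -> 0x30   (the subclass byte, bits 23-16)
 *   EXTRACT_CSC()      -> 0x0130 (class and subclass together, used to index
 *                                 the typefilter bitmap via isset())
 * and (debugid & DBG_FUNC_MASK) strips the FUNC_START/FUNC_END bits before
 * the kdlog_value* comparisons performed below.
 */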
/* task to string structure */
struct tts
{
	task_t    task;            /* from procs task */
	pid_t     pid;             /* from procs p_pid  */
	char      task_comm[20];   /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;    /* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;
/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void );

static uint8_t *type_filter_bitmap;
static uint32_t
kdbg_cpu_count(boolean_t early_trace)
{
	if (early_trace) {
		/*
		 * we've started tracing before the IOKit has even
		 * started running... just use the static max value
		 */
		return max_ncpus;
	}

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	assert(hinfo.logical_cpu_max > 0);
	return hinfo.logical_cpu_max;
}
#if MACH_ASSERT
static boolean_t
kdbg_iop_list_is_valid(kd_iop_t* iop)
{
	if (iop) {
		/* Is list sorted by cpu_id? */
		kd_iop_t* temp = iop;
		do {
			assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1);
			assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE)));
		} while ((temp = temp->next));

		/* Does each entry have a function and a name? */
		temp = iop;
		do {
			assert(temp->callback.func);
			assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name));
		} while ((temp = temp->next));
	}

	return TRUE;
}
static boolean_t
kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id)
{
	while (list) {
		if (list->cpu_id == cpu_id)
			return TRUE;
		list = list->next;
	}

	return FALSE;
}
/*
 * This is a temporary workaround for <rdar://problem/13512084>
 *
 * DO NOT CALL IN RELEASE BUILD, LEAKS ADDRESS INFORMATION!
 */
static boolean_t
kdbg_iop_list_check_for_timestamp_rollback(kd_iop_t* list, uint32_t cpu_id, uint64_t timestamp)
{
	while (list) {
		if (list->cpu_id == cpu_id) {
			if (list->last_timestamp > timestamp) {
				kprintf("%s is sending trace events that have gone backwards in time. Run the following command: \"symbols -2 -lookup 0x%p\" and file a radar against the matching kext.\n", list->callback.iop_name, (void*)list->callback.func);
			}
			/* Unconditional set mitigates syslog spam */
			list->last_timestamp = timestamp;
			return TRUE;
		}
		list = list->next;
	}

	return FALSE;
}
#endif /* MACH_ASSERT */
static void
kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
{
	while (iop) {
		iop->callback.func(iop->callback.context, type, arg);
		iop = iop->next;
	}
}
static void
kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kdebug_enable |= trace_type;
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
		kd_ctrl_page.enabled = 1;
	} else {
		kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
		kd_ctrl_page.enabled = 0;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (enabled) {
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
	} else {
		/*
		 * If you do not flush the IOP trace buffers, they can linger
		 * for a considerable period; consider code which disables and
		 * deallocates without a final sync flush.
		 */
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
	}
}
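/*
 * kdbg_set_flags() pairs a slow-path check bit (SLOW_*) with its matching
 * kdebug_enable bit: both are set when 'enabled' is TRUE and both are
 * cleared otherwise, under kds_spin_lock with interrupts disabled so the
 * logging fast path never observes the two flags out of sync.
 */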
void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
void
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
	*old_flags = kd_ctrl_page.kdebug_flags;

	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

	if ( !(old_slowcheck & SLOW_NOLOG))
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

	if (lostevents == TRUE)
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
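/*
 * create_buffers() sizes and allocates the per-cpu bookkeeping (kdbip), the
 * copy-out staging buffer (kdcopybuf) and the array of storage buffers, then
 * threads every storage unit onto kd_ctrl_page.kds_free_list. The cpu count
 * is derived from the captured IOP list head when IOPs have registered,
 * otherwise from kdbg_cpu_count().
 */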
static int
create_buffers(boolean_t early_trace)
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	/*
	 * For the duration of this allocation, trace code will only reference
	 * kdebug_iops. Any iops registered after this enabling will not be
	 * messaged until the buffers are reallocated.
	 *
	 * TLDR; Must read kd_iops once and only once!
	 */
	kd_ctrl_page.kdebug_iops = kd_iops;

	assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops));

	/*
	 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
	 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
	 * be the list head + 1.
	 */
	kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}

	if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	n_storage_units = 0;

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

			kd_ctrl_page.kds_free_list.buffer_index = i;
			kd_ctrl_page.kds_free_list.offset = n;
		}
		n_storage_units += n_elements;
	}

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

	for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = FALSE;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

	kd_ctrl_page.kds_inuse_count = 0;
	n_storage_threshold = n_storage_units / 2;
out:
	if (error)
		delete_buffers();

	return(error);
}
static void
delete_buffers(void)
{
	int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
			}
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

		kdbip = NULL;
	}
	kd_ctrl_page.kdebug_iops = NULL;
	kd_ctrl_page.kdebug_cpus = 0;
	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}
static void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
	int s;
	struct	kd_storage *kdsp_actual;
	struct	kd_bufinfo *kdbp;
	union	kds_ptr kdsp;

	kdsp.raw = kdsp_raw;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
		kd_ctrl_page.kds_free_list = kdsp;

		kd_ctrl_page.kds_inuse_count--;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}
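/*
 * allocate_storage_unit() appends a fresh storage unit to the given cpu's
 * list, taking it from the free list when possible. When the free list is
 * empty and wrapping is permitted, it steals the unit with the oldest first
 * event from some cpu's list head and marks the trace as wrapped; if
 * KDBG_NOWRAP is set, or no unit can safely be stolen, it disables logging
 * and returns FALSE so the caller drops the event.
 */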
static boolean_t
allocate_storage_unit(int cpu)
{
	union	kds_ptr kdsp;
	struct	kd_storage *kdsp_actual, *kdsp_next_actual;
	struct	kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t	oldest_ts, ts;
	boolean_t	retval = TRUE;
	int	s;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
			goto out;
	}

	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page.kds_inuse_count++;
	} else {
		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
			kdbp->kd_lostevents = TRUE;
			retval = FALSE;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {

			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			kd_ctrl_page.enabled = 0;
			retval = FALSE;
			goto out;
		}
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
			kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
			kdsp_next_actual->kds_lostevents = TRUE;
		} else
			kdbp_vict->kd_lostevents = TRUE;

		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp_actual->kds_timestamp = mach_absolute_time();
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt	  = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = FALSE;
	kdsp_actual->kds_bufindx = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
		kdbp->kd_list_head = kdsp;
	else
		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return (retval);
}
int
kernel_debug_register_callback(kd_callback_t callback)
{
	kd_iop_t* iop;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
		memcpy(&iop->callback, &callback, sizeof(kd_callback_t));

		/*
		 * <rdar://problem/13351477> Some IOP clients are not providing a name.
		 *
		 * Remove when fixed.
		 */
		{
			boolean_t is_valid_name = FALSE;
			for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
				/* This is roughly isprintable(c) */
				if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
					continue;
				if (callback.iop_name[length] == 0) {
					if (length)
						is_valid_name = TRUE;
					break;
				}
			}

			if (!is_valid_name) {
				strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
			}
		}

		iop->last_timestamp = 0;

		do {
			/*
			 * We use two pieces of state, the old list head
			 * pointer, and the value of old_list_head->cpu_id.
			 * If we read kd_iops more than once, it can change
			 * between reads.
			 *
			 * TLDR; Must not read kd_iops more than once per loop.
			 */
			iop->next = kd_iops;
			iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);

			/*
			 * Header says OSCompareAndSwapPtr has a memory barrier
			 */
		} while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));

		return iop->cpu_id;
	}

	return 0;
}
void
kernel_debug_enter(
	uint32_t	coreid,
	uint32_t	debugid,
	uint64_t	timestamp,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	threadid
	)
{
	uint32_t	bindx;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			if (debugid >= kdlog_beg && debugid <= kdlog_end)
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:
	assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid));
	/* Remove when <rdar://problem/13512084> is closed. */
	assert(kdbg_iop_list_check_for_timestamp_rollback(kd_ctrl_page.kdebug_iops, coreid, timestamp));

	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	kdbp = &kdbip[coreid];
	timestamp &= KDBG_TIMESTAMP_MASK;

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(coreid) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	// IOP entries can be allocated before xnu allocates and inits the buffer
	if (timestamp < kdsp_actual->kds_timestamp)
		kdsp_actual->kds_timestamp = timestamp;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = threadid;

	kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
		boolean_t need_kds_wakeup = FALSE;
		int	s;

		/*
		 * try to take the lock here to synchronize with the
		 * waiter entering the blocked state... use the try
		 * mode to prevent deadlocks caused by re-entering this
		 * routine due to various trace points triggered in the
		 * lck_spin_sleep_xxxx routines used to actually enter
		 * our wait condition... no problem if we fail,
		 * there will be lots of additional events coming in that
		 * will eventually succeed in grabbing this lock
		 */
		s = ml_set_interrupts_enabled(FALSE);

		if (lck_spin_try_lock(kdw_spin_lock)) {

			if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
				kds_waiter = 0;
				need_kds_wakeup = TRUE;
			}
			lck_spin_unlock(kdw_spin_lock);
		}
		ml_set_interrupts_enabled(s);

		if (need_kds_wakeup == TRUE)
			wakeup(&kds_waiter);
	}
}
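/*
 * kernel_debug_internal() is the common event-recording path for the AP
 * cpus: kernel_debug(), kernel_debug1() and the kdebug_trace() syscall all
 * funnel through it. It applies the CHUD hook, the entropy collector and
 * the pid/range/value/typefilter checks before claiming a record slot with
 * OSCompareAndSwap on the tail storage unit's kds_bufindx.
 */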
static void kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag);

__attribute__((always_inline)) void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag)
{
	struct proc	*curproc;
	uint64_t	now;
	uint32_t	bindx;
	int		s;
	kd_buf		*kd;
	int		cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
			kd_chudhook_fn chudhook;
			/*
			 * Mask interrupts to minimize the interval across
			 * which the driver providing the hook could be
			 * unloaded.
			 */
			s = ml_set_interrupts_enabled(FALSE);
			chudhook = kdebug_chudhook;
			if (chudhook)
				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
			ml_set_interrupts_enabled(s);
		}
		if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {

			now = mach_absolute_time();

			s = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(kds_spin_lock);

			if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {

				if (kd_entropy_indx < kd_entropy_count)	{
					kd_entropy_buffer[kd_entropy_indx] = now;
					kd_entropy_indx++;
				}
				if (kd_entropy_indx == kd_entropy_count) {
					/*
					 * Disable entropy collection
					 */
					kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
					kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
				}
			}
			lck_spin_unlock(kds_spin_lock);
			ml_set_interrupts_enabled(s);
		}
		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if ( !ml_at_interrupt_context()) {
			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
				/*
				 * If kdebug flag is not set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && !(curproc->p_kdebug)) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
				/*
				 * If kdebug flag is set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && curproc->p_kdebug) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				      (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
		}
		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (debugid < kdlog_beg || debugid > kdlog_end)
				goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			/* Always record trace system info */
			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
				goto out1;
		}
	}
record_event:
	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	cpu = cpu_number();
	kdbp = &kdbip[cpu];
retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(cpu) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
	    (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
		uint32_t	etype;
		uint32_t	stype;

		etype = debugid & DBG_FUNC_MASK;
		stype = debugid & DBG_SCALL_MASK;

		if (etype == INTERRUPT || etype == MACH_vmfault ||
		    stype == BSC_SysCall || stype == MACH_SysCall) {

			boolean_t need_kds_wakeup = FALSE;
			boolean_t need_kde_wakeup = FALSE;

			/*
			 * try to take the lock here to synchronize with the
			 * waiter entering the blocked state... use the try
			 * mode to prevent deadlocks caused by re-entering this
			 * routine due to various trace points triggered in the
			 * lck_spin_sleep_xxxx routines used to actually enter
			 * one of our 2 wait conditions... no problem if we fail,
			 * there will be lots of additional events coming in that
			 * will eventually succeed in grabbing this lock
			 */
			s = ml_set_interrupts_enabled(FALSE);

			if (lck_spin_try_lock(kdw_spin_lock)) {

				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
					kds_waiter = 0;
					need_kds_wakeup = TRUE;
				}
				if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
					kde_waiter = 0;
					need_kde_wakeup = TRUE;
				}
				lck_spin_unlock(kdw_spin_lock);
			}
			ml_set_interrupts_enabled(s);

			if (need_kds_wakeup == TRUE)
				wakeup(&kds_waiter);
			if (need_kde_wakeup == TRUE)
				wakeup(&kde_waiter);
		}
	}
}
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
}

/*
 * Support syscall SYS_kdebug_trace
 */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
	if ( __probable(kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);

	return(0);
}
static void
kdbg_lock_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutexes
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}
int
kdbg_bootstrap(boolean_t early_trace)
{
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers(early_trace));
}
int
kdbg_reinit(boolean_t early_trace)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap(early_trace);

	RAW_file_offset = 0;
	RAW_file_written = 0;

	return(ret);
}
void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if(dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread  = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}
/*
 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
 *
 * You may provide a buffer and size, or if you set the buffer to NULL, a
 * buffer of sufficient size will be allocated.
 *
 * If you provide a buffer and it is too small, sets cpumap_size to the number
 * of bytes required and returns EINVAL.
 *
 * On success, if you provided a buffer, cpumap_size is set to the number of
 * bytes written. If you did not provide a buffer, cpumap is set to the newly
 * allocated buffer and cpumap_size is set to the number of bytes allocated.
 *
 * NOTE: It may seem redundant to pass both iops and a cpu_count.
 *
 * We may be reporting data from "now", or from the "past".
 *
 * The "now" data would be for something like kdbg_readcurcpumap().
 * The "past" data would be for kdbg_readcpumap().
 *
 * If we do not pass both iops and cpu_count, and iops is NULL, this function
 * will need to read "now" state to get the number of cpus, which would be in
 * error if we were reporting "past" state.
 */
int
kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
{
	assert(cpumap);
	assert(cpumap_size);
	assert(cpu_count);
	assert(!iops || iops->cpu_id + 1 == cpu_count);

	uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
	uint32_t bytes_available = *cpumap_size;
	*cpumap_size = bytes_needed;

	if (*cpumap == NULL) {
		if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
			return ENOMEM;
		}
	} else if (bytes_available < bytes_needed) {
		return EINVAL;
	}

	kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;

	header->version_no = RAW_VERSION1;
	header->cpu_count = cpu_count;

	kd_cpumap* cpus = (kd_cpumap*)&header[1];

	int32_t index = cpu_count - 1;
	while (iops) {
		cpus[index].cpu_id = iops->cpu_id;
		cpus[index].flags = KDBG_CPUMAP_IS_IOP;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));

		iops = iops->next;
		index--;
	}

	while (index >= 0) {
		cpus[index].cpu_id = index;
		cpus[index].flags = 0;
		bzero(cpus[index].name, sizeof(cpus->name));
		strlcpy(cpus[index].name, "AP", sizeof(cpus->name));

		index--;
	}

	return KERN_SUCCESS;
}
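/*
 * The resulting cpumap is laid out as a kd_cpumap_header (version and cpu
 * count) immediately followed by cpu_count kd_cpumap entries, ordered by
 * cpu_id: the physical "AP" cpus occupy the low indices and any registered
 * IOPs (flagged KDBG_CPUMAP_IS_IOP) occupy the high ones. This is the same
 * blob that kdbg_readthrmap() embeds in the page-alignment padding of a
 * RAW_VERSION1 trace file.
 */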
void
kdbg_thrmap_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
		return;

	kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);

	if (kd_mapptr)
		kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
}
kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
{
	kd_threadmap	*mapptr;
	struct proc	*p;
	struct krt	akrt;
	int		tts_count;    /* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	int		i;
	vm_offset_t	kaddr;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */
	proc_list_lock();

	/*
	 * Calculate the sizes of map buffers
	 */
	for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
		*mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making into the tables. Bump up by 25%.
	 */
	*mapcount += *mapcount/4;
	tts_count += tts_count/4;

	*mapsize = *mapcount * sizeof(kd_threadmap);

	if (count && count < *mapcount)
		return (0);

	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize) == KERN_SUCCESS)) {
		bzero((void *)kaddr, *mapsize);
		mapptr = (kd_threadmap *)kaddr;
	} else
		return (0);

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		bzero((void *)kaddr, tts_mapsize);
		tts_mapptr = (struct tts *)kaddr;
	} else {
		kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);

		return (0);
	}
	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	proc_list_lock();

	/*
	 * should use proc_iterate
	 */
	for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
		if (p->p_lflag & P_LEXIT)
			continue;

		if (p->task) {
			task_reference(p->task);
			tts_mapptr[i].task = p->task;
			tts_mapptr[i].pid  = p->p_pid;
			(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
			i++;
		}
	}
	tts_count = i;

	proc_list_unlock();

	/*
	 * Initialize thread map data
	 */
	akrt.map = mapptr;
	akrt.count = 0;
	akrt.maxcount = *mapcount;

	for (i = 0; i < tts_count; i++) {
		akrt.atts = &tts_mapptr[i];
		task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
		task_deallocate((task_t) tts_mapptr[i].task);
	}
	kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);

	*mapcount = akrt.count;

	return (mapptr);
}
static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	global_state_pid = -1;
	kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	kdbg_disable_typefilter();

	delete_buffers();
	nkdbufs	= 0;

	/* Clean up the thread map buffer */
	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
	if (kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_mapptr = (kd_threadmap *) 0;
	}
	kd_mapsize = 0;
	kd_mapcount = 0;

	RAW_file_offset = 0;
	RAW_file_written = 0;
}
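/*
 * kdbg_setpid() and kdbg_setpidex() drive the two process filters consumed
 * by kernel_debug_internal(): value1 carries the target pid and value2 is a
 * boolean enabling or disabling the mark on that process. KDBG_PIDCHECK
 * records only marked processes, KDBG_PIDEXCLUDE records everything except
 * marked processes, and turning one mode on clears the other.
 */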
int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret=0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret=0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}

/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		ret = ENOTSUP;

	return(ret);
}
static int
kdbg_enable_typefilter(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
		/* free the old filter */
		kdbg_disable_typefilter();
	}

	if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
		return ENOSPC;
	}

	bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);

	/* Turn off range and value checks */
	kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);

	/* Enable filter checking */
	kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
	kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	return 0;
}

static int
kdbg_disable_typefilter(void)
{
	/* Disable filter checking */
	kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;

	/* Turn off slow checks unless pid checks are using them */
	if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
	else
		kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

	if(type_filter_bitmap == NULL)
		return 0;

	vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
	type_filter_bitmap = NULL;

	kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
	return 0;
}
int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK   |
						   KDBG_PIDCHECK   | KDBG_PIDEXCLUDE |
						   KDBG_TYPEFILTER_CHECK)) )
			kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		else
			kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_getreg(__unused kd_regtype * kdr)
{
	return(EINVAL);
#if 0
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
}
int
kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
{
	uint8_t* cpumap = NULL;
	uint32_t cpumap_size = 0;
	int ret = KERN_SUCCESS;

	if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
		if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
			if (user_cpumap) {
				size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
				if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
					ret = EFAULT;
				}
			}
			*user_cpumap_size = cpumap_size;
			kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
{
	kd_threadmap *mapptr;
	unsigned int mapsize;
	unsigned int mapcount;
	unsigned int count = 0;
	int ret = 0;

	count = *bufsize/sizeof(kd_threadmap);
	*bufsize = 0;

	if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
		if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
			ret = EFAULT;
		else
			*bufsize = (mapcount * sizeof(kd_threadmap));

		kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;
	unsigned int mapsize;

	count = avail/sizeof (kd_threadmap);

	mapsize = kd_mapcount * sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < mapsize)
				ret = EINVAL;
			else
			{
				if (vp)
				{
					RAW_header	header;
					clock_sec_t	secs;
					clock_usec_t	usecs;
					char	*pad_buf;
					uint32_t	pad_size;
					uint32_t	extra_thread_count = 0;
					uint32_t	cpumap_size;

					/*
					 * To write a RAW_VERSION1+ file, we
					 * must embed a cpumap in the "padding"
					 * used to page align the events following
					 * the threadmap. If the threadmap happens
					 * to not require enough padding, we
					 * artificially increase its footprint
					 * until it needs enough padding.
					 */

					pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
					cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);

					if (cpumap_size > pad_size) {
						/* Force an overflow onto the next page, we get a full page of padding */
						extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
					}

					header.version_no = RAW_VERSION1;
					header.thread_count = count + extra_thread_count;

					clock_get_calendar_microtime(&secs, &usecs);
					header.TOD_secs = secs;
					header.TOD_usecs = usecs;

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += sizeof(RAW_header);

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += mapsize;

					if (extra_thread_count) {
						pad_size = extra_thread_count * sizeof(kd_threadmap);
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);

						RAW_file_offset += pad_size;
					}

					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
					if (pad_size) {
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						/*
						 * embed a cpumap in the padding bytes.
						 * older code will skip this.
						 * newer code will know how to read it.
						 */
						uint32_t temp = pad_size;
						if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
							memset(pad_buf, 0, pad_size);
						}

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);

						RAW_file_offset += pad_size;
					}
					RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;

				} else {
					if (copyout(kd_mapptr, buffer, mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp)
	{
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
		RAW_file_written += sizeof(uint32_t);
	}
	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
int
kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;
	int s;
	u_int64_t abstime;
	u_int64_t ns;
	int wait_result = THREAD_AWAKENED;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail/sizeof(uint64_t);

	if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
		/*
		 * Enforce maximum entropy entries
		 */
		return(EINVAL);
	}
	kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);

	/*
	 * allocate entropy buffer
	 */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	} else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;

		return (ENOMEM);
	}
	kd_entropy_indx = 0;

	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);

	/*
	 * Enable entropy sampling
	 */
	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);

	if (ms_timeout) {
		ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
		nanoseconds_to_absolutetime(ns, &abstime);
		clock_absolutetime_interval_to_deadline( abstime, &abstime );
	} else
		abstime = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kdw_spin_lock);

	while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {

		kde_waiter = 1;

		if (abstime) {
			/*
			 * wait for the specified timeout or
			 * until we've hit our sample limit
			 */
			wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
		} else {
			/*
			 * wait until we've hit our sample limit
			 */
			wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
		}
		kde_waiter = 0;
	}
	lck_spin_unlock(kdw_spin_lock);
	ml_set_interrupts_enabled(s);

	/*
	 * Disable entropy sampling
	 */
	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);

	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);

	*number = 0;

	if (kd_entropy_indx > 0) {
		/*
		 * copyout the buffer
		 */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx * sizeof(uint64_t);
	}
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}
static unsigned int
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		return (value);
	else
		return (max_entries);
}
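/*
 * Worked example (assuming a 64-byte kd_buf, as on LP64): with 8 GB of
 * sane_size the cap works out to (8 GB / 2) / 64 = 67,108,864 trace entries;
 * any larger request is silently clamped to that value.
 */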
static int
kdbg_enable_bg_trace(void)
{
	int ret = 0;

	if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
		nkdbufs = bg_nkdbufs;
		ret = kdbg_reinit(FALSE);
		if (0 == ret) {
			kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
			kdlog_bg_trace_running = TRUE;
		}
	}
	return ret;
}

static void
kdbg_disable_bg_trace(void)
{
	if (kdlog_bg_trace_running == TRUE) {
		kdlog_bg_trace_running = FALSE;
		kdbg_clear();
	}
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	kdbg_lock_init();

	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
	}
	else {
		/* disable chudhook */
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
		kdebug_chudhook = 0;
	}
}
2260 kdbg_control(int *name
, u_int namelen
, user_addr_t where
, size_t *sizep
)
2263 size_t size
= *sizep
;
2264 unsigned int value
= 0;
2266 kbufinfo_t kd_bufinfo
;
2270 if (name
[0] == KERN_KDGETENTROPY
||
2271 name
[0] == KERN_KDWRITETR
||
2272 name
[0] == KERN_KDWRITEMAP
||
2273 name
[0] == KERN_KDEFLAGS
||
2274 name
[0] == KERN_KDDFLAGS
||
2275 name
[0] == KERN_KDENABLE
||
2276 name
[0] == KERN_KDENABLE_BG_TRACE
||
2277 name
[0] == KERN_KDSETBUF
) {
2286 if ( !(kd_ctrl_page
.kdebug_flags
& KDBG_LOCKINIT
))
2289 lck_mtx_lock(kd_trace_mtx_sysctl
);
2294 * Does not alter the global_state_pid
2295 * This is a passive request.
2297 if (size
< sizeof(kd_bufinfo
.nkdbufs
)) {
2299 * There is not enough room to return even
2300 * the first element of the info structure.
2305 kd_bufinfo
.nkdbufs
= nkdbufs
;
2306 kd_bufinfo
.nkdthreads
= kd_mapcount
;
2308 if ( (kd_ctrl_page
.kdebug_slowcheck
& SLOW_NOLOG
) )
2309 kd_bufinfo
.nolog
= 1;
2311 kd_bufinfo
.nolog
= 0;
2313 kd_bufinfo
.flags
= kd_ctrl_page
.kdebug_flags
;
2314 #if defined(__LP64__)
2315 kd_bufinfo
.flags
|= KDBG_LP64
;
2317 kd_bufinfo
.bufid
= global_state_pid
;
2319 if (size
>= sizeof(kd_bufinfo
)) {
2321 * Provide all the info we have
2323 if (copyout(&kd_bufinfo
, where
, sizeof(kd_bufinfo
)))
2327 * For backwards compatibility, only provide
2328 * as much info as there is room for.
2330 if (copyout(&kd_bufinfo
, where
, size
))
2336 case KERN_KDGETENTROPY
:
2337 if (kd_entropy_buffer
)
2340 ret
= kdbg_getentropy(where
, sizep
, value
);
2344 case KERN_KDENABLE_BG_TRACE
:
2345 bg_nkdbufs
= kdbg_set_nkdbufs(value
);
2346 kdlog_bg_trace
= TRUE
;
2347 ret
= kdbg_enable_bg_trace();
2351 case KERN_KDDISABLE_BG_TRACE
:
2352 kdlog_bg_trace
= FALSE
;
2353 kdbg_disable_bg_trace();
2358 if ((curproc
= current_proc()) != NULL
)
2359 curpid
= curproc
->p_pid
;
2364 if (global_state_pid
== -1)
2365 global_state_pid
= curpid
;
2366 else if (global_state_pid
!= curpid
) {
2367 if ((p
= proc_find(global_state_pid
)) == NULL
) {
2369 * The global pid no longer exists
2371 global_state_pid
= curpid
;
2374 * The global pid exists, deny this request
2385 kdbg_disable_bg_trace();
2387 value
&= KDBG_USERFLAGS
;
2388 kd_ctrl_page
.kdebug_flags
|= value
;
2391 kdbg_disable_bg_trace();
2393 value
&= KDBG_USERFLAGS
;
2394 kd_ctrl_page
.kdebug_flags
&= ~value
;
2398 * Enable tracing mechanism. Two types:
2399 * KDEBUG_TRACE is the standard one,
2400 * and KDEBUG_PPT which is a carefully
2401 * chosen subset to avoid performance impact.
2405 * enable only if buffer is initialized
2407 if (!(kd_ctrl_page
.kdebug_flags
& KDBG_BUFINIT
) ||
2408 !(value
== KDEBUG_ENABLE_TRACE
|| value
== KDEBUG_ENABLE_PPT
)) {
2414 kdbg_set_tracing_enabled(TRUE
, value
);
2418 kdbg_set_tracing_enabled(FALSE
, 0);
2422 kdbg_disable_bg_trace();
2424 nkdbufs
= kdbg_set_nkdbufs(value
);
2427 kdbg_disable_bg_trace();
2429 ret
= kdbg_reinit(FALSE
);
2433 ret
= kdbg_enable_bg_trace();
2436 if(size
< sizeof(kd_regtype
)) {
2440 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
2444 kdbg_disable_bg_trace();
2446 ret
= kdbg_setreg(&kd_Reg
);
2449 if (size
< sizeof(kd_regtype
)) {
2453 ret
= kdbg_getreg(&kd_Reg
);
2454 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))) {
2457 kdbg_disable_bg_trace();
2461 ret
= kdbg_read(where
, sizep
, NULL
, NULL
);

	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	{
		struct	vfs_context context;
		struct	fileproc *fp;
		size_t	number;
		vnode_t	vp;
		int	fd;

		kdbg_disable_bg_trace();

		if (name[0] == KERN_KDWRITETR) {
			int s;
			int wait_result = THREAD_AWAKENED;
			u_int64_t abstime;
			u_int64_t ns;

			if (*sizep) {
				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
				nanoseconds_to_absolutetime(ns, &abstime);
				clock_absolutetime_interval_to_deadline( abstime, &abstime );
			} else
				abstime = 0;

			s = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(kdw_spin_lock);

			while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

				kds_waiter = 1;

				if (abstime)
					wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
				else
					wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

				kds_waiter = 0;
			}
			lck_spin_unlock(kdw_spin_lock);
			ml_set_interrupts_enabled(s);
		}
		p = current_proc();
		fd = value;

		proc_fdlock(p);
		if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
			proc_fdunlock(p);
			break;
		}
		context.vc_thread = current_thread();
		context.vc_ucred = fp->f_fglob->fg_cred;

		if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
			fp_drop(p, fd, fp, 1);
			proc_fdunlock(p);

			ret = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		proc_fdunlock(p);

		if ((ret = vnode_getwithref(vp)) == 0) {

			if (name[0] == KERN_KDWRITETR) {
				number = nkdbufs * sizeof(kd_buf);

				KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
				ret = kdbg_read(0, &number, vp, &context);
				KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);

				*sizep = number;
			} else {
				number = kd_mapcount * sizeof(kd_threadmap);
				kdbg_readthrmap(0, &number, vp, &context);
			}
			vnode_put(vp);
		}
		fp_drop(p, fd, fp, 0);

		break;
	}
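
	/*
	 * Illustrative sketch (not from this file): for KERN_KDWRITETR the caller
	 * passes an open file descriptor in name[1] (picked up as 'value' above) and,
	 * optionally, a timeout in milliseconds through *sizep; the call blocks until
	 * enough storage units are in use (or the deadline passes), then streams the
	 * merged trace to that descriptor.  The MIB layout below is the usual kdebug
	 * convention and is an assumption of this example.
	 *
	 *	int fd = open("/tmp/trace.bin", O_CREAT | O_TRUNC | O_WRONLY, 0644);
	 *	int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDWRITETR, fd };
	 *	size_t timeout_ms = 1000;	// 0 means wait without a deadline
	 *
	 *	sysctl(mib, 4, NULL, &timeout_ms, NULL, 0);
	 */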

	case KERN_KDBUFWAIT:
	{
		/* WRITETR lite -- just block until there's data */
		int s;
		int wait_result = THREAD_AWAKENED;
		u_int64_t abstime;
		u_int64_t ns;
		size_t	number = 0;

		kdbg_disable_bg_trace();

		if (*sizep) {
			ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
			nanoseconds_to_absolutetime(ns, &abstime);
			clock_absolutetime_interval_to_deadline( abstime, &abstime );
		} else
			abstime = 0;

		s = ml_set_interrupts_enabled(FALSE);
		if ( !s )
			panic("trying to wait with interrupts off");
		lck_spin_lock(kdw_spin_lock);

		/* drop the mutex so we don't exclude others from
		 * accessing the trace buffers
		 */
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		while (wait_result == THREAD_AWAKENED &&
			kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

			kds_waiter = 1;

			if (abstime)
				wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
			else
				wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

			kds_waiter = 0;
		}

		/* check the count under the spinlock */
		number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);

		lck_spin_unlock(kdw_spin_lock);
		ml_set_interrupts_enabled(s);

		/* pick the mutex back up again */
		lck_mtx_lock(kd_trace_mtx_sysctl);

		/* write out whether we've exceeded the threshold */
		*sizep = number;
		break;
	}

	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setpid(&kd_Reg);
		break;

	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setpidex(&kd_Reg);
		break;

	case KERN_KDCPUMAP:
		ret = kdbg_readcpumap(where, sizep);
		break;

	case KERN_KDTHRMAP:
		ret = kdbg_readthrmap(where, sizep, NULL, NULL);
		break;

	case KERN_KDREADCURTHRMAP:
		ret = kdbg_readcurthrmap(where, sizep);
		break;

	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		kdbg_disable_bg_trace();

		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	case KERN_KDSET_TYPEFILTER:
		kdbg_disable_bg_trace();

		if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0) {
			if ((ret = kdbg_enable_typefilter()))
				break;
		}

		if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
			ret = EINVAL;
			break;
		}

		if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
			ret = EINVAL;
			break;
		}
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
		break;

	default:
		ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
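
/*
 * Illustrative sketch (not from this file): the usual client sequence against
 * kdbg_control() is to size the buffers, initialize them, and then enable tracing,
 * all through the same sysctl MIB.  The constants come from <sys/kdebug.h>; the
 * helper below is an assumption-based example of that flow, not kernel code.
 *
 *	static int kdebug_op(int op, int value)
 *	{
 *		int mib[4] = { CTL_KERN, KERN_KDEBUG, op, value };
 *		size_t needed = 0;
 *
 *		return sysctl(mib, 4, NULL, &needed, NULL, 0);
 *	}
 *
 *	kdebug_op(KERN_KDSETBUF, 8192);			// request 8192 kd_buf entries
 *	kdebug_op(KERN_KDSETUP, 0);			// allocate and initialize the buffers
 *	kdebug_op(KERN_KDENABLE, KDEBUG_ENABLE_TRACE);	// start standard tracing
 */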

/*
 * This code can, for the most part, run concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists without the use of any locks.
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, min_cpu;
	uint64_t mintime, t, barrier = 0;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t rcursor;
	kd_buf lostevent;
	union kds_ptr kdsp;
	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	struct kd_bufinfo *min_kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;
	boolean_t lostevents = FALSE;
	boolean_t out_of_events = FALSE;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	memset(&lostevent, 0, sizeof(lostevent));
	lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);

	/* Capture timestamp. Only sort events that have occurred before the timestamp.
	 * Since the IOP is being flushed here, it's possible that events occur on the AP
	 * while running live tracing. If we are disabled, no new events should
	 * occur on the AP.
	 */
	if (kd_ctrl_page.enabled)
	{
		// timestamp is non-zero value
		barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	}

	// Request each IOP to provide us with up to date entries before merging buffers together.
	kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */

	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
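
	/*
	 * What follows is effectively an N-way merge: on each pass we look at the
	 * oldest unread event in every per-CPU/IOP storage list, copy the one with
	 * the smallest timestamp into kdcopybuf, and repeat until the caller's count
	 * is satisfied, the buffers run dry, or we hit an event newer than 'barrier'.
	 */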

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		// While space remains in the copy buffer
		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;
			min_kdbp = NULL;
			min_cpu = 0;

			// Check all CPUs
			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {

				// Find one with raw data
				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
					continue;

				/* Debugging aid: maintain a copy of the "kdsp"
				 * index.
				 */
				volatile union kds_ptr kdsp_shadow;

				kdsp_shadow = kdsp;

				// Get from cpu data to buffer header to buffer
				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

				volatile struct kd_storage *kdsp_actual_shadow;

				kdsp_actual_shadow = kdsp_actual;

				// See if there is actual data left in this buffer
				rcursor = kdsp_actual->kds_readlast;

				if (rcursor == kdsp_actual->kds_bufindx)
					continue;

				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

				if ((t > barrier) && (barrier > 0)) {
					/*
					 * Need to flush the IOPs again before we
					 * can sort any more data from the buffers.
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < kdsp_actual->kds_timestamp) {
					/*
					 * indicates we've not yet completed filling
					 * in this event...
					 * this should only occur when we're looking
					 * at the buf that the record head is utilizing
					 * we'll pick these events up on the next
					 * call to kdbg_read
					 * we bail at this point so that we don't
					 * get an out-of-order timestream by continuing
					 * to read events from the other CPUs' timestream(s)
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < mintime) {
					mintime = t;
					min_kdbp = kdbp;
					min_cpu = cpu;
				}
			}
			if (min_kdbp == NULL || out_of_events == TRUE) {
				/*
				 * all buffers ran empty
				 */
				out_of_events = TRUE;
				break;
			}

			kdsp = min_kdbp->kd_list_head;
			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

			if (kdsp_actual->kds_lostevents == TRUE) {
				kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
				*tempbuf = lostevent;

				kdsp_actual->kds_lostevents = FALSE;
				lostevents = TRUE;

				goto nextevent;
			}
			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
				release_storage_unit(min_cpu, kdsp.raw);

			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < min_kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				min_kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
			} else
				min_kdbp->kd_prev_timebase = mintime;
nextevent:
			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;

			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
				break;
		}

		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

				if (RAW_file_written >= RAW_FLUSH_SIZE) {
					cluster_push(vp, 0);

					RAW_file_written = 0;
				}
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count   -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (out_of_events == TRUE)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		enable_wrap(old_kdebug_slowcheck, lostevents);
	}
	return (error);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#else
#error No TRAP_DEBUGGER definition for this architecture
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
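
/*
 * Illustrative sketch (not from this file): there is no public wrapper for this
 * syscall, so the few clients that use it (root only, per the suser() check above)
 * typically go through syscall(2).  SYS_stack_snapshot coming from <sys/syscall.h>
 * is an assumption of this example.
 *
 *	char buf[512 * 1024];
 *	// pid -1 requests the whole system; flags and dispatch_offset left at 0
 *	int bytes = syscall(SYS_stack_snapshot, -1, buf, (uint32_t)sizeof(buf), 0, 0);
 *
 *	if (bytes > 0)
 *		write(STDOUT_FILENO, buf, bytes);
 */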

int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
{
	int error = 0;
	boolean_t istate;

	if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
		return -1;
	}

	/* cap an individual stackshot at SANE_TRACEBUF_SIZE */
	if (size > SANE_TRACEBUF_SIZE) {
		size = SANE_TRACEBUF_SIZE;
	}

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();
	istate = ml_set_interrupts_enabled(FALSE);

	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, buf, size, flags, 0);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	*bytesTraced = kdp_stack_snapshot_bytes_traced();

	error = kdp_stack_snapshot_geterror();

	STACKSHOT_SUBSYS_UNLOCK();

	return error;
}

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	boolean_t istate;
	int error = 0;
	unsigned bytesTraced = 0;

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
		telemetry_global_ctl(1);
		*retval = 0;
		return (0);
	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
		telemetry_global_ctl(0);
		*retval = 0;
		return (0);
	}
#endif

	*retval = -1;
	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if (tracebuf_size <= 0) {
		error = EINVAL;
		goto error_exit;
	}

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = telemetry_gather(tracebuf, &bytesTraced,
		                         (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}
#endif

	if (flags & STACKSHOT_GET_BOOT_PROFILE) {

		if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = bootprofile_gather(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}

	if (tracebuf_size > SANE_TRACEBUF_SIZE) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}

	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/* Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */

	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
			((bytesTraced < tracebuf_size) ?
			    bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	} else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}

void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map) {

	if (!new_nkdbufs)
		return;
	nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();

	if (0 == kdbg_reinit(TRUE)) {

		if (need_map == TRUE) {
			uint32_t old1, old2;

			kdbg_thrmap_init();

			disable_wrap(&old1, &old2);
		}
		kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);

#if defined(__i386__) || defined(__x86_64__)
		uint64_t now = mach_absolute_time();

		KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
				      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
				      (uint32_t)(now >> 32), (uint32_t)now,
				      0);
#endif
		printf("kernel tracing started\n");
	} else {
		printf("error from kdbg_reinit, kernel tracing not started\n");
	}
}
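
/*
 * Illustrative sketch (not from this file): this entry point lets the kernel start
 * tracing before any user-space client exists to drive kdbg_control().  In this era
 * of xnu it is reached from early BSD init with the buffer count taken from a
 * boot-arg; the exact call site below is an assumption for illustration.
 *
 *	unsigned int new_nkdbufs = 0;
 *
 *	PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs));
 *	if (new_nkdbufs)
 *		start_kern_tracing(new_nkdbufs, FALSE);
 */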

void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t	ctx;
	vnode_t		vp;
	int		error;
	size_t		number;


	if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 * Disable tracing from this point to avoid
			 * perturbing the system.
			 */
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;
	kd_ctrl_page.enabled = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapcount * sizeof(kd_threadmap);
	kdbg_readthrmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}

/* Helper function for filling in the BSD name for an address space
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
	proc_t proc;

	/* Note: we can't use thread->task (and functions that rely on it) here
	 * because it hasn't been initialized yet when this function is called.
	 * We use the explicitly-passed task parameter instead.
	 */
	proc = get_bsdtask_info(task);
	if (proc != PROC_NULL)
		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
	else
		snprintf(name_buf, len, "%p [!bsd]", task);
}
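
/*
 * Illustrative usage (not from this file): callers hand in a small stack buffer and
 * get back either "p_comm/pid" or a raw task pointer tag; the buffer size below is
 * an arbitrary choice for the example.
 *
 *	char name[64];
 *
 *	kdbg_get_task_name(name, sizeof(name), task);
 *	// e.g. "launchd/1" for a BSD-backed task, "0xffffff80... [!bsd]" otherwise
 */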