xnu-3248.20.55.tar.gz
[apple/xnu.git] / bsd / kern / kdebug.c
1c79356b 1/*
39236c6e 2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
1c79356b 3 *
91447636 4 * @Apple_LICENSE_HEADER_START@
1c79356b 5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b 19 *
2d21ac55 20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
21 */
22
6d2010ae 23
1c79356b
A
24#include <machine/spl.h>
25
91447636
A
26#include <sys/errno.h>
27#include <sys/param.h>
0c530ab8 28#include <sys/systm.h>
91447636
A
29#include <sys/proc_internal.h>
30#include <sys/vm.h>
31#include <sys/sysctl.h>
32#include <sys/kdebug.h>
33#include <sys/sysproto.h>
6d2010ae 34#include <sys/bsdtask_info.h>
fe8ab488 35#include <sys/random.h>
3e170ce0 36#include <sys/stackshot.h>
91447636 37
1c79356b
A
38#define HZ 100
39#include <mach/clock_types.h>
40#include <mach/mach_types.h>
55e303ae 41#include <mach/mach_time.h>
1c79356b
A
42#include <machine/machine_routines.h>
43
b0d623f7 44#if defined(__i386__) || defined(__x86_64__)
6d2010ae
A
45#include <i386/rtclock_protos.h>
46#include <i386/mp.h>
47#include <i386/machine_routines.h>
b0d623f7 48#endif
6d2010ae
A
49
50#include <kern/clock.h>
51
1c79356b
A
52#include <kern/thread.h>
53#include <kern/task.h>
2d21ac55 54#include <kern/debug.h>
6d2010ae
A
55#include <kern/kalloc.h>
56#include <kern/cpu_data.h>
d41d1dae 57#include <kern/assert.h>
39236c6e 58#include <kern/telemetry.h>
3e170ce0 59#include <kern/sched_prim.h>
1c79356b
A
60#include <vm/vm_kern.h>
61#include <sys/lock.h>
62
0c530ab8 63#include <sys/malloc.h>
b0d623f7 64#include <sys/mcache.h>
0c530ab8
A
65#include <sys/kauth.h>
66
b0d623f7
A
67#include <sys/vnode.h>
68#include <sys/vnode_internal.h>
69#include <sys/fcntl.h>
6d2010ae
A
70#include <sys/file_internal.h>
71#include <sys/ubc.h>
316670eb 72#include <sys/param.h> /* for isset() */
b0d623f7 73
0c530ab8
A
74#include <mach/mach_host.h> /* for host_info() */
75#include <libkern/OSAtomic.h>
76
6d2010ae
A
77#include <machine/pal_routines.h>
78
04b8595b
A
79extern boolean_t kdebug_serial;
80#if KDEBUG_MOJO_TRACE
81#include <sys/kdebugevents.h>
82static void kdebug_serial_print( /* forward */
83 uint32_t, uint32_t, uint64_t,
84 uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
85#endif
86
39236c6e
A
87/*
88 * IOP(s)
89 *
90 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
91 *
92 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
93 * They are registered dynamically. Each is assigned a cpu_id at registration.
94 *
95 * NOTE: IOP trace events may not use the same clock hardware as "normal"
96 * cpus. There is an effort made to synchronize the IOP timebase with the
97 * AP, but it should be understood that there may be discrepancies.
98 *
 99 * Once registered, an IOP is permanent; it cannot be unloaded or unregistered.
100 * The current implementation depends on this for thread safety.
101 *
 102 * New registrations occur by allocating a kd_iop struct and assigning
103 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
104 * list_head pointer resolves any races.
105 *
106 * You may safely walk the kd_iops list at any time, without holding locks.
107 *
108 * When allocating buffers, the current kd_iops head is captured. Any operations
109 * that depend on the buffer state (such as flushing IOP traces on reads,
110 * etc.) should use the captured list head. This will allow registrations to
111 * take place while trace is in use.
112 */
113
114typedef struct kd_iop {
115 kd_callback_t callback;
116 uint32_t cpu_id;
117 uint64_t last_timestamp; /* Prevent timer rollback */
118 struct kd_iop* next;
119} kd_iop_t;
120
121static kd_iop_t* kd_iops = NULL;
122
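/*
 * Illustrative registration sketch -- not part of this file. The function
 * and variable names below (my_iop_callback, my_iop_flush_pending_events,
 * cb, my_cpu_id) are hypothetical; kd_callback_t, the KD_CALLBACK_* reasons,
 * kernel_debug_register_callback() and kernel_debug_enter() are the
 * interfaces declared in sys/kdebug.h.
 *
 *	static void
 *	my_iop_callback(void *context, kd_callback_type reason, void *arg)
 *	{
 *		if (reason == KD_CALLBACK_SYNC_FLUSH)
 *			my_iop_flush_pending_events();
 *	}
 *
 *	kd_callback_t cb = {
 *		.func = my_iop_callback,
 *		.context = NULL,
 *		.iop_name = "MYIOP"
 *	};
 *	uint32_t my_cpu_id = kernel_debug_register_callback(cb);
 *
 * The returned cpu_id is then passed as the first argument of every
 * kernel_debug_enter() call made on behalf of this IOP.
 */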
0c530ab8
A
123/* XXX should have prototypes, but Mach does not provide one */
124void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
125int cpu_number(void); /* XXX <machine/...> include path broken */
a1c7dba1 126void commpage_update_kdebug_enable(void); /* XXX sign */
0c530ab8
A
127
128/* XXX should probably be static, but it's debugging code... */
3e170ce0 129int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t, uint32_t);
0c530ab8
A
130void kdbg_control_chud(int, void *);
131int kdbg_control(int *, u_int, user_addr_t, size_t *);
39236c6e
A
132int kdbg_readcpumap(user_addr_t, size_t *);
133int kdbg_readcurcpumap(user_addr_t, size_t *);
134int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
3e170ce0 135int kdbg_readthrmap_v3(user_addr_t, size_t *, int);
39236c6e 136int kdbg_readcurthrmap(user_addr_t, size_t *);
0c530ab8
A
137int kdbg_setreg(kd_regtype *);
138int kdbg_setrtcdec(kd_regtype *);
139int kdbg_setpidex(kd_regtype *);
140int kdbg_setpid(kd_regtype *);
39236c6e 141void kdbg_thrmap_init(void);
6d2010ae
A
142int kdbg_reinit(boolean_t);
143int kdbg_bootstrap(boolean_t);
0c530ab8 144
3e170ce0
A
145int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count,
146 uint8_t** cpumap, uint32_t* cpumap_size);
147
148kd_threadmap* kdbg_thrmap_init_internal(unsigned int count,
149 unsigned int *mapsize,
150 unsigned int *mapcount);
151
152static boolean_t kdebug_current_proc_enabled(uint32_t debugid);
153static boolean_t kdebug_debugid_enabled(uint32_t debugid);
154static errno_t kdebug_check_trace_string(uint32_t debugid, uint64_t str_id);
155
156int kdbg_write_v3_header(user_addr_t, size_t *, int);
157int kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag,
158 uint32_t sub_tag, uint64_t length,
159 vnode_t vp, vfs_context_t ctx);
160
161user_addr_t kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag,
162 uint64_t length, vnode_t vp,
163 vfs_context_t ctx);
39236c6e 164
316670eb
A
165static int kdbg_enable_typefilter(void);
166static int kdbg_disable_typefilter(void);
3e170ce0
A
167static int kdbg_allocate_typefilter(void);
168static int kdbg_deallocate_typefilter(void);
316670eb 169
6d2010ae 170static int create_buffers(boolean_t);
0c530ab8
A
171static void delete_buffers(void);
172
2d21ac55
A
173extern void IOSleep(int);
174
9bccf70c
A
175/* trace enable status */
176unsigned int kdebug_enable = 0;
177
fe8ab488
A
178/* A static buffer to record events prior to the start of regular logging */
179#define KD_EARLY_BUFFER_MAX 64
180static kd_buf kd_early_buffer[KD_EARLY_BUFFER_MAX];
181static int kd_early_index = 0;
182static boolean_t kd_early_overflow = FALSE;
6d2010ae 183
91447636
A
184#define SLOW_NOLOG 0x01
185#define SLOW_CHECKS 0x02
6d2010ae 186#define SLOW_CHUD 0x08
91447636 187
b0d623f7
A
188#define EVENTS_PER_STORAGE_UNIT 2048
189#define MIN_STORAGE_UNITS_PER_CPU 4
190
6d2010ae
A
191#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
192
6d2010ae
A
193union kds_ptr {
194 struct {
195 uint32_t buffer_index:21;
196 uint16_t offset:11;
197 };
198 uint32_t raw;
199};
200
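/*
 * Worked example of the packed-pointer scheme (illustrative only): a
 * kds_ptr encodes a (storage buffer, storage unit) pair in 32 bits so
 * the free list and per-cpu lists can be linked without full pointers.
 *
 *	union kds_ptr p;
 *	p.buffer_index = 3;	(fourth kd_storage_buffers entry)
 *	p.offset = 17;		(eighteenth kd_storage within it)
 *	struct kd_storage *kds = POINTER_FROM_KDS_PTR(p);
 *
 * A raw value of KDS_PTR_NULL (0xffffffff) terminates a list.
 */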
b0d623f7 201struct kd_storage {
6d2010ae
A
202 union kds_ptr kds_next;
203 uint32_t kds_bufindx;
204 uint32_t kds_bufcnt;
205 uint32_t kds_readlast;
206 boolean_t kds_lostevents;
207 uint64_t kds_timestamp;
0c530ab8 208
b0d623f7 209 kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
0c530ab8
A
210};
211
b0d623f7
A
212#define MAX_BUFFER_SIZE (1024 * 1024 * 128)
213#define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
214
b0d623f7
A
215struct kd_storage_buffers {
216 struct kd_storage *kdsb_addr;
217 uint32_t kdsb_size;
218};
219
6d2010ae 220#define KDS_PTR_NULL 0xffffffff
b0d623f7
A
221struct kd_storage_buffers *kd_bufs = NULL;
222int n_storage_units = 0;
223int n_storage_buffers = 0;
6d2010ae
A
224int n_storage_threshold = 0;
225int kds_waiter = 0;
b0d623f7 226
6d2010ae 227#pragma pack(0)
b0d623f7 228struct kd_bufinfo {
6d2010ae
A
229 union kds_ptr kd_list_head;
230 union kds_ptr kd_list_tail;
231 boolean_t kd_lostevents;
232 uint32_t _pad;
233 uint64_t kd_prev_timebase;
234 uint32_t num_bufs;
39236c6e 235} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
b0d623f7 236
3e170ce0
A
237
238/*
239 * In principle, this control block can be shared in DRAM with other
240 * coprocessors and runtimes, for configuring what tracing is enabled.
241 */
6d2010ae
A
242struct kd_ctrl_page_t {
243 union kds_ptr kds_free_list;
244 uint32_t enabled :1;
245 uint32_t _pad0 :31;
246 int kds_inuse_count;
247 uint32_t kdebug_flags;
248 uint32_t kdebug_slowcheck;
39236c6e
A
249 /*
250 * The number of kd_bufinfo structs allocated may not match the current
251 * number of active cpus. We capture the iops list head at initialization
252 * which we could use to calculate the number of cpus we allocated data for,
253 * unless it happens to be null. To avoid that case, we explicitly also
254 * capture a cpu count.
255 */
256 kd_iop_t* kdebug_iops;
257 uint32_t kdebug_cpus;
258} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
259
6d2010ae
A
260#pragma pack()
261
0c530ab8
A
262struct kd_bufinfo *kdbip = NULL;
263
6d2010ae 264#define KDCOPYBUF_COUNT 8192
0c530ab8 265#define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
3e170ce0
A
266
267#define PAGE_4KB 4096
268#define PAGE_16KB 16384
269
0c530ab8
A
270kd_buf *kdcopybuf = NULL;
271
316670eb
A
272boolean_t kdlog_bg_trace = FALSE;
273boolean_t kdlog_bg_trace_running = FALSE;
274unsigned int bg_nkdbufs = 0;
275
276unsigned int nkdbufs = 0;
1c79356b
A
277unsigned int kdlog_beg=0;
278unsigned int kdlog_end=0;
279unsigned int kdlog_value1=0;
280unsigned int kdlog_value2=0;
281unsigned int kdlog_value3=0;
282unsigned int kdlog_value4=0;
283
6d2010ae 284static lck_spin_t * kdw_spin_lock;
b0d623f7 285static lck_spin_t * kds_spin_lock;
0c530ab8
A
286static lck_mtx_t * kd_trace_mtx_sysctl;
287static lck_grp_t * kd_trace_mtx_sysctl_grp;
288static lck_attr_t * kd_trace_mtx_sysctl_attr;
289static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
290
3e170ce0
A
291extern kern_return_t stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
292
293#if CONFIG_TELEMETRY
294extern kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
295#endif /* CONFIG_TELEMETRY */
0c530ab8 296
3e170ce0 297extern kern_return_t kern_stack_snapshot_with_reason(char* reason);
0c530ab8 298
3e170ce0 299extern kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user);
91447636 300
3e170ce0 301extern kern_return_t stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
91447636 302
3e170ce0 303int stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
1c79356b
A
304
305kd_threadmap *kd_mapptr = 0;
306unsigned int kd_mapsize = 0;
307unsigned int kd_mapcount = 0;
b0d623f7
A
308
309off_t RAW_file_offset = 0;
6d2010ae
A
310int RAW_file_written = 0;
311
312#define RAW_FLUSH_SIZE (2 * 1024 * 1024)
313
1c79356b
A
314pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
315
3e170ce0
A
316/*
317 * A globally increasing counter for identifying strings in trace. Starts at
318 * 1 because 0 is a reserved return value.
319 */
320__attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE)))
321static uint64_t g_curr_str_id = 1;
6d2010ae 322
3e170ce0
A
323#define STR_ID_SIG_OFFSET (48)
324#define STR_ID_MASK ((1ULL << STR_ID_SIG_OFFSET) - 1)
325#define STR_ID_SIG_MASK (~STR_ID_MASK)
316670eb 326
3e170ce0
A
327/*
328 * A bit pattern for identifying string IDs generated by
329 * kdebug_trace_string(2).
330 */
331static uint64_t g_str_id_signature = (0x70acULL << STR_ID_SIG_OFFSET);
316670eb 332
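/*
 * Sketch of the resulting ID layout (this mirrors what
 * kernel_debug_string_internal() does below): the low 48 bits come from
 * the counter and the top 16 bits carry the 0x70ac signature, which lets
 * kdebug_check_trace_string() cheaply reject IDs that were not minted here.
 *
 *	uint64_t id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
 *	id = (id & STR_ID_MASK) | g_str_id_signature;
 *	assert((id & STR_ID_SIG_MASK) == g_str_id_signature);
 */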
6d2010ae
A
333#define INTERRUPT 0x01050000
334#define MACH_vmfault 0x01300008
335#define BSC_SysCall 0x040c0000
336#define MACH_SysCall 0x010c0000
6d2010ae 337
9bccf70c
A
338/* task to string structure */
339struct tts
340{
0c530ab8 341 task_t task; /* from procs task */
55e303ae 342 pid_t pid; /* from procs p_pid */
9bccf70c
A
343 char task_comm[20]; /* from procs p_comm */
344};
345
346typedef struct tts tts_t;
347
1c79356b
A
348struct krt
349{
6d2010ae
A
350 kd_threadmap *map; /* pointer to the map buffer */
351 int count;
352 int maxcount;
353 struct tts *atts;
1c79356b
A
354};
355
356typedef struct krt krt_t;
357
9bccf70c 358/* This is for the CHUD toolkit call */
b0d623f7
A
359typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
360 uintptr_t arg2, uintptr_t arg3,
361 uintptr_t arg4, uintptr_t arg5);
9bccf70c 362
6d2010ae 363volatile kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
9bccf70c 364
316670eb
A
365static uint8_t *type_filter_bitmap;
366
fe8ab488
A
367/*
368 * This allows kperf to swap out the global state pid when kperf ownership is
 369 * passed from one process to another. The old global state pid is checked
 370 * first so that kperf cannot accidentally steal control of trace from a
 371 * non-kperf trace user that currently owns it.
372 */
373void
374kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);
375
376void
377kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
378{
379 if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
380 return;
381
382 lck_mtx_lock(kd_trace_mtx_sysctl);
383
384 if (old_pid == global_state_pid)
385 global_state_pid = new_pid;
386
387 lck_mtx_unlock(kd_trace_mtx_sysctl);
388}
389
39236c6e
A
390static uint32_t
391kdbg_cpu_count(boolean_t early_trace)
392{
393 if (early_trace) {
394 /*
395 * we've started tracing before the IOKit has even
396 * started running... just use the static max value
397 */
398 return max_ncpus;
399 }
400
401 host_basic_info_data_t hinfo;
402 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
403 host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
404 assert(hinfo.logical_cpu_max > 0);
405 return hinfo.logical_cpu_max;
406}
407
408#if MACH_ASSERT
39236c6e
A
409#endif /* MACH_ASSERT */
410
411static void
412kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
413{
414 while (iop) {
415 iop->callback.func(iop->callback.context, type, arg);
416 iop = iop->next;
417 }
418}
419
6d2010ae 420static void
316670eb 421kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
1c79356b 422{
6d2010ae
A
423 int s = ml_set_interrupts_enabled(FALSE);
424 lck_spin_lock(kds_spin_lock);
6d2010ae 425 if (enabled) {
316670eb 426 kdebug_enable |= trace_type;
6d2010ae
A
427 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
428 kd_ctrl_page.enabled = 1;
a1c7dba1 429 commpage_update_kdebug_enable();
6d2010ae 430 } else {
316670eb 431 kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
6d2010ae
A
432 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
433 kd_ctrl_page.enabled = 0;
a1c7dba1 434 commpage_update_kdebug_enable();
6d2010ae
A
435 }
436 lck_spin_unlock(kds_spin_lock);
437 ml_set_interrupts_enabled(s);
39236c6e
A
438
439 if (enabled) {
440 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
441 } else {
442 /*
443 * If you do not flush the IOP trace buffers, they can linger
444 * for a considerable period; consider code which disables and
445 * deallocates without a final sync flush.
446 */
447 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
448 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
449 }
1c79356b
A
450}
451
6d2010ae
A
452static void
453kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
454{
455 int s = ml_set_interrupts_enabled(FALSE);
456 lck_spin_lock(kds_spin_lock);
457
458 if (enabled) {
459 kd_ctrl_page.kdebug_slowcheck |= slowflag;
460 kdebug_enable |= enableflag;
461 } else {
462 kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
463 kdebug_enable &= ~enableflag;
464 }
39236c6e 465
6d2010ae
A
466 lck_spin_unlock(kds_spin_lock);
467 ml_set_interrupts_enabled(s);
468}
469
6d2010ae
A
470void
471disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
472{
473 int s = ml_set_interrupts_enabled(FALSE);
474 lck_spin_lock(kds_spin_lock);
475
476 *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
477 *old_flags = kd_ctrl_page.kdebug_flags;
478
479 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
480 kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
481
482 lck_spin_unlock(kds_spin_lock);
483 ml_set_interrupts_enabled(s);
484}
485
486void
487enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
488{
489 int s = ml_set_interrupts_enabled(FALSE);
490 lck_spin_lock(kds_spin_lock);
491
492 kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
493
494 if ( !(old_slowcheck & SLOW_NOLOG))
495 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
496
497 if (lostevents == TRUE)
498 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
499
500 lck_spin_unlock(kds_spin_lock);
501 ml_set_interrupts_enabled(s);
502}
503
0c530ab8 504static int
6d2010ae 505create_buffers(boolean_t early_trace)
0c530ab8 506{
b0d623f7
A
507 int i;
508 int p_buffer_size;
509 int f_buffer_size;
510 int f_buffers;
511 int error = 0;
512
39236c6e
A
513 /*
514 * For the duration of this allocation, trace code will only reference
515 * kdebug_iops. Any iops registered after this enabling will not be
516 * messaged until the buffers are reallocated.
517 *
518 * TLDR; Must read kd_iops once and only once!
519 */
520 kd_ctrl_page.kdebug_iops = kd_iops;
6d2010ae 521
39236c6e
A
522
523 /*
524 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
525 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
526 * be the list head + 1.
527 */
6d2010ae 528
39236c6e 529 kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);
6d2010ae 530
3e170ce0 531 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
39236c6e
A
532 error = ENOSPC;
533 goto out;
6d2010ae 534 }
6d2010ae 535
39236c6e
A
536 if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
537 n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
b0d623f7
A
538 else
539 n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
0c530ab8 540
b0d623f7 541 nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
2d21ac55 542
b0d623f7
A
543 f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
544 n_storage_buffers = f_buffers;
0c530ab8 545
b0d623f7
A
546 f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
547 p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
548
549 if (p_buffer_size)
550 n_storage_buffers++;
551
552 kd_bufs = NULL;
0c530ab8
A
553
554 if (kdcopybuf == 0) {
3e170ce0 555 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
b0d623f7
A
556 error = ENOSPC;
557 goto out;
558 }
0c530ab8 559 }
3e170ce0 560 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)), VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
b0d623f7
A
561 error = ENOSPC;
562 goto out;
0c530ab8 563 }
b0d623f7 564 bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
0c530ab8 565
b0d623f7 566 for (i = 0; i < f_buffers; i++) {
3e170ce0 567 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
b0d623f7
A
568 error = ENOSPC;
569 goto out;
570 }
6d2010ae
A
571 bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
572
b0d623f7 573 kd_bufs[i].kdsb_size = f_buffer_size;
0c530ab8 574 }
b0d623f7 575 if (p_buffer_size) {
3e170ce0 576 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
b0d623f7
A
577 error = ENOSPC;
578 goto out;
579 }
6d2010ae
A
580 bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
581
b0d623f7
A
582 kd_bufs[i].kdsb_size = p_buffer_size;
583 }
6d2010ae 584 n_storage_units = 0;
b0d623f7
A
585
586 for (i = 0; i < n_storage_buffers; i++) {
587 struct kd_storage *kds;
588 int n_elements;
589 int n;
590
591 n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
592 kds = kd_bufs[i].kdsb_addr;
593
594 for (n = 0; n < n_elements; n++) {
6d2010ae
A
595 kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
596 kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
b0d623f7 597
6d2010ae
A
598 kd_ctrl_page.kds_free_list.buffer_index = i;
599 kd_ctrl_page.kds_free_list.offset = n;
b0d623f7 600 }
6d2010ae 601 n_storage_units += n_elements;
0c530ab8 602 }
6d2010ae 603
39236c6e 604 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
b0d623f7 605
39236c6e 606 for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
6d2010ae
A
607 kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
608 kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
609 kdbip[i].kd_lostevents = FALSE;
610 kdbip[i].num_bufs = 0;
611 }
39236c6e 612
6d2010ae
A
613 kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
614
615 kd_ctrl_page.kds_inuse_count = 0;
616 n_storage_threshold = n_storage_units / 2;
b0d623f7
A
617out:
618 if (error)
619 delete_buffers();
0c530ab8 620
b0d623f7 621 return(error);
0c530ab8
A
622}
623
0c530ab8
A
624static void
625delete_buffers(void)
4452a7af 626{
39236c6e 627 int i;
b0d623f7
A
628
629 if (kd_bufs) {
630 for (i = 0; i < n_storage_buffers; i++) {
6d2010ae 631 if (kd_bufs[i].kdsb_addr) {
b0d623f7 632 kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
6d2010ae 633 }
b0d623f7
A
634 }
635 kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
0c530ab8 636
b0d623f7
A
637 kd_bufs = NULL;
638 n_storage_buffers = 0;
0c530ab8
A
639 }
640 if (kdcopybuf) {
641 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
b0d623f7 642
0c530ab8
A
643 kdcopybuf = NULL;
644 }
6d2010ae 645 kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
b0d623f7 646
6d2010ae 647 if (kdbip) {
39236c6e 648 kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
6d2010ae
A
649
650 kdbip = NULL;
651 }
39236c6e
A
652 kd_ctrl_page.kdebug_iops = NULL;
653 kd_ctrl_page.kdebug_cpus = 0;
6d2010ae 654 kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
0c530ab8
A
655}
656
6d2010ae
A
657void
658release_storage_unit(int cpu, uint32_t kdsp_raw)
0c530ab8 659{
b0d623f7 660 int s = 0;
6d2010ae
A
661 struct kd_storage *kdsp_actual;
662 struct kd_bufinfo *kdbp;
663 union kds_ptr kdsp;
664
665 kdsp.raw = kdsp_raw;
666
b0d623f7
A
667 s = ml_set_interrupts_enabled(FALSE);
668 lck_spin_lock(kds_spin_lock);
669
6d2010ae
A
670 kdbp = &kdbip[cpu];
671
672 if (kdsp.raw == kdbp->kd_list_head.raw) {
b0d623f7 673 /*
6d2010ae 674 * it's possible for the storage unit pointed to
b0d623f7 675 * by kdsp to have already been stolen... so
6d2010ae 676 * check to see if it's still the head of the list
b0d623f7
A
677 * now that we're behind the lock that protects
678 * adding and removing from the queue...
679 * since we only ever release and steal units from
6d2010ae 680 * that position, if it's no longer the head
b0d623f7
A
 681 * we have nothing to do in this context
682 */
6d2010ae
A
683 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
684 kdbp->kd_list_head = kdsp_actual->kds_next;
39236c6e 685
6d2010ae
A
686 kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
687 kd_ctrl_page.kds_free_list = kdsp;
688
689 kd_ctrl_page.kds_inuse_count--;
b0d623f7
A
690 }
691 lck_spin_unlock(kds_spin_lock);
692 ml_set_interrupts_enabled(s);
693}
694
695
6d2010ae
A
696boolean_t
697allocate_storage_unit(int cpu)
b0d623f7 698{
6d2010ae 699 union kds_ptr kdsp;
316670eb 700 struct kd_storage *kdsp_actual, *kdsp_next_actual;
6d2010ae 701 struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
b0d623f7 702 uint64_t oldest_ts, ts;
6d2010ae
A
703 boolean_t retval = TRUE;
704 int s = 0;
b0d623f7 705
6d2010ae 706 s = ml_set_interrupts_enabled(FALSE);
b0d623f7
A
707 lck_spin_lock(kds_spin_lock);
708
6d2010ae
A
709 kdbp = &kdbip[cpu];
710
711 /* If someone beat us to the allocate, return success */
712 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
713 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
714
715 if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
716 goto out;
717 }
718
719 if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
720 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
721 kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
722
723 kd_ctrl_page.kds_inuse_count++;
724 } else {
725 if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
726 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
727 kdbp->kd_lostevents = TRUE;
728 retval = FALSE;
b0d623f7
A
729 goto out;
730 }
731 kdbp_vict = NULL;
732 oldest_ts = (uint64_t)-1;
733
39236c6e 734 for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
b0d623f7 735
6d2010ae 736 if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
b0d623f7
A
737 /*
738 * no storage unit to steal
739 */
740 continue;
741 }
6d2010ae
A
742
743 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
744
745 if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
b0d623f7
A
746 /*
747 * make sure we don't steal the storage unit
6d2010ae
A
748 * being actively recorded to... need to
749 * move on because we don't want an out-of-order
750 * set of events showing up later
b0d623f7
A
751 */
752 continue;
753 }
6d2010ae 754 ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
b0d623f7
A
755
756 if (ts < oldest_ts) {
757 /*
758 * when 'wrapping', we want to steal the
759 * storage unit that has the 'earliest' time
760 * associated with it (first event time)
761 */
762 oldest_ts = ts;
763 kdbp_vict = kdbp_try;
764 }
765 }
b0d623f7
A
766 if (kdbp_vict == NULL) {
767 kdebug_enable = 0;
6d2010ae 768 kd_ctrl_page.enabled = 0;
a1c7dba1 769 commpage_update_kdebug_enable();
6d2010ae
A
770 retval = FALSE;
771 goto out;
b0d623f7 772 }
b0d623f7 773 kdsp = kdbp_vict->kd_list_head;
6d2010ae 774 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
6d2010ae 775 kdbp_vict->kd_list_head = kdsp_actual->kds_next;
b0d623f7 776
316670eb
A
777 if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
778 kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
779 kdsp_next_actual->kds_lostevents = TRUE;
780 } else
781 kdbp_vict->kd_lostevents = TRUE;
782
6d2010ae 783 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
b0d623f7 784 }
6d2010ae
A
785 kdsp_actual->kds_timestamp = mach_absolute_time();
786 kdsp_actual->kds_next.raw = KDS_PTR_NULL;
787 kdsp_actual->kds_bufcnt = 0;
788 kdsp_actual->kds_readlast = 0;
789
790 kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
791 kdbp->kd_lostevents = FALSE;
792 kdsp_actual->kds_bufindx = 0;
b0d623f7 793
6d2010ae 794 if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
b0d623f7
A
795 kdbp->kd_list_head = kdsp;
796 else
6d2010ae 797 POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
b0d623f7
A
798 kdbp->kd_list_tail = kdsp;
799out:
800 lck_spin_unlock(kds_spin_lock);
6d2010ae 801 ml_set_interrupts_enabled(s);
b0d623f7 802
6d2010ae 803 return (retval);
b0d623f7 804}
39236c6e
A
805
806int
807kernel_debug_register_callback(kd_callback_t callback)
808{
809 kd_iop_t* iop;
3e170ce0 810 if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t), VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) {
39236c6e
A
811 memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
812
813 /*
814 * <rdar://problem/13351477> Some IOP clients are not providing a name.
815 *
816 * Remove when fixed.
817 */
818 {
819 boolean_t is_valid_name = FALSE;
820 for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
821 /* This is roughly isprintable(c) */
822 if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
823 continue;
824 if (callback.iop_name[length] == 0) {
825 if (length)
826 is_valid_name = TRUE;
827 break;
828 }
829 }
830
831 if (!is_valid_name) {
832 strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
833 }
834 }
835
836 iop->last_timestamp = 0;
837
838 do {
839 /*
840 * We use two pieces of state, the old list head
841 * pointer, and the value of old_list_head->cpu_id.
842 * If we read kd_iops more than once, it can change
843 * between reads.
844 *
845 * TLDR; Must not read kd_iops more than once per loop.
846 */
847 iop->next = kd_iops;
848 iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
849
850 /*
851 * Header says OSCompareAndSwapPtr has a memory barrier
852 */
853 } while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));
854
855 return iop->cpu_id;
856 }
857
858 return 0;
859}
860
861void
862kernel_debug_enter(
863 uint32_t coreid,
864 uint32_t debugid,
865 uint64_t timestamp,
866 uintptr_t arg1,
867 uintptr_t arg2,
868 uintptr_t arg3,
869 uintptr_t arg4,
870 uintptr_t threadid
871 )
872{
873 uint32_t bindx;
874 kd_buf *kd;
875 struct kd_bufinfo *kdbp;
876 struct kd_storage *kdsp_actual;
877 union kds_ptr kds_raw;
878
879 if (kd_ctrl_page.kdebug_slowcheck) {
880
881 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
882 goto out1;
883
884 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
3e170ce0
A
885 /*
886 * Recheck if TYPEFILTER is being used, and if so,
887 * dereference bitmap. If the trace facility is being
888 * disabled, we have ~100ms of preemption-free CPU
889 * usage to access the bitmap.
890 */
891 disable_preemption();
892 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
893 if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
894 goto record_event_preempt_disabled;
895 }
896 enable_preemption();
39236c6e
A
897 goto out1;
898 }
899 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
900 if (debugid >= kdlog_beg && debugid <= kdlog_end)
901 goto record_event;
902 goto out1;
903 }
904 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
3e170ce0
A
905 if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
906 (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
907 (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
908 (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
39236c6e
A
909 goto out1;
910 }
911 }
912
913record_event:
39236c6e
A
914
915 disable_preemption();
916
3e170ce0 917record_event_preempt_disabled:
39236c6e
A
918 if (kd_ctrl_page.enabled == 0)
919 goto out;
920
921 kdbp = &kdbip[coreid];
922 timestamp &= KDBG_TIMESTAMP_MASK;
923
04b8595b
A
924#if KDEBUG_MOJO_TRACE
925 if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
926 kdebug_serial_print(coreid, debugid, timestamp,
927 arg1, arg2, arg3, arg4, threadid);
928#endif
929
39236c6e
A
930retry_q:
931 kds_raw = kdbp->kd_list_tail;
932
933 if (kds_raw.raw != KDS_PTR_NULL) {
934 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
935 bindx = kdsp_actual->kds_bufindx;
936 } else
937 kdsp_actual = NULL;
938
939 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
940 if (allocate_storage_unit(coreid) == FALSE) {
941 /*
942 * this can only happen if wrapping
943 * has been disabled
944 */
945 goto out;
946 }
947 goto retry_q;
948 }
949 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
950 goto retry_q;
951
952 // IOP entries can be allocated before xnu allocates and inits the buffer
953 if (timestamp < kdsp_actual->kds_timestamp)
954 kdsp_actual->kds_timestamp = timestamp;
955
956 kd = &kdsp_actual->kds_records[bindx];
957
958 kd->debugid = debugid;
959 kd->arg1 = arg1;
960 kd->arg2 = arg2;
961 kd->arg3 = arg3;
962 kd->arg4 = arg4;
963 kd->arg5 = threadid;
964
965 kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
966
967 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
968out:
969 enable_preemption();
970out1:
971 if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
972 boolean_t need_kds_wakeup = FALSE;
973 int s;
974
975 /*
976 * try to take the lock here to synchronize with the
977 * waiter entering the blocked state... use the try
978 * mode to prevent deadlocks caused by re-entering this
979 * routine due to various trace points triggered in the
980 * lck_spin_sleep_xxxx routines used to actually enter
981 * our wait condition... no problem if we fail,
982 * there will be lots of additional events coming in that
983 * will eventually succeed in grabbing this lock
984 */
985 s = ml_set_interrupts_enabled(FALSE);
986
987 if (lck_spin_try_lock(kdw_spin_lock)) {
988
989 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
990 kds_waiter = 0;
991 need_kds_wakeup = TRUE;
992 }
993 lck_spin_unlock(kdw_spin_lock);
994
995 ml_set_interrupts_enabled(s);
996
997 if (need_kds_wakeup == TRUE)
998 wakeup(&kds_waiter);
999 }
1000 }
1001}
1002
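/*
 * Slot claiming in kernel_debug_enter() above, in brief (the same
 * protocol is used by kernel_debug_internal() below): with preemption
 * disabled, read the tail storage unit's kds_bufindx, then try to advance
 * it with OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx).
 * A failed CAS means another event claimed that slot first, so the tail
 * is re-read and the claim retried. kds_bufcnt is only bumped
 * (OSAddAtomic) after the record has been fully written, so readers never
 * count a partially filled slot as valid.
 */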
1003
b0d623f7 1004
a1c7dba1 1005static void
b0d623f7
A
1006kernel_debug_internal(
1007 uint32_t debugid,
1008 uintptr_t arg1,
1009 uintptr_t arg2,
1010 uintptr_t arg3,
1011 uintptr_t arg4,
fe8ab488 1012 uintptr_t arg5)
b0d623f7
A
1013{
1014 struct proc *curproc;
1015 uint64_t now;
6d2010ae
A
1016 uint32_t bindx;
1017 boolean_t s;
b0d623f7
A
1018 kd_buf *kd;
1019 int cpu;
1020 struct kd_bufinfo *kdbp;
6d2010ae 1021 struct kd_storage *kdsp_actual;
316670eb 1022 union kds_ptr kds_raw;
b0d623f7 1023
316670eb 1024
91447636 1025
6d2010ae 1026 if (kd_ctrl_page.kdebug_slowcheck) {
9bccf70c 1027
6d2010ae
A
1028 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
1029 kd_chudhook_fn chudhook;
b0d623f7 1030 /*
6d2010ae
A
1031 * Mask interrupts to minimize the interval across
1032 * which the driver providing the hook could be
1033 * unloaded.
b0d623f7 1034 */
6d2010ae
A
1035 s = ml_set_interrupts_enabled(FALSE);
1036 chudhook = kdebug_chudhook;
1037 if (chudhook)
1038 chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
1039 ml_set_interrupts_enabled(s);
b0d623f7 1040 }
316670eb 1041 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
6d2010ae 1042 goto out1;
b0d623f7 1043
6d2010ae
A
1044 if ( !ml_at_interrupt_context()) {
1045 if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
1046 /*
1047 * If kdebug flag is not set for current proc, return
1048 */
1049 curproc = current_proc();
1c79356b 1050
6d2010ae 1051 if ((curproc && !(curproc->p_kdebug)) &&
316670eb
A
1052 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1053 (debugid >> 24 != DBG_TRACE))
6d2010ae
A
1054 goto out1;
1055 }
1056 else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
1057 /*
1058 * If kdebug flag is set for current proc, return
1059 */
1060 curproc = current_proc();
b0d623f7 1061
6d2010ae 1062 if ((curproc && curproc->p_kdebug) &&
316670eb
A
1063 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1064 (debugid >> 24 != DBG_TRACE))
6d2010ae
A
1065 goto out1;
1066 }
1067 }
316670eb
A
1068
1069 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1070 /* Always record trace system info */
3e170ce0 1071 if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
316670eb
A
1072 goto record_event;
1073
3e170ce0
A
1074 /*
1075 * Recheck if TYPEFILTER is being used, and if so,
1076 * dereference bitmap. If the trace facility is being
1077 * disabled, we have ~100ms of preemption-free CPU
1078 * usage to access the bitmap.
1079 */
1080 disable_preemption();
1081 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1082 if (isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))
1083 goto record_event_preempt_disabled;
1084 }
1085 enable_preemption();
316670eb
A
1086 goto out1;
1087 }
1088 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
39236c6e 1089 /* Always record trace system info */
3e170ce0 1090 if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
316670eb 1091 goto record_event;
39236c6e
A
1092
1093 if (debugid < kdlog_beg || debugid > kdlog_end)
1094 goto out1;
6d2010ae
A
1095 }
1096 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
39236c6e 1097 /* Always record trace system info */
3e170ce0 1098 if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
39236c6e
A
1099 goto record_event;
1100
3e170ce0
A
1101 if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
1102 (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
1103 (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
1104 (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
6d2010ae
A
1105 goto out1;
1106 }
b0d623f7 1107 }
316670eb 1108record_event:
6d2010ae 1109 disable_preemption();
39236c6e 1110
3e170ce0 1111record_event_preempt_disabled:
39236c6e
A
1112 if (kd_ctrl_page.enabled == 0)
1113 goto out;
1114
6d2010ae 1115 cpu = cpu_number();
b0d623f7 1116 kdbp = &kdbip[cpu];
04b8595b
A
1117
1118#if KDEBUG_MOJO_TRACE
1119 if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
1120 kdebug_serial_print(cpu, debugid,
1121 mach_absolute_time() & KDBG_TIMESTAMP_MASK,
1122 arg1, arg2, arg3, arg4, arg5);
1123#endif
1124
6d2010ae 1125retry_q:
316670eb
A
1126 kds_raw = kdbp->kd_list_tail;
1127
1128 if (kds_raw.raw != KDS_PTR_NULL) {
1129 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
6d2010ae
A
1130 bindx = kdsp_actual->kds_bufindx;
1131 } else
1132 kdsp_actual = NULL;
1133
1134 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
1135 if (allocate_storage_unit(cpu) == FALSE) {
b0d623f7
A
1136 /*
1137 * this can only happen if wrapping
1138 * has been disabled
1139 */
1140 goto out;
1141 }
6d2010ae 1142 goto retry_q;
b0d623f7 1143 }
6d2010ae
A
1144 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
1145
1146 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
1147 goto retry_q;
1148
1149 kd = &kdsp_actual->kds_records[bindx];
b0d623f7 1150
1c79356b
A
1151 kd->debugid = debugid;
1152 kd->arg1 = arg1;
1153 kd->arg2 = arg2;
1154 kd->arg3 = arg3;
1155 kd->arg4 = arg4;
0c530ab8 1156 kd->arg5 = arg5;
1c79356b 1157
b0d623f7 1158 kdbg_set_timestamp_and_cpu(kd, now, cpu);
1c79356b 1159
6d2010ae 1160 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
0c530ab8 1161out:
6d2010ae
A
1162 enable_preemption();
1163out1:
fe8ab488 1164 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
6d2010ae
A
1165 uint32_t etype;
1166 uint32_t stype;
1167
3e170ce0
A
1168 etype = debugid & KDBG_EVENTID_MASK;
1169 stype = debugid & KDBG_CSC_MASK;
6d2010ae
A
1170
1171 if (etype == INTERRUPT || etype == MACH_vmfault ||
1172 stype == BSC_SysCall || stype == MACH_SysCall) {
1173
1174 boolean_t need_kds_wakeup = FALSE;
6d2010ae
A
1175
1176 /*
1177 * try to take the lock here to synchronize with the
1178 * waiter entering the blocked state... use the try
1179 * mode to prevent deadlocks caused by re-entering this
1180 * routine due to various trace points triggered in the
1181 * lck_spin_sleep_xxxx routines used to actually enter
1182 * one of our 2 wait conditions... no problem if we fail,
1183 * there will be lots of additional events coming in that
1184 * will eventually succeed in grabbing this lock
1185 */
1186 s = ml_set_interrupts_enabled(FALSE);
1187
1188 if (lck_spin_try_lock(kdw_spin_lock)) {
1189
1190 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1191 kds_waiter = 0;
1192 need_kds_wakeup = TRUE;
1193 }
6d2010ae
A
1194 lck_spin_unlock(kdw_spin_lock);
1195 }
1196 ml_set_interrupts_enabled(s);
1197
1198 if (need_kds_wakeup == TRUE)
1199 wakeup(&kds_waiter);
6d2010ae
A
1200 }
1201 }
1c79356b
A
1202}
1203
1204void
b0d623f7
A
1205kernel_debug(
1206 uint32_t debugid,
1207 uintptr_t arg1,
1208 uintptr_t arg2,
1209 uintptr_t arg3,
1210 uintptr_t arg4,
1211 __unused uintptr_t arg5)
1c79356b 1212{
fe8ab488 1213 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
0c530ab8 1214}
21362eb3 1215
0c530ab8 1216void
b0d623f7
A
1217kernel_debug1(
1218 uint32_t debugid,
1219 uintptr_t arg1,
1220 uintptr_t arg2,
1221 uintptr_t arg3,
1222 uintptr_t arg4,
1223 uintptr_t arg5)
0c530ab8 1224{
fe8ab488
A
1225 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
1226}
1227
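/*
 * In-kernel usage sketch: most call sites do not call kernel_debug()
 * directly but go through the KERNEL_DEBUG()/KERNEL_DEBUG_CONSTANT()
 * macros from sys/kdebug.h, which compile away when tracing support is
 * not built in. The event code below is arbitrary and for illustration:
 *
 *	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_VM, 2) | DBG_FUNC_START,
 *	    addr, size, 0, 0, 0);
 */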
1228void
3e170ce0 1229kernel_debug_string_simple(const char *message)
fe8ab488
A
1230{
1231 uintptr_t arg[4] = {0, 0, 0, 0};
1232
1233 /* Stuff the message string in the args and log it. */
1234 strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
1235 KERNEL_DEBUG_EARLY(
04b8595b 1236 TRACE_INFO_STRING,
fe8ab488
A
1237 arg[0], arg[1], arg[2], arg[3]);
1238}
1239
1240extern int master_cpu; /* MACH_KERNEL_PRIVATE */
1241/*
1242 * Used prior to start_kern_tracing() being called.
1243 * Log temporarily into a static buffer.
1244 */
1245void
1246kernel_debug_early(
1247 uint32_t debugid,
1248 uintptr_t arg1,
1249 uintptr_t arg2,
1250 uintptr_t arg3,
1251 uintptr_t arg4)
1252{
1253 /* If tracing is already initialized, use it */
04b8595b 1254 if (nkdbufs) {
fe8ab488 1255 KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
04b8595b
A
1256 return;
1257 }
fe8ab488
A
1258
1259 /* Do nothing if the buffer is full or we're not on the boot cpu */
1260 kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
1261 if (kd_early_overflow ||
1262 cpu_number() != master_cpu)
1263 return;
1264
1265 kd_early_buffer[kd_early_index].debugid = debugid;
1266 kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
1267 kd_early_buffer[kd_early_index].arg1 = arg1;
1268 kd_early_buffer[kd_early_index].arg2 = arg2;
1269 kd_early_buffer[kd_early_index].arg3 = arg3;
1270 kd_early_buffer[kd_early_index].arg4 = arg4;
1271 kd_early_buffer[kd_early_index].arg5 = 0;
1272 kd_early_index++;
1273}
1274
1275/*
04b8595b 1276 * Transfer the contents of the temporary buffer into the trace buffers.
fe8ab488
A
1277 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
1278 * when mach_absolute_time is set to 0.
1279 */
1280static void
1281kernel_debug_early_end(void)
1282{
1283 int i;
1284
1285 if (cpu_number() != master_cpu)
1286 panic("kernel_debug_early_end() not call on boot processor");
1287
1288 /* Fake sentinel marking the start of kernel time relative to TSC */
1289 kernel_debug_enter(
1290 0,
04b8595b 1291 TRACE_TIMESTAMPS,
fe8ab488
A
1292 0,
1293 (uint32_t)(tsc_rebase_abs_time >> 32),
1294 (uint32_t)tsc_rebase_abs_time,
1295 0,
1296 0,
1297 0);
1298 for (i = 0; i < kd_early_index; i++) {
1299 kernel_debug_enter(
1300 0,
1301 kd_early_buffer[i].debugid,
1302 kd_early_buffer[i].timestamp,
1303 kd_early_buffer[i].arg1,
1304 kd_early_buffer[i].arg2,
1305 kd_early_buffer[i].arg3,
1306 kd_early_buffer[i].arg4,
1307 0);
1308 }
1309
1310 /* Cut events-lost event on overflow */
1311 if (kd_early_overflow)
1312 KERNEL_DEBUG_CONSTANT(
04b8595b 1313 TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
fe8ab488
A
1314
1315 /* This trace marks the start of kernel tracing */
3e170ce0
A
1316 kernel_debug_string_simple("early trace done");
1317}
1318
1319/*
1320 * Returns non-zero if debugid is in a reserved class.
1321 */
1322static int
1323kdebug_validate_debugid(uint32_t debugid)
1324{
1325 uint8_t debugid_class;
1326
1327 debugid_class = KDBG_EXTRACT_CLASS(debugid);
1328 switch (debugid_class) {
1329 case DBG_TRACE:
1330 return EPERM;
1331 }
1332
1333 return 0;
0c530ab8 1334}
6601e61a 1335
6d2010ae 1336/*
a1c7dba1 1337 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
6d2010ae
A
1338 */
1339int
a1c7dba1
A
1340kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
1341{
1342 struct kdebug_trace64_args uap64;
1343
1344 uap64.code = uap->code;
1345 uap64.arg1 = uap->arg1;
1346 uap64.arg2 = uap->arg2;
1347 uap64.arg3 = uap->arg3;
1348 uap64.arg4 = uap->arg4;
1349
1350 return kdebug_trace64(p, &uap64, retval);
1351}
1352
1353/*
1354 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
1355 */
1356int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
0c530ab8 1357{
3e170ce0 1358 int err;
a1c7dba1 1359
3e170ce0
A
1360 if ((err = kdebug_validate_debugid(uap->code)) != 0) {
1361 return err;
a1c7dba1
A
1362 }
1363
6d2010ae 1364 if ( __probable(kdebug_enable == 0) )
a1c7dba1
A
1365 return(0);
1366
1367 kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
91447636 1368
6d2010ae
A
1369 return(0);
1370}
1c79356b 1371
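/*
 * Userspace usage sketch, assuming the libsyscall wrapper with this
 * signature is available (the event code and arguments are illustrative;
 * DBG_APPS class codes are intended for application-defined events):
 *
 *	#include <sys/kdebug.h>
 *
 *	uint32_t code = KDBG_CODE(DBG_APPS, 0x0F, 0x01);
 *	kdebug_trace(code | DBG_FUNC_START, 1, 2, 3, 4);
 *	... traced work ...
 *	kdebug_trace(code | DBG_FUNC_END, 0, 0, 0, 0);
 */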
3e170ce0
A
1372/*
1373 * Adding enough padding to contain a full tracepoint for the last
1374 * portion of the string greatly simplifies the logic of splitting the
1375 * string between tracepoints. Full tracepoints can be generated using
1376 * the buffer itself, without having to manually add zeros to pad the
1377 * arguments.
1378 */
1379
1380/* 2 string args in first tracepoint and 9 string data tracepoints */
1381#define STR_BUF_ARGS (2 + (9 * 4))
1382/* times the size of each arg on K64 */
1383#define MAX_STR_LEN (STR_BUF_ARGS * sizeof(uint64_t))
1384/* on K32, ending straddles a tracepoint, so reserve blanks */
1385#define STR_BUF_SIZE (MAX_STR_LEN + (2 * sizeof(uint32_t)))
1386
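/*
 * Worked out for K64: STR_BUF_ARGS = 2 + (9 * 4) = 38 pointer-sized
 * arguments, so MAX_STR_LEN = 38 * 8 = 304 bytes of string data -- one
 * tracepoint carrying 2 string args plus up to 9 follow-on tracepoints
 * carrying 4 each. STR_BUF_SIZE adds two 32-bit slots so that on K32 the
 * final chunk may straddle a tracepoint boundary without overrunning.
 */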
1387/*
1388 * This function does no error checking and assumes that it is called with
1389 * the correct arguments, including that the buffer pointed to by str is at
1390 * least STR_BUF_SIZE bytes. However, str must be aligned to word-size and
1391 * be NUL-terminated. In cases where a string can fit evenly into a final
1392 * tracepoint without its NUL-terminator, this function will not end those
1393 * strings with a NUL in trace. It's up to clients to look at the function
1394 * qualifier for DBG_FUNC_END in this case, to end the string.
1395 */
1396static uint64_t
1397kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
1398 size_t str_len)
1399{
1400 /* str must be word-aligned */
1401 uintptr_t *str = vstr;
1402 size_t written = 0;
1403 uintptr_t thread_id;
1404 int i;
1405 uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING,
1406 TRACE_STRING_GLOBAL);
1407
1408 thread_id = (uintptr_t)thread_tid(current_thread());
1409
1410 /* if the ID is being invalidated, just emit that */
1411 if (str_id != 0 && str_len == 0) {
1412 kernel_debug_internal(trace_debugid | DBG_FUNC_START | DBG_FUNC_END,
1413 (uintptr_t)debugid, (uintptr_t)str_id, 0, 0,
1414 thread_id);
1415 return str_id;
1416 }
1417
1418 /* generate an ID, if necessary */
1419 if (str_id == 0) {
1420 str_id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
1421 str_id = (str_id & STR_ID_MASK) | g_str_id_signature;
1422 }
1423
1424 trace_debugid |= DBG_FUNC_START;
1425 /* string can fit in a single tracepoint */
1426 if (str_len <= (2 * sizeof(uintptr_t))) {
1427 trace_debugid |= DBG_FUNC_END;
1428 }
1429
1430 kernel_debug_internal(trace_debugid, (uintptr_t)debugid,
1431 (uintptr_t)str_id, str[0],
1432 str[1], thread_id);
1433
1434 trace_debugid &= KDBG_EVENTID_MASK;
1435 i = 2;
1436 written += 2 * sizeof(uintptr_t);
1437
1438 for (; written < str_len; i += 4, written += 4 * sizeof(uintptr_t)) {
1439 if ((written + (4 * sizeof(uintptr_t))) >= str_len) {
1440 trace_debugid |= DBG_FUNC_END;
1441 }
1442 kernel_debug_internal(trace_debugid, str[i],
1443 str[i + 1],
1444 str[i + 2],
1445 str[i + 3], thread_id);
1446 }
1447
1448 return str_id;
1449}
1450
1451/*
1452 * Returns true if the current process can emit events, and false otherwise.
1453 * Trace system and scheduling events circumvent this check, as do events
1454 * emitted in interrupt context.
1455 */
1456static boolean_t
1457kdebug_current_proc_enabled(uint32_t debugid)
1458{
1459 /* can't determine current process in interrupt context */
1460 if (ml_at_interrupt_context()) {
1461 return TRUE;
1462 }
1463
1464 /* always emit trace system and scheduling events */
1465 if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
1466 (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
1467 {
1468 return TRUE;
1469 }
1470
1471 if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
1472 proc_t cur_proc = current_proc();
1473
1474 /* only the process with the kdebug bit set is allowed */
1475 if (cur_proc && !(cur_proc->p_kdebug)) {
1476 return FALSE;
1477 }
1478 } else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
1479 proc_t cur_proc = current_proc();
1480
1481 /* every process except the one with the kdebug bit set is allowed */
1482 if (cur_proc && cur_proc->p_kdebug) {
1483 return FALSE;
1484 }
1485 }
1486
1487 return TRUE;
1488}
1489
1490/*
1491 * Returns true if the debugid is disabled by filters, and false if the
1492 * debugid is allowed to be traced. A debugid may not be traced if the
1493 * typefilter disables its class and subclass, it's outside a range
1494 * check, or if it's not an allowed debugid in a value check. Trace
1495 * system events bypass this check.
1496 */
1497static boolean_t
1498kdebug_debugid_enabled(uint32_t debugid)
1499{
1500 boolean_t is_enabled = TRUE;
1501
1502 /* if no filtering is enabled */
1503 if (!kd_ctrl_page.kdebug_slowcheck) {
1504 return TRUE;
1505 }
1506
1507 if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
1508 return TRUE;
1509 }
1510
1511 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1512 disable_preemption();
1513
1514 /*
1515 * Recheck if typefilter is still being used. If tracing is being
1516 * disabled, there's a 100ms sleep on the other end to keep the
1517 * bitmap around for this check.
1518 */
1519 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1520 if (!(isset(type_filter_bitmap, KDBG_EXTRACT_CSC(debugid)))) {
1521 is_enabled = FALSE;
1522 }
1523 }
1524
1525 enable_preemption();
1526 } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
1527 if (debugid < kdlog_beg || debugid > kdlog_end) {
1528 is_enabled = FALSE;
1529 }
1530 } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
1531 if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
1532 (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
1533 (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
1534 (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
1535 {
1536 is_enabled = FALSE;
1537 }
1538 }
1539
1540 return is_enabled;
1541}
1542
1543/*
1544 * Returns 0 if a string can be traced with these arguments. Returns errno
1545 * value if error occurred.
1546 */
1547static errno_t
1548kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
1549{
1550 /* if there are function qualifiers on the debugid */
1551 if (debugid & ~KDBG_EVENTID_MASK) {
1552 return EINVAL;
1553 }
1554
1555 if (kdebug_validate_debugid(debugid)) {
1556 return EPERM;
1557 }
1558
1559 if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
1560 return EINVAL;
1561 }
1562
1563 return 0;
1564}
1565
1566/*
1567 * Implementation of KPI kernel_debug_string.
1568 */
1569int
1570kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
1571{
1572 /* arguments to tracepoints must be word-aligned */
1573 __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
1574 assert_static(sizeof(str_buf) > MAX_STR_LEN);
1575 vm_size_t len_copied;
1576 int err;
1577
1578 assert(str_id);
1579
1580 if (__probable(kdebug_enable == 0)) {
1581 return 0;
1582 }
1583
1584 if (!kdebug_current_proc_enabled(debugid)) {
1585 return 0;
1586 }
1587
1588 if (!kdebug_debugid_enabled(debugid)) {
1589 return 0;
1590 }
1591
1592 if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
1593 return err;
1594 }
1595
1596 if (str == NULL) {
 1597		if (*str_id == 0) {
1598 return EINVAL;
1599 }
1600
1601 *str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
1602 return 0;
1603 }
1604
1605 memset(str_buf, 0, sizeof(str_buf));
1606 len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
1607 *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
1608 len_copied);
1609 return 0;
1610}
1611
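/*
 * KPI usage sketch (MY_DEBUGID is a hypothetical, non-DBG_TRACE event
 * code): passing *str_id == 0 mints a new ID; passing str == NULL with a
 * previously returned ID invalidates it in the trace stream.
 *
 *	uint64_t str_id = 0;
 *	kernel_debug_string(MY_DEBUGID, &str_id, "/path/of/interest");
 *	...
 *	kernel_debug_string(MY_DEBUGID, &str_id, NULL);
 */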
1612/*
1613 * Support syscall kdebug_trace_string.
1614 */
1615int
1616kdebug_trace_string(__unused struct proc *p,
1617 struct kdebug_trace_string_args *uap,
1618 uint64_t *retval)
1619{
1620 __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
1621 assert_static(sizeof(str_buf) > MAX_STR_LEN);
1622 size_t len_copied;
1623 int err;
1624
1625 if (__probable(kdebug_enable == 0)) {
1626 return 0;
1627 }
1628
1629 if (!kdebug_current_proc_enabled(uap->debugid)) {
1630 return 0;
1631 }
1632
1633 if (!kdebug_debugid_enabled(uap->debugid)) {
1634 return 0;
1635 }
1636
1637 if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
1638 return err;
1639 }
1640
1641 if (uap->str == USER_ADDR_NULL) {
1642 if (uap->str_id == 0) {
1643 return EINVAL;
1644 }
1645
1646 *retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
1647 NULL, 0);
1648 return 0;
1649 }
1650
1651 memset(str_buf, 0, sizeof(str_buf));
1652 err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);
1653
1654 /* it's alright to truncate the string, so allow ENAMETOOLONG */
1655 if (err == ENAMETOOLONG) {
1656 str_buf[MAX_STR_LEN] = '\0';
1657 } else if (err) {
1658 return err;
1659 }
1660
1661 if (len_copied <= 1) {
1662 return EINVAL;
1663 }
1664
1665 /* convert back to a length */
1666 len_copied--;
1667
1668 *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
1669 len_copied);
1670 return 0;
1671}
1672
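/*
 * Userspace sketch, assuming the libsyscall wrapper
 * uint64_t kdebug_trace_string(uint32_t debugid, uint64_t str_id,
 * const char *str). As with the KPI above, a zero str_id mints a new ID
 * and a NULL str invalidates an existing one:
 *
 *	uint64_t id = kdebug_trace_string(code, 0, "interesting string");
 *	...
 *	kdebug_trace_string(code, id, NULL);
 */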
6d2010ae
A
1673static void
1674kdbg_lock_init(void)
1675{
1676 if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
1677 return;
6d2010ae 1678
0c530ab8 1679 /*
91447636
A
1680 * allocate lock group attribute and group
1681 */
0c530ab8
A
1682 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
1683 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
91447636
A
1684
1685 /*
1686 * allocate the lock attribute
1687 */
0c530ab8 1688 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
91447636
A
1689
1690
1691 /*
6d2010ae 1692 * allocate and initialize mutex's
91447636 1693 */
0c530ab8 1694 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
b0d623f7 1695 kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
6d2010ae 1696 kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
91447636 1697
6d2010ae 1698 kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
91447636
A
1699}
1700
1701
1702int
6d2010ae 1703kdbg_bootstrap(boolean_t early_trace)
1c79356b 1704{
6d2010ae 1705 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
91447636 1706
6d2010ae 1707 return (create_buffers(early_trace));
1c79356b
A
1708}
1709
0c530ab8 1710int
6d2010ae 1711kdbg_reinit(boolean_t early_trace)
1c79356b 1712{
b0d623f7 1713 int ret = 0;
91447636 1714
b0d623f7
A
1715 /*
1716 * Disable trace collecting
1717 * First make sure we're not in
1718 * the middle of cutting a trace
1719 */
316670eb 1720 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1c79356b 1721
b0d623f7
A
1722 /*
1723 * make sure the SLOW_NOLOG is seen
1724 * by everyone that might be trying
1725 * to cut a trace..
1726 */
1727 IOSleep(100);
1c79356b 1728
b0d623f7 1729 delete_buffers();
1c79356b 1730
6d2010ae 1731 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
b0d623f7 1732 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
6d2010ae 1733 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
b0d623f7 1734 kd_mapsize = 0;
3e170ce0 1735 kd_mapptr = NULL;
b0d623f7
A
1736 kd_mapcount = 0;
1737 }
6d2010ae
A
1738 ret = kdbg_bootstrap(early_trace);
1739
1740 RAW_file_offset = 0;
1741 RAW_file_written = 0;
1c79356b 1742
b0d623f7 1743 return(ret);
1c79356b
A
1744}
1745
0c530ab8
A
1746void
1747kdbg_trace_data(struct proc *proc, long *arg_pid)
55e303ae 1748{
b0d623f7
A
1749 if (!proc)
1750 *arg_pid = 0;
1751 else
1752 *arg_pid = proc->p_pid;
55e303ae
A
1753}
1754
1755
0c530ab8
A
1756void
1757kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1c79356b 1758{
b0d623f7
A
1759 char *dbg_nameptr;
1760 int dbg_namelen;
1761 long dbg_parms[4];
1762
1763 if (!proc) {
1764 *arg1 = 0;
1765 *arg2 = 0;
1766 *arg3 = 0;
1767 *arg4 = 0;
1768 return;
1769 }
1770 /*
1771 * Collect the pathname for tracing
1772 */
1773 dbg_nameptr = proc->p_comm;
1774 dbg_namelen = (int)strlen(proc->p_comm);
1775 dbg_parms[0]=0L;
1776 dbg_parms[1]=0L;
1777 dbg_parms[2]=0L;
1778 dbg_parms[3]=0L;
1c79356b 1779
b0d623f7
A
1780 if(dbg_namelen > (int)sizeof(dbg_parms))
1781 dbg_namelen = (int)sizeof(dbg_parms);
1c79356b 1782
b0d623f7 1783 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1c79356b 1784
b0d623f7
A
1785 *arg1=dbg_parms[0];
1786 *arg2=dbg_parms[1];
1787 *arg3=dbg_parms[2];
1788 *arg4=dbg_parms[3];
1c79356b
A
1789}
1790
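/*
 * Illustrative note (not part of the original source): kdbg_trace_string()
 * packs the raw bytes of p_comm into four longs so the name travels in the
 * event arguments themselves rather than as a kernel pointer.  For example,
 * tracing "launchd" on a little-endian LP64 system (8-byte longs) yields
 *
 *	*arg1 = 0x006468636e75616c	("launchd\0" read as a long)
 *	*arg2 = *arg3 = *arg4 = 0
 *
 * Names longer than sizeof(dbg_parms) bytes are truncated and may not be
 * NUL-terminated within the four longs.
 */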
91447636 1791static void
0c530ab8 1792kdbg_resolve_map(thread_t th_act, void *opaque)
1c79356b 1793{
b0d623f7
A
1794 kd_threadmap *mapptr;
1795 krt_t *t = (krt_t *)opaque;
1796
1797 if (t->count < t->maxcount) {
1798 mapptr = &t->map[t->count];
1799 mapptr->thread = (uintptr_t)thread_tid(th_act);
1800
1801 (void) strlcpy (mapptr->command, t->atts->task_comm,
1802 sizeof(t->atts->task_comm));
1803 /*
1804 * Some kernel threads have no associated pid.
1805 * We still need to mark the entry as valid.
1806 */
1807 if (t->atts->pid)
1808 mapptr->valid = t->atts->pid;
1809 else
1810 mapptr->valid = 1;
1811
1812 t->count++;
1813 }
1c79356b
A
1814}
1815
39236c6e
A
1816/*
1817 *
1818 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
1819 *
1820 * You may provide a buffer and size, or if you set the buffer to NULL, a
1821 * buffer of sufficient size will be allocated.
1822 *
1823 * If you provide a buffer and it is too small, sets cpumap_size to the number
1824 * of bytes required and returns EINVAL.
1825 *
1826 * On success, if you provided a buffer, cpumap_size is set to the number of
1827 * bytes written. If you did not provide a buffer, cpumap is set to the newly
1828 * allocated buffer and cpumap_size is set to the number of bytes allocated.
1829 *
1830 * NOTE: It may seem redundant to pass both iops and a cpu_count.
1831 *
1832 * We may be reporting data from "now", or from the "past".
1833 *
1834 * The "now" data would be for something like kdbg_readcurcpumap().
1835 * The "past" data would be for kdbg_readcpumap().
1836 *
1837 * If we do not pass both iops and cpu_count, and iops is NULL, this function
1838 * will need to read "now" state to get the number of cpus, which would be in
1839 * error if we were reporting "past" state.
1840 */
1841
1842int
1843kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
1844{
1845 assert(cpumap);
1846 assert(cpumap_size);
1847 assert(cpu_count);
1848 assert(!iops || iops->cpu_id + 1 == cpu_count);
1849
1850 uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
1851 uint32_t bytes_available = *cpumap_size;
1852 *cpumap_size = bytes_needed;
1853
1854 if (*cpumap == NULL) {
3e170ce0 1855 if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
39236c6e
A
1856 return ENOMEM;
1857 }
1858 } else if (bytes_available < bytes_needed) {
1859 return EINVAL;
1860 }
1861
1862 kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
1863
1864 header->version_no = RAW_VERSION1;
1865 header->cpu_count = cpu_count;
1866
1867 kd_cpumap* cpus = (kd_cpumap*)&header[1];
1868
1869 int32_t index = cpu_count - 1;
1870 while (iops) {
1871 cpus[index].cpu_id = iops->cpu_id;
1872 cpus[index].flags = KDBG_CPUMAP_IS_IOP;
1873 bzero(cpus[index].name, sizeof(cpus->name));
1874 strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
1875
1876 iops = iops->next;
1877 index--;
1878 }
1879
1880 while (index >= 0) {
1881 cpus[index].cpu_id = index;
1882 cpus[index].flags = 0;
1883 bzero(cpus[index].name, sizeof(cpus->name));
1884 strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
1885
1886 index--;
1887 }
1888
1889 return KERN_SUCCESS;
1890}
1891
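/*
 * Illustrative sketch (not part of the original source): the two calling
 * conventions described in the block comment above.  kdbg_readcpumap()
 * later in this file uses the allocate-for-me form.
 */
#if 0
	uint8_t  *cpumap = NULL;
	uint32_t  cpumap_size = 0;

	/* Form 1: pass NULL and let the routine allocate; the caller owns
	 * the buffer and must kmem_free() it when done. */
	if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops,
	    kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
		/* ... consume cpumap ... */
		kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
	}

	/* Form 2: supply a buffer; on EINVAL, cpumap_size has been updated
	 * to the number of bytes actually required. */
	uint8_t   stackbuf[256];
	uint8_t  *bufp = stackbuf;
	uint32_t  bufsize = sizeof(stackbuf);
	int       error = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops,
	    kd_ctrl_page.kdebug_cpus, &bufp, &bufsize);
#endif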
0c530ab8 1892void
39236c6e 1893kdbg_thrmap_init(void)
1c79356b 1894{
39236c6e
A
1895 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
1896 return;
1897
1898 kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
1899
1900 if (kd_mapptr)
1901 kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
1902}
1903
1904
1905kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
1906{
1907 kd_threadmap *mapptr;
b0d623f7
A
1908 struct proc *p;
1909 struct krt akrt;
1910 int tts_count; /* number of task-to-string structures */
1911 struct tts *tts_mapptr;
1912 unsigned int tts_mapsize = 0;
b0d623f7 1913 int i;
39236c6e 1914 vm_offset_t kaddr;
1c79356b 1915
b0d623f7
A
1916 /*
1917 * need to use PROC_SCANPROCLIST with proc_iterate
1918 */
2d21ac55
A
1919 proc_list_lock();
1920
b0d623f7
A
1921 /*
1922 * Calculate the sizes of map buffers
1923 */
39236c6e
A
1924 for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
1925 *mapcount += get_task_numacts((task_t)p->task);
b0d623f7
A
1926 tts_count++;
1927 }
2d21ac55
A
1928 proc_list_unlock();
1929
9bccf70c
A
1930 /*
1931 * The proc count could change during buffer allocation,
1932 * so introduce a small fudge factor to bump up the
1933 * buffer sizes. This gives new tasks some chance of
39236c6e 1934	 * making it into the tables. Bump up by 25%.
9bccf70c 1935 */
39236c6e
A
1936 *mapcount += *mapcount/4;
1937 tts_count += tts_count/4;
1938
1939 *mapsize = *mapcount * sizeof(kd_threadmap);
9bccf70c 1940
39236c6e
A
1941 if (count && count < *mapcount)
1942 return (0);
b0d623f7 1943
3e170ce0 1944 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
39236c6e
A
1945 bzero((void *)kaddr, *mapsize);
1946 mapptr = (kd_threadmap *)kaddr;
b0d623f7 1947 } else
39236c6e 1948 return (0);
1c79356b 1949
9bccf70c 1950 tts_mapsize = tts_count * sizeof(struct tts);
9bccf70c 1951
3e170ce0 1952 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
39236c6e
A
1953 bzero((void *)kaddr, tts_mapsize);
1954 tts_mapptr = (struct tts *)kaddr;
1955 } else {
1956 kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
9bccf70c 1957
39236c6e
A
1958 return (0);
1959 }
9bccf70c
A
1960 /*
1961	 * We need to save each proc's command string
1962 * and take a reference for each task associated
1963 * with a valid process
1964 */
2d21ac55 1965
39236c6e
A
1966 proc_list_lock();
1967
1968 /*
1969 * should use proc_iterate
1970 */
1971 for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
1972 if (p->p_lflag & P_LEXIT)
1973 continue;
1974
1975 if (p->task) {
1976 task_reference(p->task);
1977 tts_mapptr[i].task = p->task;
1978 tts_mapptr[i].pid = p->p_pid;
1979 (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
1980 i++;
1981 }
9bccf70c 1982 }
39236c6e 1983 tts_count = i;
9bccf70c 1984
39236c6e 1985 proc_list_unlock();
9bccf70c 1986
39236c6e
A
1987 /*
1988 * Initialize thread map data
1989 */
1990 akrt.map = mapptr;
1991 akrt.count = 0;
1992 akrt.maxcount = *mapcount;
1c79356b 1993
39236c6e
A
1994 for (i = 0; i < tts_count; i++) {
1995 akrt.atts = &tts_mapptr[i];
1996 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
1997 task_deallocate((task_t) tts_mapptr[i].task);
b0d623f7 1998 }
39236c6e
A
1999 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
2000
2001 *mapcount = akrt.count;
2002
2003 return (mapptr);
1c79356b
A
2004}
2005
91447636
A
2006static void
2007kdbg_clear(void)
1c79356b 2008{
3e170ce0 2009 /*
91447636
A
2010 * Clean up the trace buffer
2011 * First make sure we're not in
2012 * the middle of cutting a trace
2013 */
316670eb 2014 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
3e170ce0 2015 kdbg_disable_typefilter();
91447636 2016
0c530ab8
A
2017 /*
2018 * make sure the SLOW_NOLOG is seen
2019 * by everyone that might be trying
2020 * to cut a trace..
2021 */
2022 IOSleep(100);
2023
3e170ce0 2024 global_state_pid = -1;
6d2010ae
A
2025 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2026 kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
2027 kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
316670eb 2028
3e170ce0 2029 kdbg_deallocate_typefilter();
0c530ab8 2030 delete_buffers();
316670eb 2031 nkdbufs = 0;
1c79356b
A
2032
2033 /* Clean up the thread map buffer */
6d2010ae 2034 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
b0d623f7
A
2035 if (kd_mapptr) {
2036 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
2037 kd_mapptr = (kd_threadmap *) 0;
2038 }
1c79356b
A
2039 kd_mapsize = 0;
2040 kd_mapcount = 0;
6d2010ae
A
2041
2042 RAW_file_offset = 0;
2043 RAW_file_written = 0;
1c79356b
A
2044}
2045
0c530ab8 2046int
1c79356b
A
2047kdbg_setpid(kd_regtype *kdr)
2048{
b0d623f7
A
2049 pid_t pid;
2050 int flag, ret=0;
2051 struct proc *p;
2052
2053 pid = (pid_t)kdr->value1;
2054 flag = (int)kdr->value2;
2055
2056 if (pid > 0) {
2057 if ((p = proc_find(pid)) == NULL)
2058 ret = ESRCH;
2059 else {
2060 if (flag == 1) {
2061 /*
2062 * turn on pid check for this and all pids
2063 */
6d2010ae
A
2064 kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
2065 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
2066 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2067
b0d623f7
A
2068 p->p_kdebug = 1;
2069 } else {
2070 /*
2071 * turn off pid check for this pid value
2072 * Don't turn off all pid checking though
2073 *
6d2010ae 2074 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
b0d623f7
A
2075 */
2076 p->p_kdebug = 0;
2077 }
2078 proc_rele(p);
2079 }
1c79356b 2080 }
b0d623f7
A
2081 else
2082 ret = EINVAL;
2083
2084 return(ret);
1c79356b
A
2085}
2086
2087/* This is for pid exclusion in the trace buffer */
0c530ab8 2088int
1c79356b
A
2089kdbg_setpidex(kd_regtype *kdr)
2090{
b0d623f7
A
2091 pid_t pid;
2092 int flag, ret=0;
2093 struct proc *p;
2094
2095 pid = (pid_t)kdr->value1;
2096 flag = (int)kdr->value2;
2097
2098 if (pid > 0) {
2099 if ((p = proc_find(pid)) == NULL)
2100 ret = ESRCH;
2101 else {
2102 if (flag == 1) {
2103 /*
2104 * turn on pid exclusion
2105 */
6d2010ae
A
2106 kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
2107 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
2108 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
b0d623f7
A
2109
2110 p->p_kdebug = 1;
2111 }
2112 else {
2113 /*
2114 * turn off pid exclusion for this pid value
2115 * Don't turn off all pid exclusion though
2116 *
6d2010ae 2117 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
b0d623f7
A
2118 */
2119 p->p_kdebug = 0;
2120 }
2121 proc_rele(p);
2122 }
2123 } else
2124 ret = EINVAL;
2125
2126 return(ret);
1c79356b
A
2127}
2128
b0d623f7
A
2129
2130/*
2131 * This is for setting a maximum decrementer value
2132 */
0c530ab8 2133int
1c79356b
A
2134kdbg_setrtcdec(kd_regtype *kdr)
2135{
b0d623f7
A
2136 int ret = 0;
2137 natural_t decval;
1c79356b 2138
b0d623f7 2139 decval = (natural_t)kdr->value1;
1c79356b 2140
b0d623f7
A
2141 if (decval && decval < KDBG_MINRTCDEC)
2142 ret = EINVAL;
3a60a9f5
A
2143 else
2144 ret = ENOTSUP;
1c79356b 2145
b0d623f7 2146 return(ret);
1c79356b
A
2147}
2148
316670eb
A
2149int
2150kdbg_enable_typefilter(void)
2151{
3e170ce0
A
2152 int ret;
2153
2154 /* Allocate memory for bitmap if not already allocated */
2155 ret = kdbg_allocate_typefilter();
2156 if (ret) {
2157 return ret;
316670eb 2158 }
316670eb
A
2159
2160 /* Turn off range and value checks */
2161 kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
2162
2163 /* Enable filter checking */
2164 kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
2165 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2166 return 0;
2167}
2168
2169int
2170kdbg_disable_typefilter(void)
2171{
2172 /* Disable filter checking */
2173 kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
3e170ce0 2174
316670eb
A
2175 /* Turn off slow checks unless pid checks are using them */
2176 if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
2177 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2178 else
2179 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
316670eb 2180
3e170ce0
A
2181 /* typefilter bitmap will be deallocated later */
2182
2183 return 0;
2184}
2185
2186static int
2187kdbg_allocate_typefilter(void)
2188{
2189 if (type_filter_bitmap == NULL) {
2190 vm_offset_t bitmap = 0;
2191
2192 if (kmem_alloc(kernel_map, &bitmap, KDBG_TYPEFILTER_BITMAP_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
2193 return ENOSPC;
2194 }
2195
2196 bzero((void *)bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
2197
2198 if (!OSCompareAndSwapPtr(NULL, (void *)bitmap, &type_filter_bitmap)) {
2199 kmem_free(kernel_map, bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
2200 return 0; /* someone assigned a buffer */
2201 }
2202 } else {
2203 bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
2204 }
2205
2206 return 0;
2207}
2208
2209static int
2210kdbg_deallocate_typefilter(void)
2211{
2212 if(type_filter_bitmap) {
2213 vm_offset_t bitmap = (vm_offset_t)type_filter_bitmap;
2214
2215 if (OSCompareAndSwapPtr((void *)bitmap, NULL, &type_filter_bitmap)) {
2216 kmem_free(kernel_map, bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
2217 return 0;
2218 } else {
2219 /* already swapped */
2220 }
2221 }
316670eb 2222
316670eb
A
2223 return 0;
2224}
2225
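/*
 * Illustrative sketch (not part of the original source): the typefilter is
 * a bitmap with one bit per class/subclass pair, indexed by the upper 16
 * bits of a debugid.  A user-space tool builds such a bitmap and installs
 * it through the KERN_KDSET_TYPEFILTER sysctl handled in kdbg_control()
 * below.  This assumes the KDBG_CODE()/KDBG_EXTRACT_CSC() macros and the
 * setbit() helper from the headers included by this file.
 */
#if 0	/* userspace example only */
	uint8_t filter[KDBG_TYPEFILTER_BITMAP_SIZE];

	memset(filter, 0, sizeof(filter));

	/* pass every subclass of DBG_MACH */
	for (int subclass = 0; subclass < 256; subclass++)
		setbit(filter, KDBG_EXTRACT_CSC(KDBG_CODE(DBG_MACH, subclass, 0)));
#endif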
0c530ab8 2226int
1c79356b
A
2227kdbg_setreg(kd_regtype * kdr)
2228{
0c530ab8 2229 int ret=0;
1c79356b
A
2230 unsigned int val_1, val_2, val;
2231 switch (kdr->type) {
2232
2233 case KDBG_CLASSTYPE :
2234 val_1 = (kdr->value1 & 0xff);
2235 val_2 = (kdr->value2 & 0xff);
2236 kdlog_beg = (val_1<<24);
2237 kdlog_end = (val_2<<24);
6d2010ae
A
2238 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2239 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2240 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
2241 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1c79356b
A
2242 break;
2243 case KDBG_SUBCLSTYPE :
2244 val_1 = (kdr->value1 & 0xff);
2245 val_2 = (kdr->value2 & 0xff);
2246 val = val_2 + 1;
2247 kdlog_beg = ((val_1<<24) | (val_2 << 16));
2248 kdlog_end = ((val_1<<24) | (val << 16));
6d2010ae
A
2249 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2250 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2251 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
2252 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1c79356b
A
2253 break;
2254 case KDBG_RANGETYPE :
2255 kdlog_beg = (kdr->value1);
2256 kdlog_end = (kdr->value2);
6d2010ae
A
2257 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2258 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2259 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
2260 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1c79356b
A
2261 break;
2262 case KDBG_VALCHECK:
2263 kdlog_value1 = (kdr->value1);
2264 kdlog_value2 = (kdr->value2);
2265 kdlog_value3 = (kdr->value3);
2266 kdlog_value4 = (kdr->value4);
6d2010ae
A
2267 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2268 kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
2269 kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
2270 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1c79356b
A
2271 break;
2272 case KDBG_TYPENONE :
6d2010ae 2273 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
91447636 2274
316670eb
A
2275 if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
2276 KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
2277 KDBG_TYPEFILTER_CHECK)) )
6d2010ae 2278 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
91447636 2279 else
6d2010ae 2280 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
91447636 2281
1c79356b
A
2282 kdlog_beg = 0;
2283 kdlog_end = 0;
2284 break;
2285 default :
2286 ret = EINVAL;
2287 break;
2288 }
2289 return(ret);
2290}
2291
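/*
 * Illustrative note (not part of the original source): a worked example of
 * the KDBG_SUBCLSTYPE case above.  With value1 = DBG_MACH (0x01) and
 * value2 = DBG_MACH_SCHED (0x40):
 *
 *	kdlog_beg = (0x01 << 24) | (0x40 << 16) = 0x01400000
 *	kdlog_end = (0x01 << 24) | (0x41 << 16) = 0x01410000
 *
 * so with KDBG_RANGECHECK set, only debugids falling between those two
 * values (i.e. scheduler events) survive the range test in the logging
 * path.
 */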
3e170ce0
A
2292static int
2293kdbg_write_to_vnode(caddr_t buffer, size_t size, vnode_t vp, vfs_context_t ctx, off_t file_offset)
2294{
2295 return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
2296 vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2297}
2298
39236c6e 2299int
3e170ce0 2300kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag, uint32_t sub_tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
39236c6e 2301{
39236c6e 2302 int ret = KERN_SUCCESS;
3e170ce0
A
2303 kd_chunk_header_v3 header;
2304
2305 header.tag = tag;
2306 header.sub_tag = sub_tag;
2307 header.length = length;
2308
2309 // Check that only one of them is valid
2310 assert(!buffer ^ !vp);
2311 assert((vp == NULL) || (ctx != NULL));
2312
2313	 // Write the chunk header to the file or copy it out to the user buffer
2314 if (buffer || vp) {
2315 if (vp) {
2316 ret = kdbg_write_to_vnode((caddr_t)&header, sizeof(kd_chunk_header_v3), vp, ctx, RAW_file_offset);
2317 if (ret) {
2318 goto write_error;
39236c6e 2319 }
3e170ce0
A
2320 RAW_file_offset += (sizeof(kd_chunk_header_v3));
2321 }
2322 else {
2323 ret = copyout(&header, buffer, sizeof(kd_chunk_header_v3));
2324 if (ret) {
2325 goto write_error;
2326 }
2327 }
2328 }
2329write_error:
2330 return ret;
39236c6e
A
2331}
2332
2333int
3e170ce0 2334kdbg_write_v3_chunk_header_to_buffer(void * buffer, uint32_t tag, uint32_t sub_tag, uint64_t length)
39236c6e 2335{
3e170ce0 2336 kd_chunk_header_v3 header;
39236c6e 2337
3e170ce0
A
2338 header.tag = tag;
2339 header.sub_tag = sub_tag;
2340 header.length = length;
39236c6e 2341
3e170ce0
A
2342 if (!buffer) {
2343 return 0;
2344 }
2345
2346 memcpy(buffer, &header, sizeof(kd_chunk_header_v3));
2347
2348 return (sizeof(kd_chunk_header_v3));
2349}
2350
2351int
2352kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void *payload, uint64_t payload_size, int fd)
2353{
2354 proc_t p;
2355 struct vfs_context context;
2356 struct fileproc *fp;
2357 vnode_t vp;
2358 p = current_proc();
2359
2360 proc_fdlock(p);
2361 if ( (fp_lookup(p, fd, &fp, 1)) ) {
2362 proc_fdunlock(p);
2363 return EFAULT;
2364 }
2365
2366 context.vc_thread = current_thread();
2367 context.vc_ucred = fp->f_fglob->fg_cred;
2368
2369 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
2370 fp_drop(p, fd, fp, 1);
2371 proc_fdunlock(p);
2372 return EBADF;
2373 }
2374 vp = (struct vnode *) fp->f_fglob->fg_data;
2375 proc_fdunlock(p);
2376
2377 if ( (vnode_getwithref(vp)) == 0 ) {
2378 RAW_file_offset = fp->f_fglob->fg_offset;
2379
2380 kd_chunk_header_v3 chunk_header = { .tag = tag, .sub_tag = sub_tag, .length = length };
2381
2382 int ret = kdbg_write_to_vnode((caddr_t) &chunk_header, sizeof(kd_chunk_header_v3), vp, &context, RAW_file_offset);
2383 if (!ret) {
2384 RAW_file_offset += sizeof(kd_chunk_header_v3);
2385 }
2386
2387 ret = kdbg_write_to_vnode((caddr_t) payload, (size_t) payload_size, vp, &context, RAW_file_offset);
2388 if (!ret) {
2389 RAW_file_offset += payload_size;
2390 }
2391
2392 fp->f_fglob->fg_offset = RAW_file_offset;
2393 vnode_put(vp);
2394 }
2395
2396 fp_drop(p, fd, fp, 0);
2397 return KERN_SUCCESS;
2398}
2399
2400user_addr_t
2401kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
2402{
2403 uint64_t future_chunk_timestamp = 0;
2404 length += sizeof(uint64_t);
2405
2406 if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) {
2407 return 0;
2408 }
2409 if (buffer) {
2410 buffer += sizeof(kd_chunk_header_v3);
2411 }
2412
2413 // Check that only one of them is valid
2414 assert(!buffer ^ !vp);
2415 assert((vp == NULL) || (ctx != NULL));
2416
2417 // Write the 8-byte future_chunk_timestamp field in the payload
2418 if (buffer || vp) {
2419 if (vp) {
2420 int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset);
2421 if (!ret) {
2422 RAW_file_offset += (sizeof(uint64_t));
2423 }
2424 }
2425 else {
2426 if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) {
2427 return 0;
2428 }
2429 }
2430 }
2431
2432 return (buffer + sizeof(uint64_t));
2433}
2434
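/*
 * Illustrative layout note (not part of the original source): an event
 * chunk produced through the routine above is laid out as
 *
 *	kd_chunk_header_v3 { tag, sub_tag = V3_EVENT_DATA_VERSION, length }
 *	uint64_t future_chunk_timestamp   (currently written as 0)
 *	kd_buf events[]                   (appended later by kdbg_read())
 *
 * where length covers the timestamp plus the event payload that follows.
 */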
2435int
2436kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd)
2437{
2438 int ret = KERN_SUCCESS;
2439 kd_header_v3 header;
2440
2441 uint8_t* cpumap = 0;
2442 uint32_t cpumap_size = 0;
2443 uint32_t thrmap_size = 0;
2444
2445 size_t bytes_needed = 0;
2446
2447 // Check that only one of them is valid
2448 assert(!user_header ^ !fd);
2449 assert(user_header_size);
2450
2451 if ( !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ) {
2452 ret = EINVAL;
2453 goto bail;
2454 }
2455
2456 if ( !(user_header || fd) ) {
2457 ret = EINVAL;
2458 goto bail;
2459 }
2460
2461 // Initialize the cpu map
2462 ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size);
2463 if (ret != KERN_SUCCESS) {
2464 goto bail;
2465 }
2466
2467 // Check if a thread map is initialized
2468 if ( !kd_mapptr ) {
2469 ret = EINVAL;
2470 goto bail;
2471 }
2472 thrmap_size = kd_mapcount * sizeof(kd_threadmap);
2473
2474 // Setup the header.
2475	 // See v3 header description in sys/kdebug.h for more information.
2476
2477 header.tag = RAW_VERSION3;
2478 header.sub_tag = V3_HEADER_VERSION;
2479 header.length = ( sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header));
2480
2481 mach_timebase_info_data_t timebase = {0, 0};
2482 clock_timebase_info(&timebase);
2483 header.timebase_numer = timebase.numer;
2484 header.timebase_denom = timebase.denom;
2485 header.timestamp = 0;
2486 header.walltime_secs = 0;
2487 header.walltime_usecs = 0;
2488 header.timezone_minuteswest = 0;
2489 header.timezone_dst = 0;
2490
2491#if defined __LP64__
2492 header.flags = 1;
2493#else
2494 header.flags = 0;
2495#endif
2496
2497	 // If it's a buffer, check if we have enough space to copy the header and the maps.
2498 if (user_header) {
2499 bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3));
2500 if ( !user_header_size ) {
2501 ret = EINVAL;
2502 goto bail;
2503 }
2504 if (*user_header_size < bytes_needed) {
2505 ret = EINVAL;
2506 goto bail;
2507 }
2508 }
2509
2510 // Start writing the header
2511 if (fd) {
2512 void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3));
2513 size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3));
2514
2515 ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd);
2516 if (ret) {
2517 goto bail;
2518 }
2519 }
2520 else {
2521 if (copyout(&header, user_header, sizeof(kd_header_v3))) {
2522 ret = EFAULT;
2523 goto bail;
2524 }
2525 // Update the user pointer
2526 user_header += sizeof(kd_header_v3);
2527 }
2528
2529 // Write a cpu map. This is a sub chunk of the header
2530 cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header));
2531 size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header));
2532 if (fd) {
2533 ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd);
2534 if (ret) {
2535 goto bail;
2536 }
2537 }
2538 else {
2539 ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL);
2540 if (ret) {
2541 goto bail;
2542 }
2543 user_header += sizeof(kd_chunk_header_v3);
2544 if (copyout(cpumap, user_header, payload_size)) {
2545 ret = EFAULT;
2546 goto bail;
2547 }
2548 // Update the user pointer
2549 user_header += payload_size;
2550 }
2551
2552 // Write a thread map
2553 if (fd) {
2554 ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd);
2555 if (ret) {
2556 goto bail;
2557 }
2558 }
2559 else {
2560 ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL);
2561 if (ret) {
2562 goto bail;
2563 }
2564 user_header += sizeof(kd_chunk_header_v3);
2565 if (copyout(kd_mapptr, user_header, thrmap_size)) {
2566 ret = EFAULT;
2567 goto bail;
2568 }
2569 user_header += thrmap_size;
2570 }
2571
2572 if (fd) {
2573 RAW_file_written += bytes_needed;
2574 }
2575
2576 *user_header_size = bytes_needed;
2577bail:
2578 if (cpumap) {
2579 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
2580 }
2581 return (ret);
2582}
2583
2584int
2585kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
2586{
2587 uint8_t* cpumap = NULL;
2588 uint32_t cpumap_size = 0;
2589 int ret = KERN_SUCCESS;
2590
2591 if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
2592 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
2593 if (user_cpumap) {
2594 size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
2595 if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
2596 ret = EFAULT;
2597 }
2598 }
2599 *user_cpumap_size = cpumap_size;
2600 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
2601 } else
2602 ret = EINVAL;
39236c6e
A
2603 } else
2604 ret = EINVAL;
2605
2606 return (ret);
2607}
1c79356b 2608
91447636 2609int
3e170ce0 2610kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
1c79356b 2611{
3e170ce0 2612 kd_threadmap *mapptr;
39236c6e 2613 unsigned int mapsize;
3e170ce0
A
2614 unsigned int mapcount;
2615 unsigned int count = 0;
2616 int ret = 0;
1c79356b 2617
3e170ce0
A
2618 count = *bufsize/sizeof(kd_threadmap);
2619 *bufsize = 0;
1c79356b 2620
3e170ce0
A
2621 if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
2622 if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
2623 ret = EFAULT;
2624 else
2625 *bufsize = (mapcount * sizeof(kd_threadmap));
39236c6e 2626
3e170ce0
A
2627 kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
2628 } else
2629 ret = EINVAL;
39236c6e 2630
3e170ce0
A
2631 return (ret);
2632}
39236c6e 2633
3e170ce0
A
2634static int
2635kdbg_write_v1_plus_header(uint32_t count, vnode_t vp, vfs_context_t ctx)
2636{
2637 int ret = 0;
2638 RAW_header header;
2639 clock_sec_t secs;
2640 clock_usec_t usecs;
2641 char *pad_buf;
2642 uint32_t pad_size;
2643 uint32_t extra_thread_count = 0;
2644 uint32_t cpumap_size;
2645 unsigned int mapsize = kd_mapcount * sizeof(kd_threadmap);
6d2010ae 2646
3e170ce0
A
2647 /*
2648 * To write a RAW_VERSION1+ file, we
2649 * must embed a cpumap in the "padding"
2650 * used to page align the events following
2651 * the threadmap. If the threadmap happens
2652 * to not require enough padding, we
2653 * artificially increase its footprint
2654 * until it needs enough padding.
2655 */
6d2010ae 2656
3e170ce0
A
2657 assert(vp);
2658 assert(ctx);
39236c6e 2659
3e170ce0
A
2660 pad_size = PAGE_16KB - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
2661 cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
6d2010ae 2662
3e170ce0
A
2663 if (cpumap_size > pad_size) {
2664 /* If the cpu map doesn't fit in the current available pad_size,
2665 * we increase the pad_size by 16K. We do this so that the event
2666 * data is always available on a page aligned boundary for both
2667 * 4k and 16k systems. We enforce this alignment for the event
2668 * data so that we can take advantage of optimized file/disk writes.*/
2669 pad_size += PAGE_16KB;
2670 }
b0d623f7 2671
3e170ce0
A
2672 /* The way we are silently embedding a cpumap in the "padding" is by artificially
2673 * increasing the number of thread entries. However, we'll also need to ensure that
2674	 * the cpumap is embedded in the last 4K page before the event data is expected.
2675	 * This way the tools can read the data starting at the next page boundary on both
2676	 * 4K and 16K systems, preserving compatibility with older versions of the tools.
2677 */
2678 if (pad_size > PAGE_4KB) {
2679 pad_size -= PAGE_4KB;
2680 extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
2681 }
39236c6e 2682
3e170ce0
A
2683 header.version_no = RAW_VERSION1;
2684 header.thread_count = count + extra_thread_count;
2685
2686 clock_get_calendar_microtime(&secs, &usecs);
2687 header.TOD_secs = secs;
2688 header.TOD_usecs = usecs;
2689
2690 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
2691 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2692 if (ret)
2693 goto write_error;
2694 RAW_file_offset += sizeof(RAW_header);
2695
2696 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
2697 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2698 if (ret)
2699 goto write_error;
2700 RAW_file_offset += mapsize;
2701
2702 if (extra_thread_count) {
2703 pad_size = extra_thread_count * sizeof(kd_threadmap);
2704 pad_buf = (char *)kalloc(pad_size);
2705 if (!pad_buf) {
2706 ret = ENOMEM;
2707 goto write_error;
2708 }
2709 memset(pad_buf, 0, pad_size);
39236c6e 2710
3e170ce0
A
2711 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2712 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2713 kfree(pad_buf, pad_size);
6d2010ae 2714
3e170ce0
A
2715 if (ret)
2716 goto write_error;
2717 RAW_file_offset += pad_size;
6d2010ae 2718
3e170ce0
A
2719 }
2720
2721 pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
2722 if (pad_size) {
2723 pad_buf = (char *)kalloc(pad_size);
2724 if (!pad_buf) {
2725 ret = ENOMEM;
2726 goto write_error;
2727 }
2728 memset(pad_buf, 0, pad_size);
2729
2730 /*
2731 * embed a cpumap in the padding bytes.
2732 * older code will skip this.
2733 * newer code will know how to read it.
2734 */
2735 uint32_t temp = pad_size;
2736 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
2737 memset(pad_buf, 0, pad_size);
2738 }
2739
2740 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2741 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2742 kfree(pad_buf, pad_size);
2743
2744 if (ret)
2745 goto write_error;
2746 RAW_file_offset += pad_size;
2747 }
2748 RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
2749
2750write_error:
2751 return ret;
2752}
2753
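/*
 * Illustrative layout note (not part of the original source): the
 * RAW_VERSION1+ file produced by the routine above looks like
 *
 *	RAW_header
 *	kd_threadmap entries        (real entries plus zero-filled padding
 *	                             entries counted in thread_count)
 *	zero padding                (the final page of padding carries the
 *	                             embedded cpumap; older readers skip it)
 *	kd_buf events               (starting on a page boundary)
 */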
2754int
2755kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
2756{
2757
2758 int avail = 0;
2759 int ret = 0;
2760 uint32_t count = 0;
2761 unsigned int mapsize;
2762
2763 if ((!vp && !buffer) || (vp && buffer)) {
2764 return EINVAL;
2765 }
2766
2767 assert(number);
2768 assert((vp == NULL) || (ctx != NULL));
2769
2770 avail = *number;
2771 count = avail/sizeof (kd_threadmap);
2772 mapsize = kd_mapcount * sizeof(kd_threadmap);
2773
2774 if (count && (count <= kd_mapcount)) {
2775 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
2776 if (*number < mapsize)
2777 ret = EINVAL;
2778 else {
2779 if (vp) {
2780 ret = kdbg_write_v1_plus_header(count, vp, ctx);
2781 if (ret)
2782 goto write_error;
2783 }
2784 else {
39236c6e 2785 if (copyout(kd_mapptr, buffer, mapsize))
b0d623f7
A
2786 ret = EINVAL;
2787 }
2788 }
2789 }
2790 else
2791 ret = EINVAL;
2792 }
2793 else
2794 ret = EINVAL;
2795
6d2010ae
A
2796 if (ret && vp)
2797 {
b0d623f7
A
2798 count = 0;
2799
3e170ce0
A
2800 ret = kdbg_write_to_vnode((caddr_t)&count, sizeof(uint32_t), vp, ctx, RAW_file_offset);
2801 if (!ret) {
2802 RAW_file_offset += sizeof(uint32_t);
2803 RAW_file_written += sizeof(uint32_t);
2804 }
1c79356b 2805 }
6d2010ae
A
2806write_error:
2807 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
b0d623f7
A
2808 {
2809 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
6d2010ae 2810 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
b0d623f7
A
2811 kd_mapsize = 0;
2812 kd_mapptr = (kd_threadmap *) 0;
2813 kd_mapcount = 0;
2814 }
b0d623f7 2815 return(ret);
1c79356b
A
2816}
2817
3e170ce0
A
2818int
2819kdbg_readthrmap_v3(user_addr_t buffer, size_t *number, int fd)
2820{
2821 int avail = 0;
2822 int ret = 0;
2823 uint32_t count = 0;
2824 unsigned int mapsize;
2825
2826 if ((!fd && !buffer) || (fd && buffer)) {
2827 return EINVAL;
2828 }
2829
2830 assert(number);
2831
2832 avail = *number;
2833 count = avail/sizeof (kd_threadmap);
2834 mapsize = kd_mapcount * sizeof(kd_threadmap);
2835
2836 if (count && (count <= kd_mapcount)) {
2837 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
2838 if (*number < mapsize) {
2839 ret = EINVAL;
2840 }
2841 else {
2842 ret = kdbg_write_v3_header(buffer, number, fd);
2843 if (ret) {
2844 goto write_error;
2845 }
2846 }
2847 }
2848 else {
2849 ret = EINVAL;
2850 }
2851 }
2852 else {
2853 ret = EINVAL;
2854 }
2855write_error:
2856 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
2857 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
2858 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
2859 kd_mapsize = 0;
2860 kd_mapptr = (kd_threadmap *) 0;
2861 kd_mapcount = 0;
2862 }
2863 return(ret);
2864}
2865
9bccf70c 2866
316670eb 2867static int
2d21ac55
A
2868kdbg_set_nkdbufs(unsigned int value)
2869{
2870 /*
b0d623f7 2871 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
2d21ac55
A
2872 * 'value' is the desired number of trace entries
2873 */
b0d623f7 2874 unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
2d21ac55
A
2875
2876 if (value <= max_entries)
316670eb 2877 return (value);
2d21ac55 2878 else
316670eb 2879 return (max_entries);
2d21ac55
A
2880}
2881
2882
39236c6e 2883static int
316670eb
A
2884kdbg_enable_bg_trace(void)
2885{
39236c6e
A
2886 int ret = 0;
2887
316670eb
A
2888 if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
2889 nkdbufs = bg_nkdbufs;
39236c6e
A
2890 ret = kdbg_reinit(FALSE);
2891 if (0 == ret) {
2892 kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
2893 kdlog_bg_trace_running = TRUE;
2894 }
3e170ce0 2895 wakeup(&kdlog_bg_trace);
316670eb 2896 }
39236c6e 2897 return ret;
316670eb
A
2898}
2899
2900static void
2901kdbg_disable_bg_trace(void)
2902{
2903 if (kdlog_bg_trace_running == TRUE) {
2904 kdlog_bg_trace_running = FALSE;
2905 kdbg_clear();
2906 }
2907}
2908
2909
2910
9bccf70c
A
2911/*
2912 * This function is provided for the CHUD toolkit only.
2913 * int val:
2914 * zero disables kdebug_chudhook function call
2915 * non-zero enables kdebug_chudhook function call
2916 * char *fn:
2917 * address of the enabled kdebug_chudhook function
2918*/
2919
0c530ab8
A
2920void
2921kdbg_control_chud(int val, void *fn)
9bccf70c 2922{
6d2010ae
A
2923 kdbg_lock_init();
2924
2925 if (val) {
2926 /* enable chudhook */
9bccf70c 2927 kdebug_chudhook = fn;
6d2010ae 2928 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
9bccf70c
A
2929 }
2930 else {
6d2010ae
A
2931 /* disable chudhook */
2932 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
9bccf70c
A
2933 kdebug_chudhook = 0;
2934 }
2935}
1c79356b 2936
9bccf70c 2937
0c530ab8 2938int
c910b4d9 2939kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1c79356b 2940{
b0d623f7
A
2941 int ret = 0;
2942 size_t size = *sizep;
c910b4d9 2943 unsigned int value = 0;
91447636
A
2944 kd_regtype kd_Reg;
2945 kbufinfo_t kd_bufinfo;
2946 pid_t curpid;
6d2010ae 2947 proc_t p, curproc;
91447636 2948
c910b4d9 2949 if (name[0] == KERN_KDGETENTROPY ||
39236c6e 2950 name[0] == KERN_KDWRITETR ||
3e170ce0 2951 name[0] == KERN_KDWRITETR_V3 ||
39236c6e 2952 name[0] == KERN_KDWRITEMAP ||
3e170ce0 2953 name[0] == KERN_KDWRITEMAP_V3 ||
c910b4d9
A
2954 name[0] == KERN_KDEFLAGS ||
2955 name[0] == KERN_KDDFLAGS ||
2956 name[0] == KERN_KDENABLE ||
39236c6e 2957 name[0] == KERN_KDENABLE_BG_TRACE ||
c910b4d9
A
2958 name[0] == KERN_KDSETBUF) {
2959
2960 if ( namelen < 2 )
6d2010ae 2961 return(EINVAL);
c910b4d9
A
2962 value = name[1];
2963 }
2964
91447636 2965 kdbg_lock_init();
0c530ab8 2966
6d2010ae 2967 if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
39236c6e 2968 return(ENOSPC);
0c530ab8
A
2969
2970 lck_mtx_lock(kd_trace_mtx_sysctl);
91447636 2971
316670eb 2972 switch(name[0]) {
39236c6e 2973 case KERN_KDGETBUF:
b0d623f7 2974 /*
39236c6e
A
2975 * Does not alter the global_state_pid
2976 * This is a passive request.
b0d623f7 2977 */
39236c6e
A
2978 if (size < sizeof(kd_bufinfo.nkdbufs)) {
2979 /*
2980 * There is not enough room to return even
2981 * the first element of the info structure.
2982 */
b0d623f7 2983 ret = EINVAL;
39236c6e
A
2984 goto out;
2985 }
2986 kd_bufinfo.nkdbufs = nkdbufs;
2987 kd_bufinfo.nkdthreads = kd_mapcount;
2988
2989 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
2990 kd_bufinfo.nolog = 1;
2991 else
2992 kd_bufinfo.nolog = 0;
2993
2994 kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
2995#if defined(__LP64__)
2996 kd_bufinfo.flags |= KDBG_LP64;
2997#endif
2998 kd_bufinfo.bufid = global_state_pid;
2999
3000 if (size >= sizeof(kd_bufinfo)) {
3001 /*
3002 * Provide all the info we have
3003 */
3004 if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
3005 ret = EINVAL;
3006 } else {
3007 /*
3008 * For backwards compatibility, only provide
3009 * as much info as there is room for.
3010 */
3011 if (copyout(&kd_bufinfo, where, size))
3012 ret = EINVAL;
3013 }
3014 goto out;
fe8ab488
A
3015 case KERN_KDGETENTROPY: {
3016 /* Obsolescent - just fake with a random buffer */
3017 char *buffer = (char *) kalloc(size);
3018 read_frandom((void *) buffer, size);
3019 ret = copyout(buffer, where, size);
3020 kfree(buffer, size);
39236c6e 3021 goto out;
fe8ab488 3022 }
39236c6e
A
3023
3024 case KERN_KDENABLE_BG_TRACE:
3025 bg_nkdbufs = kdbg_set_nkdbufs(value);
3026 kdlog_bg_trace = TRUE;
3027 ret = kdbg_enable_bg_trace();
3028 goto out;
39236c6e
A
3029
3030 case KERN_KDDISABLE_BG_TRACE:
3031 kdlog_bg_trace = FALSE;
3032 kdbg_disable_bg_trace();
3033 goto out;
3e170ce0
A
3034
3035 case KERN_KDWAIT_BG_TRACE_RESET:
3036 if (!kdlog_bg_trace){
3037 ret = EINVAL;
3038 goto out;
3039 }
3040 wait_result_t wait_result = assert_wait(&kdlog_bg_trace, THREAD_ABORTSAFE);
3041 lck_mtx_unlock(kd_trace_mtx_sysctl);
3042 if (wait_result == THREAD_WAITING)
3043 wait_result = thread_block(THREAD_CONTINUE_NULL);
3044 if (wait_result == THREAD_INTERRUPTED)
3045 ret = EINTR;
3046 lck_mtx_lock(kd_trace_mtx_sysctl);
3047 goto out;
3048
3049 case KERN_KDSET_BG_TYPEFILTER:
3050 if (!kdlog_bg_trace || !kdlog_bg_trace_running){
3051 ret = EINVAL;
3052 goto out;
3053 }
3054
3055 if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
3056 ret = EINVAL;
3057 goto out;
3058 }
3059
3060 if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
3061 if ((ret = kdbg_enable_typefilter()))
3062 goto out;
3063 }
3064
3065 if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
3066 ret = EINVAL;
3067 goto out;
3068 }
3069 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
3070 goto out;
91447636
A
3071 }
3072
0c530ab8 3073 if ((curproc = current_proc()) != NULL)
b0d623f7 3074 curpid = curproc->p_pid;
91447636 3075 else {
b0d623f7
A
3076 ret = ESRCH;
3077 goto out;
91447636 3078 }
39236c6e 3079 if (global_state_pid == -1)
b0d623f7 3080 global_state_pid = curpid;
91447636 3081 else if (global_state_pid != curpid) {
b0d623f7
A
3082 if ((p = proc_find(global_state_pid)) == NULL) {
3083 /*
3084 * The global pid no longer exists
3085 */
3086 global_state_pid = curpid;
3087 } else {
3088 /*
3089 * The global pid exists, deny this request
3090 */
3091 proc_rele(p);
91447636 3092
b0d623f7
A
3093 ret = EBUSY;
3094 goto out;
3095 }
91447636 3096 }
1c79356b
A
3097
3098 switch(name[0]) {
3099 case KERN_KDEFLAGS:
316670eb
A
3100 kdbg_disable_bg_trace();
3101
1c79356b 3102 value &= KDBG_USERFLAGS;
6d2010ae 3103 kd_ctrl_page.kdebug_flags |= value;
1c79356b
A
3104 break;
3105 case KERN_KDDFLAGS:
316670eb
A
3106 kdbg_disable_bg_trace();
3107
1c79356b 3108 value &= KDBG_USERFLAGS;
6d2010ae 3109 kd_ctrl_page.kdebug_flags &= ~value;
1c79356b 3110 break;
b0d623f7
A
3111 case KERN_KDENABLE:
3112 /*
316670eb
A
3113 * Enable tracing mechanism. Two types:
3114 * KDEBUG_TRACE is the standard one,
3115 * and KDEBUG_PPT which is a carefully
3116 * chosen subset to avoid performance impact.
b0d623f7
A
3117 */
3118 if (value) {
3119 /*
3120 * enable only if buffer is initialized
3121 */
316670eb
A
3122 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
3123 !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
b0d623f7
A
3124 ret = EINVAL;
3125 break;
3126 }
39236c6e 3127 kdbg_thrmap_init();
b0d623f7 3128
316670eb 3129 kdbg_set_tracing_enabled(TRUE, value);
1c79356b 3130 }
6d2010ae 3131 else
316670eb
A
3132 {
3133 kdbg_set_tracing_enabled(FALSE, 0);
3134 }
b0d623f7 3135 break;
1c79356b 3136 case KERN_KDSETBUF:
316670eb
A
3137 kdbg_disable_bg_trace();
3138
3139 nkdbufs = kdbg_set_nkdbufs(value);
1c79356b 3140 break;
1c79356b 3141 case KERN_KDSETUP:
316670eb
A
3142 kdbg_disable_bg_trace();
3143
6d2010ae 3144 ret = kdbg_reinit(FALSE);
1c79356b
A
3145 break;
3146 case KERN_KDREMOVE:
3147 kdbg_clear();
39236c6e 3148 ret = kdbg_enable_bg_trace();
1c79356b
A
3149 break;
3150 case KERN_KDSETREG:
3151 if(size < sizeof(kd_regtype)) {
b0d623f7 3152 ret = EINVAL;
1c79356b
A
3153 break;
3154 }
3155 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
b0d623f7 3156 ret = EINVAL;
1c79356b
A
3157 break;
3158 }
316670eb
A
3159 kdbg_disable_bg_trace();
3160
1c79356b
A
3161 ret = kdbg_setreg(&kd_Reg);
3162 break;
3163 case KERN_KDGETREG:
316670eb 3164 kdbg_disable_bg_trace();
4bd07ac2 3165 ret = EINVAL;
1c79356b
A
3166 break;
3167 case KERN_KDREADTR:
3e170ce0 3168 ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1);
1c79356b 3169 break;
39236c6e 3170 case KERN_KDWRITETR:
3e170ce0 3171 case KERN_KDWRITETR_V3:
39236c6e 3172 case KERN_KDWRITEMAP:
3e170ce0 3173 case KERN_KDWRITEMAP_V3:
6d2010ae
A
3174 {
3175 struct vfs_context context;
3176 struct fileproc *fp;
3177 size_t number;
3178 vnode_t vp;
3179 int fd;
3180
3e170ce0 3181 if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
6d2010ae
A
3182 int s;
3183 int wait_result = THREAD_AWAKENED;
3184 u_int64_t abstime;
3185 u_int64_t ns;
3186
3187 if (*sizep) {
3188 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
3189 nanoseconds_to_absolutetime(ns, &abstime );
3190 clock_absolutetime_interval_to_deadline( abstime, &abstime );
3191 } else
3192 abstime = 0;
3193
3194 s = ml_set_interrupts_enabled(FALSE);
3195 lck_spin_lock(kdw_spin_lock);
3196
3197 while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
3198
3199 kds_waiter = 1;
3200
3201 if (abstime)
3202 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
3203 else
3204 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
3205
3206 kds_waiter = 0;
3207 }
3208 lck_spin_unlock(kdw_spin_lock);
3209 ml_set_interrupts_enabled(s);
3210 }
3211 p = current_proc();
3212 fd = value;
3213
3214 proc_fdlock(p);
3215 if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
3216 proc_fdunlock(p);
3217 break;
3218 }
3219 context.vc_thread = current_thread();
3220 context.vc_ucred = fp->f_fglob->fg_cred;
3221
39236c6e 3222 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
6d2010ae
A
3223 fp_drop(p, fd, fp, 1);
3224 proc_fdunlock(p);
3225
3226 ret = EBADF;
3227 break;
3228 }
3229 vp = (struct vnode *)fp->f_fglob->fg_data;
3230 proc_fdunlock(p);
3231
3232 if ((ret = vnode_getwithref(vp)) == 0) {
fe8ab488 3233 RAW_file_offset = fp->f_fglob->fg_offset;
3e170ce0 3234 if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
6d2010ae
A
3235 number = nkdbufs * sizeof(kd_buf);
3236
04b8595b 3237 KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_START, 0, 0, 0, 0, 0);
3e170ce0
A
3238 if (name[0] == KERN_KDWRITETR_V3)
3239 ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3);
3240 else
3241 ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1);
04b8595b 3242 KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_END, number, 0, 0, 0, 0);
6d2010ae
A
3243
3244 *sizep = number;
3245 } else {
39236c6e 3246 number = kd_mapcount * sizeof(kd_threadmap);
3e170ce0
A
3247 if (name[0] == KERN_KDWRITEMAP_V3)
3248 kdbg_readthrmap_v3(0, &number, fd);
3249 else
3250 kdbg_readthrmap(0, &number, vp, &context);
6d2010ae 3251 }
fe8ab488 3252 fp->f_fglob->fg_offset = RAW_file_offset;
6d2010ae
A
3253 vnode_put(vp);
3254 }
3255 fp_drop(p, fd, fp, 0);
3256
3257 break;
3258 }
39236c6e
A
3259 case KERN_KDBUFWAIT:
3260 {
3261 /* WRITETR lite -- just block until there's data */
3262 int s;
3263 int wait_result = THREAD_AWAKENED;
3264 u_int64_t abstime;
3265 u_int64_t ns;
3266 size_t number = 0;
3267
3268 kdbg_disable_bg_trace();
3269
3270
3271 if (*sizep) {
3272 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
3273 nanoseconds_to_absolutetime(ns, &abstime );
3274 clock_absolutetime_interval_to_deadline( abstime, &abstime );
3275 } else
3276 abstime = 0;
3277
3278 s = ml_set_interrupts_enabled(FALSE);
3279 if( !s )
3280 panic("trying to wait with interrupts off");
3281 lck_spin_lock(kdw_spin_lock);
3282
3283 /* drop the mutex so don't exclude others from
3284 * accessing trace
3285 */
3286 lck_mtx_unlock(kd_trace_mtx_sysctl);
3287
3288 while (wait_result == THREAD_AWAKENED &&
3289 kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
3290
3291 kds_waiter = 1;
3292
3293 if (abstime)
3294 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
3295 else
3296 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
3297
3298 kds_waiter = 0;
3299 }
3300
3301 /* check the count under the spinlock */
3302 number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
3303
3304 lck_spin_unlock(kdw_spin_lock);
3305 ml_set_interrupts_enabled(s);
3306
3307 /* pick the mutex back up again */
3308 lck_mtx_lock(kd_trace_mtx_sysctl);
3309
3310 /* write out whether we've exceeded the threshold */
3311 *sizep = number;
3312 break;
3313 }
1c79356b
A
3314 case KERN_KDPIDTR:
3315 if (size < sizeof(kd_regtype)) {
3316 ret = EINVAL;
3317 break;
3318 }
3319 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
b0d623f7 3320 ret = EINVAL;
1c79356b
A
3321 break;
3322 }
316670eb
A
3323 kdbg_disable_bg_trace();
3324
1c79356b
A
3325 ret = kdbg_setpid(&kd_Reg);
3326 break;
3327 case KERN_KDPIDEX:
3328 if (size < sizeof(kd_regtype)) {
3329 ret = EINVAL;
3330 break;
3331 }
3332 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
b0d623f7 3333 ret = EINVAL;
1c79356b
A
3334 break;
3335 }
316670eb
A
3336 kdbg_disable_bg_trace();
3337
1c79356b
A
3338 ret = kdbg_setpidex(&kd_Reg);
3339 break;
39236c6e
A
3340 case KERN_KDCPUMAP:
3341 ret = kdbg_readcpumap(where, sizep);
3342 break;
3343 case KERN_KDTHRMAP:
3344 ret = kdbg_readthrmap(where, sizep, NULL, NULL);
3345 break;
3346 case KERN_KDREADCURTHRMAP:
3347 ret = kdbg_readcurthrmap(where, sizep);
3348 break;
3349 case KERN_KDSETRTCDEC:
1c79356b
A
3350 if (size < sizeof(kd_regtype)) {
3351 ret = EINVAL;
3352 break;
3353 }
3354 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
b0d623f7 3355 ret = EINVAL;
1c79356b
A
3356 break;
3357 }
316670eb
A
3358 kdbg_disable_bg_trace();
3359
1c79356b
A
3360 ret = kdbg_setrtcdec(&kd_Reg);
3361 break;
316670eb
A
3362 case KERN_KDSET_TYPEFILTER:
3363 kdbg_disable_bg_trace();
3364
316670eb
A
3365 if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
3366 ret = EINVAL;
3367 break;
3368 }
3369
3e170ce0
A
3370 if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
3371 if ((ret = kdbg_enable_typefilter()))
3372 break;
3373 }
3374
316670eb
A
3375 if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
3376 ret = EINVAL;
3377 break;
3378 }
39236c6e 3379 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
316670eb 3380 break;
1c79356b 3381 default:
b0d623f7 3382 ret = EINVAL;
1c79356b 3383 }
b0d623f7 3384out:
0c530ab8 3385 lck_mtx_unlock(kd_trace_mtx_sysctl);
91447636 3386
1c79356b
A
3387 return(ret);
3388}
3389
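/*
 * Illustrative sketch (not part of the original source): the traditional
 * way user-space tools (fs_usage, latency, trace) drive kdbg_control()
 * through sysctl(2).  The mib length is 4 when an argument is passed in
 * name[1] (the 'value' checked at the top of kdbg_control()) and 3
 * otherwise.  Error handling is omitted; the event_buffer sizing is an
 * example only.
 */
#if 0	/* userspace example only */
#include <stdlib.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

	int mib[6] = { CTL_KERN, KERN_KDEBUG, 0, 0, 0, 0 };
	size_t needed = 0;
	kbufinfo_t bufinfo;
	kd_buf *event_buffer;

	mib[2] = KERN_KDSETBUF; mib[3] = 8192;			/* ask for 8192 events */
	sysctl(mib, 4, NULL, &needed, NULL, 0);

	mib[2] = KERN_KDSETUP; mib[3] = 0;			/* allocate the buffers */
	sysctl(mib, 3, NULL, &needed, NULL, 0);

	mib[2] = KERN_KDENABLE; mib[3] = KDEBUG_ENABLE_TRACE;	/* start tracing */
	sysctl(mib, 4, NULL, &needed, NULL, 0);

	mib[2] = KERN_KDGETBUF;					/* query buffer state */
	needed = sizeof(bufinfo);
	sysctl(mib, 3, &bufinfo, &needed, NULL, 0);

	event_buffer = malloc(bufinfo.nkdbufs * sizeof(kd_buf));

	mib[2] = KERN_KDREADTR;					/* drain the events */
	needed = bufinfo.nkdbufs * sizeof(kd_buf);
	sysctl(mib, 3, event_buffer, &needed, NULL, 0);

	mib[2] = KERN_KDREMOVE;					/* tear everything down */
	sysctl(mib, 3, NULL, &needed, NULL, 0);
#endif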
0c530ab8
A
3390
3391/*
b0d623f7
A
3392 * This code can run for the most part concurrently with kernel_debug_internal()...
3393 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
3394 * synchronize with the recording side of this puzzle... otherwise, we are able to
3395 * move through the lists w/o use of any locks
0c530ab8
A
3396 */
3397int
3e170ce0 3398kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uint32_t file_version)
1c79356b 3399{
0c530ab8 3400 unsigned int count;
6d2010ae 3401 unsigned int cpu, min_cpu;
39236c6e 3402 uint64_t mintime, t, barrier = 0;
6d2010ae 3403 int error = 0;
0c530ab8 3404 kd_buf *tempbuf;
6d2010ae
A
3405 uint32_t rcursor;
3406 kd_buf lostevent;
3407 union kds_ptr kdsp;
3408 struct kd_storage *kdsp_actual;
b0d623f7 3409 struct kd_bufinfo *kdbp;
6d2010ae 3410 struct kd_bufinfo *min_kdbp;
0c530ab8
A
3411 uint32_t tempbuf_count;
3412 uint32_t tempbuf_number;
b0d623f7
A
3413 uint32_t old_kdebug_flags;
3414 uint32_t old_kdebug_slowcheck;
6d2010ae
A
3415 boolean_t lostevents = FALSE;
3416 boolean_t out_of_events = FALSE;
2d21ac55 3417
3e170ce0 3418 assert(number);
0c530ab8
A
3419 count = *number/sizeof(kd_buf);
3420 *number = 0;
3421
6d2010ae 3422 if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
0c530ab8 3423 return EINVAL;
1c79356b 3424
6d2010ae 3425 memset(&lostevent, 0, sizeof(lostevent));
04b8595b 3426 lostevent.debugid = TRACE_LOST_EVENTS;
6d2010ae 3427
39236c6e
A
3428	 /* Capture timestamp. Only sort events that have occurred before the timestamp.
3429	 * Since the iop is being flushed here, it's possible that events occur on the AP
3430 * while running live tracing. If we are disabled, no new events should
3431 * occur on the AP.
3432 */
3433
3434 if (kd_ctrl_page.enabled)
3435 {
3436 // timestamp is non-zero value
3437 barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
3438 }
3439
3440 // Request each IOP to provide us with up to date entries before merging buffers together.
3441 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
3442
0c530ab8
A
3443 /*
3444 * because we hold kd_trace_mtx_sysctl, no other control threads can
3445 * be playing with kdebug_flags... the code that cuts new events could
b0d623f7
A
3446 * be running, but it grabs kds_spin_lock if it needs to acquire a new
3447	 * storage chunk which is where it examines kdebug_flags... if it's adding
3448 * to the same chunk we're reading from, no problem...
0c530ab8 3449 */
0c530ab8 3450
6d2010ae 3451 disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
4452a7af 3452
0c530ab8
A
3453 if (count > nkdbufs)
3454 count = nkdbufs;
4452a7af 3455
0c530ab8
A
3456 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
3457 tempbuf_count = KDCOPYBUF_COUNT;
4452a7af 3458
0c530ab8 3459 while (count) {
39236c6e 3460 tempbuf = kdcopybuf;
0c530ab8
A
3461 tempbuf_number = 0;
3462
316670eb 3463 // While space
39236c6e 3464 while (tempbuf_count) {
6d2010ae
A
3465 mintime = 0xffffffffffffffffULL;
3466 min_kdbp = NULL;
3467 min_cpu = 0;
0c530ab8 3468
316670eb 3469 // Check all CPUs
39236c6e 3470 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
b0d623f7 3471
316670eb 3472 // Find one with raw data
6d2010ae 3473 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
0c530ab8 3474 continue;
39236c6e
A
3475 /* Debugging aid: maintain a copy of the "kdsp"
3476 * index.
3477 */
3478 volatile union kds_ptr kdsp_shadow;
3479
3480 kdsp_shadow = kdsp;
316670eb
A
3481
3482 // Get from cpu data to buffer header to buffer
6d2010ae
A
3483 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
3484
39236c6e
A
3485 volatile struct kd_storage *kdsp_actual_shadow;
3486
3487 kdsp_actual_shadow = kdsp_actual;
3488
316670eb 3489 // See if there are actual data left in this buffer
6d2010ae 3490 rcursor = kdsp_actual->kds_readlast;
b0d623f7 3491
6d2010ae 3492 if (rcursor == kdsp_actual->kds_bufindx)
b0d623f7 3493 continue;
0c530ab8 3494
6d2010ae
A
3495 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
3496
39236c6e
A
3497 if ((t > barrier) && (barrier > 0)) {
3498 /*
3499 * Need to wait to flush iop again before we
3500 * sort any more data from the buffers
3501 */
3502 out_of_events = TRUE;
3503 break;
3504 }
6d2010ae
A
3505 if (t < kdsp_actual->kds_timestamp) {
3506 /*
3507 * indicates we've not yet completed filling
3508 * in this event...
3509 * this should only occur when we're looking
3510 * at the buf that the record head is utilizing
3511 * we'll pick these events up on the next
3512 * call to kdbg_read
3513 * we bail at this point so that we don't
3514 * get an out-of-order timestream by continuing
3515 * to read events from the other CPUs' timestream(s)
3516 */
3517 out_of_events = TRUE;
3518 break;
3519 }
0c530ab8 3520 if (t < mintime) {
b0d623f7 3521 mintime = t;
6d2010ae
A
3522 min_kdbp = kdbp;
3523 min_cpu = cpu;
91447636
A
3524 }
3525 }
6d2010ae
A
3526 if (min_kdbp == NULL || out_of_events == TRUE) {
3527 /*
b0d623f7 3528 * all buffers ran empty
91447636 3529 */
6d2010ae
A
3530 out_of_events = TRUE;
3531 break;
3532 }
316670eb
A
3533
3534 // Get data
6d2010ae
A
3535 kdsp = min_kdbp->kd_list_head;
3536 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
0c530ab8 3537
6d2010ae 3538 if (kdsp_actual->kds_lostevents == TRUE) {
316670eb 3539 kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
6d2010ae
A
3540 *tempbuf = lostevent;
3541
3542 kdsp_actual->kds_lostevents = FALSE;
3543 lostevents = TRUE;
2d21ac55 3544
6d2010ae 3545 goto nextevent;
2d21ac55 3546 }
316670eb
A
3547
3548 // Copy into buffer
6d2010ae
A
3549 *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
3550
3551 if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
3552 release_storage_unit(min_cpu, kdsp.raw);
3553
b0d623f7
A
3554 /*
3555 * Watch for out of order timestamps
3556 */
6d2010ae 3557 if (mintime < min_kdbp->kd_prev_timebase) {
b0d623f7
A
3558 /*
3559 * if so, use the previous timestamp + 1 cycle
3560 */
6d2010ae
A
3561 min_kdbp->kd_prev_timebase++;
3562 kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
b0d623f7 3563 } else
6d2010ae
A
3564 min_kdbp->kd_prev_timebase = mintime;
3565nextevent:
0c530ab8
A
3566 tempbuf_count--;
3567 tempbuf_number++;
b0d623f7 3568 tempbuf++;
6d2010ae
A
3569
3570 if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
3571 break;
0c530ab8
A
3572 }
3573 if (tempbuf_number) {
3e170ce0
A
3574 if (file_version == RAW_VERSION3) {
3575 if ( !(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) {
3576 error = EFAULT;
3577 goto check_error;
3578 }
3579 if (buffer)
3580 buffer += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
b0d623f7 3581
3e170ce0
A
3582 assert(count >= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t)));
3583 count -= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
3584 *number += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
3585 }
b0d623f7 3586 if (vp) {
3e170ce0
A
3587 size_t write_size = tempbuf_number * sizeof(kd_buf);
3588 error = kdbg_write_to_vnode((caddr_t)kdcopybuf, write_size, vp, ctx, RAW_file_offset);
3589 if (!error)
3590 RAW_file_offset += write_size;
6d2010ae
A
3591
3592 if (RAW_file_written >= RAW_FLUSH_SIZE) {
3593 cluster_push(vp, 0);
3594
3595 RAW_file_written = 0;
3596 }
b0d623f7
A
3597 } else {
3598 error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
3599 buffer += (tempbuf_number * sizeof(kd_buf));
3600 }
3e170ce0 3601check_error:
b0d623f7
A
3602 if (error) {
3603 *number = 0;
0c530ab8
A
3604 error = EINVAL;
3605 break;
6601e61a 3606 }
0c530ab8
A
3607 count -= tempbuf_number;
3608 *number += tempbuf_number;
0c530ab8 3609 }
6d2010ae 3610 if (out_of_events == TRUE)
0c530ab8
A
3611 /*
3612 * all trace buffers are empty
3613 */
3614 break;
89b3af67 3615
0c530ab8
A
3616 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
3617 tempbuf_count = KDCOPYBUF_COUNT;
3618 }
3619 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
6d2010ae 3620 enable_wrap(old_kdebug_slowcheck, lostevents);
0c530ab8
A
3621 }
3622 return (error);
6601e61a 3623}
4452a7af 3624
0c530ab8 3625
55e303ae
A
3626unsigned char *getProcName(struct proc *proc);
3627unsigned char *getProcName(struct proc *proc) {
3628
3629 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
3630
3631}
0c530ab8 3632
3e170ce0
A
3633static int
3634stackshot_kern_return_to_bsd_error(kern_return_t kr)
0c530ab8 3635{
3e170ce0
A
3636 switch (kr) {
3637 case KERN_SUCCESS:
3638 return 0;
3639 case KERN_RESOURCE_SHORTAGE:
3640 return ENOMEM;
3641 case KERN_NO_SPACE:
3642 return ENOSPC;
3643 case KERN_NO_ACCESS:
3644 return EPERM;
3645 case KERN_MEMORY_PRESENT:
3646 return EEXIST;
3647 case KERN_NOT_SUPPORTED:
3648 return ENOTSUP;
3649 case KERN_NOT_IN_SET:
3650 return ENOENT;
3651 default:
3652 return EINVAL;
3653 }
0c530ab8
A
3654}
3655
3e170ce0 3656
0c530ab8 3657/*
3e170ce0
A
3658 * DEPRECATION WARNING: THIS SYSCALL IS BEING REPLACED WITH SYS_stack_snapshot_with_config and SYS_microstackshot.
3659 *
0c530ab8
A
3660 * stack_snapshot: Obtains a coherent set of stack traces for all threads
3661 * on the system, tracing both kernel and user stacks
3662 * where available. Uses machine specific trace routines
3663 * for ppc, ppc64 and x86.
3664 * Inputs: uap->pid - process id of process to be traced, or -1
3665 * for the entire system
3666 * uap->tracebuf - address of the user space destination
3667 * buffer
3668 * uap->tracebuf_size - size of the user space trace buffer
3669 * uap->options - various options, including the maximum
3670 * number of frames to trace.
3671 * Outputs: EPERM if the caller is not privileged
3672 * EINVAL if the supplied trace buffer isn't sanely sized
3673 * ENOMEM if we don't have enough memory to satisfy the
3674 * request
3675 * ENOENT if the target pid isn't found
3676 * ENOSPC if the supplied buffer is insufficient
3677 * *retval contains the number of bytes traced, if successful
3678 * and -1 otherwise. If the request failed due to
3679 * tracebuffer exhaustion, we copyout as much as possible.
3680 */
3681int
b0d623f7 3682stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
0c530ab8 3683 int error = 0;
3e170ce0 3684 kern_return_t kr;
0c530ab8
A
3685
3686 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
3687 return(error);
3688
3e170ce0
A
3689 kr = stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
3690 return stackshot_kern_return_to_bsd_error(kr);
0c530ab8
A
3691}
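/*
 * Illustrative sketch, not part of the original source: the general shape of a
 * user-space caller of the deprecated interface above.  SYS_stack_snapshot and its
 * presence in <sys/syscall.h> are assumptions here; the argument list mirrors the
 * uap fields used above.  New code should prefer SYS_stack_snapshot_with_config,
 * per the deprecation warning.
 */
#if 0	/* example only: user-space code, never compiled into the kernel */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>

static void
example_stack_snapshot(void)
{
	size_t bufsize = 4 * 1024 * 1024;	/* arbitrary; grow and retry if ENOSPC is returned */
	void *buf = malloc(bufsize);
	if (buf == NULL)
		return;

	/* pid == -1 traces the entire system; 0 requests the default options */
	int traced = syscall(SYS_stack_snapshot, -1, buf, bufsize, 0);
	if (traced >= 0)
		printf("captured %d bytes of stackshot data\n", traced);

	free(buf);
}
#endif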
3692
3e170ce0
A
3693/*
3694 * stack_snapshot_with_config: Obtains a coherent set of stack traces for specified threads on the system,
3695 * tracing both kernel and user stacks where available. Allocates a buffer from the
3696 * kernel and maps the buffer into the calling task's address space.
3697 *
3698 * Inputs: uap->stackshot_config_version - version of the stackshot config that is being passed
3699 * uap->stackshot_config - pointer to the stackshot config
3700 * uap->stackshot_config_size- size of the stackshot config being passed
3701 * Outputs: EINVAL if there is a problem with the arguments
3702 * EFAULT if we failed to copy in the arguments successfully
3703 * EPERM if the caller is not privileged
3704 * ENOTSUP if the caller is passing a version of arguments that is not supported by the kernel
3705 * (indicates libsyscall:kernel mismatch) or if the caller is requesting unsupported flags
3706 * ENOENT if the caller is requesting retrieval of a previously saved kernel buffer that doesn't exist or if the
3707 * requested PID isn't found
3708 * ENOMEM if the kernel is unable to allocate enough memory to serve the request
3709 * ENOSPC if there isn't enough space in the caller's address space to remap the buffer
3710 * ESRCH if the target PID isn't found
3711 * returns KERN_SUCCESS on success
39236c6e 3712 */
0c530ab8 3713int
3e170ce0 3714stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_args *uap, __unused int *retval)
0c530ab8
A
3715{
3716 int error = 0;
3e170ce0 3717 kern_return_t kr;
39236c6e 3718
3e170ce0
A
3719 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
3720 return(error);
fe8ab488 3721
3e170ce0
A
3722 if((void*)uap->stackshot_config == NULL) {
3723 return EINVAL;
fe8ab488 3724 }
0c530ab8 3725
3e170ce0
A
3726 switch (uap->stackshot_config_version) {
3727 case STACKSHOT_CONFIG_TYPE:
3728 if (uap->stackshot_config_size != sizeof(stackshot_config_t)) {
3729 return EINVAL;
3730 }
3731 stackshot_config_t config;
3732 error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t));
3733 if (error != 0)
3734 {
3735 return EFAULT;
3736 }
3737 kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE);
3738 return stackshot_kern_return_to_bsd_error(kr);
3739 default:
3740 return ENOTSUP;
39236c6e 3741 }
3e170ce0 3742}
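/*
 * Illustrative sketch, not part of the original source: a possible user-space caller
 * of the config-based interface above.  SYS_stack_snapshot_with_config is named in the
 * deprecation note earlier in this file; the availability of stackshot_config_t and the
 * STACKSHOT_* flags to user space, and the reading of sc_out_buffer_addr/sc_out_size_addr
 * as the addresses where the kernel reports the remapped buffer and its length, are
 * assumptions based on the field names used in this file.
 */
#if 0	/* example only: user-space code, never compiled into the kernel */
#include <sys/syscall.h>
#include <sys/stackshot.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

static void
example_stack_snapshot_with_config(pid_t target)
{
	uint64_t out_buf = 0;	/* presumed to receive the mapped buffer address */
	uint64_t out_size = 0;	/* presumed to receive the stackshot length */
	stackshot_config_t config = {
		.sc_pid = target,
		.sc_flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO,
		.sc_since_timestamp = 0,
		.sc_out_buffer_addr = (uint64_t)(uintptr_t)&out_buf,
		.sc_out_size_addr = (uint64_t)(uintptr_t)&out_size,
	};

	if (syscall(SYS_stack_snapshot_with_config, STACKSHOT_CONFIG_TYPE,
	    &config, sizeof(config)) == 0)
		printf("stackshot of %llu bytes mapped at 0x%llx\n",
		    (unsigned long long)out_size, (unsigned long long)out_buf);
}
#endif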
39236c6e
A
3743
3744#if CONFIG_TELEMETRY
3e170ce0
A
3745/*
3746 * microstackshot: Catch-all system call for microstackshot-related operations, including
3747 * enabling/disabling both global and windowed microstackshots as well
3748 * as retrieving windowed or global stackshots and the boot profile.
3749 * Inputs: uap->tracebuf - address of the user space destination
3750 * buffer
3751 * uap->tracebuf_size - size of the user space trace buffer
3752 * uap->flags - various flags
3753 * Outputs: EPERM if the caller is not privileged
3754 * EINVAL if the supplied mss_args is NULL, mss_args.tracebuf is NULL or mss_args.tracebuf_size is not sane
3755 * ENOMEM if we don't have enough memory to satisfy the request
3756 * *retval contains the number of bytes traced, if successful
3757 * and -1 otherwise.
3758 */
3759int
3760microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval)
3761{
3762 int error = 0;
3763 kern_return_t kr;
0c530ab8 3764
3e170ce0
A
3765 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
3766 return(error);
d41d1dae 3767
3e170ce0
A
3768 kr = stack_microstackshot(uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
3769 return stackshot_kern_return_to_bsd_error(kr);
3770}
3771#endif /* CONFIG_TELEMETRY */
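/*
 * Illustrative sketch, not part of the original source: the call shape for the
 * telemetry interface above, via the raw syscall.  SYS_microstackshot is named in the
 * deprecation note earlier in this file; the flag values that select the
 * enable/disable/retrieve operations are defined elsewhere and are not spelled out here.
 */
#if 0	/* example only: user-space code, never compiled into the kernel */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

static int
example_microstackshot(void *tracebuf, size_t tracebuf_size, uint32_t flags)
{
	/* returns the number of bytes traced on success and -1 otherwise (see above) */
	return syscall(SYS_microstackshot, tracebuf, tracebuf_size, flags);
}
#endif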
0c530ab8 3772
3e170ce0
A
3773/*
3774 * kern_stack_snapshot_with_reason: Obtains a coherent set of stack traces for specified threads on the system,
3775 * tracing both kernel and user stacks where available. Allocates a buffer from the
3776 * kernel and stores the address of this buffer.
3777 *
3778 * Inputs: reason - the reason for triggering a stackshot (unused at the moment, but in the
3779 * future will be saved in the stackshot)
3780 * Outputs: EINVAL/ENOTSUP if there is a problem with the arguments
3781 * EPERM if the caller doesn't pass at least one KERNEL stackshot flag
3782 * ENOMEM if the kernel is unable to allocate enough memory to serve the request
3783 * ESRCH if the target PID isn't found
3784 * returns KERN_SUCCESS on success
0c530ab8 3785 */
3e170ce0
A
3786int
3787kern_stack_snapshot_with_reason(__unused char *reason)
3788{
3789 stackshot_config_t config;
3790 kern_return_t kr;
3791
3792 config.sc_pid = -1;
3793 config.sc_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IN_KERNEL_BUFFER |
3794 STACKSHOT_KCDATA_FORMAT);
3795 config.sc_since_timestamp = 0;
3796 config.sc_out_buffer_addr = 0;
3797 config.sc_out_size_addr = 0;
3798
3799 kr = kern_stack_snapshot_internal(STACKSHOT_CONFIG_TYPE, &config, sizeof(stackshot_config_t), FALSE);
3800 return stackshot_kern_return_to_bsd_error(kr);
3801}
2d21ac55 3802
3e170ce0
A
3803/*
3804 * stack_snapshot_from_kernel: Stackshot function for kernel consumers who have their own buffer.
3805 *
3806 * Inputs: pid - the PID to be traced or -1 for the whole system
3807 * buf - a pointer to the buffer where the stackshot should be written
3808 * size - the size of the buffer
3809 * flags - flags to be passed to the stackshot
3810 * *bytes_traced - a pointer to be filled with the length of the stackshot
3811 * Outputs: -1 if there is a problem with the arguments
3812 * the error returned by the stackshot code otherwise
3813 */
3814int
3815stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced)
3816{
3817 kern_return_t kr;
0c530ab8 3818
3e170ce0
A
3819 kr = stack_snapshot_from_kernel_internal(pid, buf, size, flags, bytes_traced);
3820 if (kr == KERN_FAILURE) {
3821 return -1;
0c530ab8
A
3822 }
3823
3e170ce0 3824 return kr;
0c530ab8 3825}
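/*
 * Illustrative sketch, not part of the original source: how an in-kernel consumer
 * with its own buffer might use stack_snapshot_from_kernel().  The buffer size and
 * the flag combination are arbitrary choices for the example.
 */
#if 0	/* example only: not compiled */
static void
example_kernel_stackshot(pid_t pid)
{
	uint32_t size = 512 * 1024;	/* arbitrary; a larger buffer may be needed */
	unsigned bytes_traced = 0;
	void *buf = kalloc(size);

	if (buf == NULL)
		return;

	if (stack_snapshot_from_kernel(pid, buf, size,
	    STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO, &bytes_traced) == 0) {
		/* bytes_traced now holds the length of the stackshot written to buf */
	}

	kfree(buf, size);
}
#endif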
2d21ac55
A
3826
3827void
fe8ab488
A
3828start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
3829{
6d2010ae 3830
2d21ac55
A
3831 if (!new_nkdbufs)
3832 return;
316670eb 3833 nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
2d21ac55 3834 kdbg_lock_init();
39236c6e 3835
3e170ce0 3836 kernel_debug_string_simple("start_kern_tracing");
fe8ab488 3837
39236c6e
A
3838 if (0 == kdbg_reinit(TRUE)) {
3839
3840 if (need_map == TRUE) {
3841 uint32_t old1, old2;
3842
3843 kdbg_thrmap_init();
3844
3845 disable_wrap(&old1, &old2);
3846 }
fe8ab488
A
3847
3848 /* Hold off interrupts until the early traces are cut */
3849 boolean_t s = ml_set_interrupts_enabled(FALSE);
3850
04b8595b
A
3851 kdbg_set_tracing_enabled(
3852 TRUE,
3853 kdebug_serial ?
3854 (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_SERIAL) :
3855 KDEBUG_ENABLE_TRACE);
b0d623f7 3856
fe8ab488
A
3857 /*
3858 * Transfer all very early events from the static buffer
3859 * into the real buffers.
3860 */
3861 kernel_debug_early_end();
3862
3863 ml_set_interrupts_enabled(s);
b0d623f7 3864
39236c6e 3865 printf("kernel tracing started\n");
04b8595b
A
3866#if KDEBUG_MOJO_TRACE
3867 if (kdebug_serial) {
3868 printf("serial output enabled with %lu named events\n",
3869 sizeof(kd_events)/sizeof(kd_event_t));
3870 }
3871#endif
39236c6e 3872 } else {
04b8595b 3873 printf("error from kdbg_reinit, kernel tracing not started\n");
39236c6e 3874 }
2d21ac55 3875}
b0d623f7 3876
fe8ab488
A
3877void
3878start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
3879 boolean_t need_map,
3880 unsigned int typefilter)
3881{
3882 /* startup tracing */
3883 start_kern_tracing(new_nkdbufs, need_map);
3884
3885 /* check that tracing was actually enabled */
3886 if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
3887 return;
3888
3889 /* setup the typefiltering */
3890 if (0 == kdbg_enable_typefilter())
3e170ce0
A
3891 setbit(type_filter_bitmap,
3892 typefilter & (KDBG_CSC_MASK >> KDBG_CSC_OFFSET));
fe8ab488
A
3893}
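/*
 * Illustrative note, not part of the original source: the typefilter argument above is
 * a 16-bit class/subclass pair, i.e. the value left after masking with
 * (KDBG_CSC_MASK >> KDBG_CSC_OFFSET).  A minimal sketch of deriving it from a full
 * debugid of interest:
 */
#if 0	/* example only: not compiled */
static void
example_start_filtered_tracing(uint32_t debugid, unsigned int nbufs)
{
	/* keep only the class/subclass bits of the event id and trace just that group */
	uint32_t typefilter = (debugid & KDBG_CSC_MASK) >> KDBG_CSC_OFFSET;

	start_kern_tracing_with_typefilter(nbufs, TRUE, typefilter);
}
#endif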
3894
b0d623f7
A
3895void
3896kdbg_dump_trace_to_file(const char *filename)
3897{
3898 vfs_context_t ctx;
3899 vnode_t vp;
3900 int error;
3901 size_t number;
3902
3903
6d2010ae 3904 if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
b0d623f7
A
3905 return;
3906
3907 if (global_state_pid != -1) {
3908 if ((proc_find(global_state_pid)) != NULL) {
3909 /*
3910 * The global pid exists, we're running
3911 * due to fs_usage, latency, etc...
3912 * don't cut the panic/shutdown trace file
39236c6e
A
3913 * Disable tracing from this point to avoid
3914 * perturbing state.
b0d623f7 3915 */
39236c6e
A
3916 kdebug_enable = 0;
3917 kd_ctrl_page.enabled = 0;
a1c7dba1 3918 commpage_update_kdebug_enable();
b0d623f7
A
3919 return;
3920 }
3921 }
04b8595b 3922 KERNEL_DEBUG_CONSTANT(TRACE_PANIC | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
b0d623f7
A
3923
3924 kdebug_enable = 0;
6d2010ae 3925 kd_ctrl_page.enabled = 0;
a1c7dba1 3926 commpage_update_kdebug_enable();
b0d623f7
A
3927
3928 ctx = vfs_context_kernel();
3929
3930 if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
3931 return;
3932
39236c6e
A
3933 number = kd_mapcount * sizeof(kd_threadmap);
3934 kdbg_readthrmap(0, &number, vp, ctx);
b0d623f7
A
3935
3936 number = nkdbufs*sizeof(kd_buf);
3e170ce0 3937 kdbg_read(0, &number, vp, ctx, RAW_VERSION1);
b0d623f7
A
3938
3939 vnode_close(vp, FWRITE, ctx);
3940
3941 sync(current_proc(), (void *)NULL, (int *)NULL);
3942}
6d2010ae
A
3943
3944/* Helper function for filling in the BSD name for an address space
3945 * Defined here because the machine bindings know only Mach threads
3946 * and nothing about BSD processes.
3947 *
3948 * FIXME: need to grab a lock during this?
3949 */
3950void kdbg_get_task_name(char* name_buf, int len, task_t task)
3951{
3952 proc_t proc;
3953
3954 /* Note: we can't use thread->task (and functions that rely on it) here
3955 * because it hasn't been initialized yet when this function is called.
3956 * We use the explicitly-passed task parameter instead.
3957 */
3958 proc = get_bsdtask_info(task);
3959 if (proc != PROC_NULL)
3960 snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
3961 else
3962 snprintf(name_buf, len, "%p [!bsd]", task);
3963}
04b8595b
A
3964
3965#if KDEBUG_MOJO_TRACE
3966static kd_event_t *
3967binary_search(uint32_t id)
3968{
3969 int low, high, mid;
3970
3971 low = 0;
3972 high = sizeof(kd_events)/sizeof(kd_event_t) - 1;
3973
3974 while (TRUE)
3975 {
3976 mid = (low + high) / 2;
3977
3978 if (low > high)
3979 return NULL; /* failed */
3980 else if ( low + 1 >= high) {
3981 /* We have a match */
3982 if (kd_events[high].id == id)
3983 return &kd_events[high];
3984 else if (kd_events[low].id == id)
3985 return &kd_events[low];
3986 else
3987 return NULL; /* search failed */
3988 }
3989 else if (id < kd_events[mid].id)
3990 high = mid;
3991 else
3992 low = mid;
3993 }
3994}
3995
3996/*
3997 * Look up event id to get name string.
3998 * Using a per-cpu cache of a single entry
3999 * before resorting to a binary search of the full table.
4000 */
4001#define NCACHE 1
4002static kd_event_t *last_hit[MAX_CPUS];
4003static kd_event_t *
4004event_lookup_cache(uint32_t cpu, uint32_t id)
4005{
4006 if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
4007 last_hit[cpu] = binary_search(id);
4008 return last_hit[cpu];
4009}
4010
4011static uint64_t kd_last_timstamp;
4012
4013static void
4014kdebug_serial_print(
4015 uint32_t cpunum,
4016 uint32_t debugid,
4017 uint64_t timestamp,
4018 uintptr_t arg1,
4019 uintptr_t arg2,
4020 uintptr_t arg3,
4021 uintptr_t arg4,
4022 uintptr_t threadid
4023 )
4024{
4025 char kprintf_line[192];
4026 char event[40];
4027 uint64_t us = timestamp / NSEC_PER_USEC;
4028 uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100;
4029 uint64_t delta = timestamp - kd_last_timstamp;
4030 uint64_t delta_us = delta / NSEC_PER_USEC;
4031 uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
3e170ce0 4032 uint32_t event_id = debugid & KDBG_EVENTID_MASK;
04b8595b
A
4033 const char *command;
4034 const char *bra;
4035 const char *ket;
4036 kd_event_t *ep;
4037
4038 /* event time and delta from last */
4039 snprintf(kprintf_line, sizeof(kprintf_line),
4040 "%11llu.%1llu %8llu.%1llu ",
4041 us, us_tenth, delta_us, delta_us_tenth);
4042
4043
4044 /* event (id or name) - start prefixed by "[", end postfixed by "]" */
4045 bra = (debugid & DBG_FUNC_START) ? "[" : " ";
4046 ket = (debugid & DBG_FUNC_END) ? "]" : " ";
4047 ep = event_lookup_cache(cpunum, event_id);
4048 if (ep) {
4049 if (strlen(ep->name) < sizeof(event) - 3)
4050 snprintf(event, sizeof(event), "%s%s%s",
4051 bra, ep->name, ket);
4052 else
4053 snprintf(event, sizeof(event), "%s%x(name too long)%s",
4054 bra, event_id, ket);
4055 } else {
4056 snprintf(event, sizeof(event), "%s%x%s",
4057 bra, event_id, ket);
4058 }
4059 snprintf(kprintf_line + strlen(kprintf_line),
4060 sizeof(kprintf_line) - strlen(kprintf_line),
4061 "%-40s ", event);
4062
4063 /* arg1 .. arg4 with special cases for strings */
4064 switch (event_id) {
4065 case VFS_LOOKUP:
4066 case VFS_LOOKUP_DONE:
4067 if (debugid & DBG_FUNC_START) {
4068 /* arg1 hex then arg2..arg4 chars */
4069 snprintf(kprintf_line + strlen(kprintf_line),
4070 sizeof(kprintf_line) - strlen(kprintf_line),
4071 "%-16lx %-8s%-8s%-8s ",
4072 arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
4073 break;
4074 }
4075 /* else fall through for arg1..arg4 chars */
4076 case TRACE_STRING_EXEC:
4077 case TRACE_STRING_NEWTHREAD:
4078 case TRACE_INFO_STRING:
4079 snprintf(kprintf_line + strlen(kprintf_line),
4080 sizeof(kprintf_line) - strlen(kprintf_line),
4081 "%-8s%-8s%-8s%-8s ",
4082 (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
4083 break;
4084 default:
4085 snprintf(kprintf_line + strlen(kprintf_line),
4086 sizeof(kprintf_line) - strlen(kprintf_line),
4087 "%-16lx %-16lx %-16lx %-16lx",
4088 arg1, arg2, arg3, arg4);
4089 }
4090
4091 /* threadid, cpu and command name */
4092 if (threadid == (uintptr_t)thread_tid(current_thread()) &&
4093 current_proc() &&
3e170ce0 4094 current_proc()->p_comm[0])
04b8595b
A
4095 command = current_proc()->p_comm;
4096 else
4097 command = "-";
4098 snprintf(kprintf_line + strlen(kprintf_line),
4099 sizeof(kprintf_line) - strlen(kprintf_line),
4100 " %-16lx %-2d %s\n",
4101 threadid, cpunum, command);
4102
4103 kprintf("%s", kprintf_line);
4104 kd_last_timstamp = timestamp;
4105}
4106#endif