1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23
24 #include <machine/spl.h>
25
26 #include <sys/errno.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc_internal.h>
30 #include <sys/vm.h>
31 #include <sys/sysctl.h>
32 #include <sys/kdebug.h>
33 #include <sys/sysproto.h>
34 #include <sys/bsdtask_info.h>
35 #include <sys/random.h>
36
37 #define HZ 100
38 #include <mach/clock_types.h>
39 #include <mach/mach_types.h>
40 #include <mach/mach_time.h>
41 #include <machine/machine_routines.h>
42
43 #if defined(__i386__) || defined(__x86_64__)
44 #include <i386/rtclock_protos.h>
45 #include <i386/mp.h>
46 #include <i386/machine_routines.h>
47 #endif
48
49 #include <kern/clock.h>
50
51 #include <kern/thread.h>
52 #include <kern/task.h>
53 #include <kern/debug.h>
54 #include <kern/kalloc.h>
55 #include <kern/cpu_data.h>
56 #include <kern/assert.h>
57 #include <kern/telemetry.h>
58 #include <vm/vm_kern.h>
59 #include <sys/lock.h>
60
61 #include <sys/malloc.h>
62 #include <sys/mcache.h>
63 #include <sys/kauth.h>
64
65 #include <sys/vnode.h>
66 #include <sys/vnode_internal.h>
67 #include <sys/fcntl.h>
68 #include <sys/file_internal.h>
69 #include <sys/ubc.h>
70 #include <sys/param.h> /* for isset() */
71
72 #include <mach/mach_host.h> /* for host_info() */
73 #include <libkern/OSAtomic.h>
74
75 #include <machine/pal_routines.h>
76
77 extern boolean_t kdebug_serial;
78 #if KDEBUG_MOJO_TRACE
79 #include <sys/kdebugevents.h>
80 static void kdebug_serial_print( /* forward */
81 uint32_t, uint32_t, uint64_t,
82 uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
83 #endif
84
85 /*
86 * IOP(s)
87 *
88 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
89 *
90 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
91 * They are registered dynamically. Each is assigned a cpu_id at registration.
92 *
93 * NOTE: IOP trace events may not use the same clock hardware as "normal"
94 * cpus. There is an effort made to synchronize the IOP timebase with the
95 * AP, but it should be understood that there may be discrepancies.
96 *
97  * Once registered, an IOP is permanent; it cannot be unloaded/unregistered.
98 * The current implementation depends on this for thread safety.
99 *
100  * New registrations occur by allocating a kd_iop struct and assigning
101 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
102 * list_head pointer resolves any races.
103 *
104 * You may safely walk the kd_iops list at any time, without holding locks.
105 *
106 * When allocating buffers, the current kd_iops head is captured. Any operations
107 * that depend on the buffer state (such as flushing IOP traces on reads,
108 * etc.) should use the captured list head. This will allow registrations to
109 * take place while trace is in use.
110 */
111
112 typedef struct kd_iop {
113 kd_callback_t callback;
114 uint32_t cpu_id;
115 uint64_t last_timestamp; /* Prevent timer rollback */
116 struct kd_iop* next;
117 } kd_iop_t;
118
119 static kd_iop_t* kd_iops = NULL;
120
121 /* XXX should have prototypes, but Mach does not provide one */
122 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
123 int cpu_number(void); /* XXX <machine/...> include path broken */
124 void commpage_update_kdebug_enable(void); /* XXX sign */
125
126 /* XXX should probably be static, but it's debugging code... */
127 int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
128 void kdbg_control_chud(int, void *);
129 int kdbg_control(int *, u_int, user_addr_t, size_t *);
130 int kdbg_readcpumap(user_addr_t, size_t *);
131 int kdbg_readcurcpumap(user_addr_t, size_t *);
132 int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
133 int kdbg_readcurthrmap(user_addr_t, size_t *);
134 int kdbg_getreg(kd_regtype *);
135 int kdbg_setreg(kd_regtype *);
136 int kdbg_setrtcdec(kd_regtype *);
137 int kdbg_setpidex(kd_regtype *);
138 int kdbg_setpid(kd_regtype *);
139 void kdbg_thrmap_init(void);
140 int kdbg_reinit(boolean_t);
141 int kdbg_bootstrap(boolean_t);
142
143 int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
144 kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);
145
146 static int kdbg_enable_typefilter(void);
147 static int kdbg_disable_typefilter(void);
148
149 static int create_buffers(boolean_t);
150 static void delete_buffers(void);
151
152 extern void IOSleep(int);
153
154 /* trace enable status */
155 unsigned int kdebug_enable = 0;
156
157 /* A static buffer to record events prior to the start of regular logging */
158 #define KD_EARLY_BUFFER_MAX 64
159 static kd_buf kd_early_buffer[KD_EARLY_BUFFER_MAX];
160 static int kd_early_index = 0;
161 static boolean_t kd_early_overflow = FALSE;
162
163 #define SLOW_NOLOG 0x01
164 #define SLOW_CHECKS 0x02
165 #define SLOW_ENTROPY 0x04 /* Obsolescent */
166 #define SLOW_CHUD 0x08
167
168 #define EVENTS_PER_STORAGE_UNIT 2048
169 #define MIN_STORAGE_UNITS_PER_CPU 4
170
171 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
172
173 union kds_ptr {
174 struct {
175 uint32_t buffer_index:21;
176 uint16_t offset:11;
177 };
178 uint32_t raw;
179 };
180
181 struct kd_storage {
182 union kds_ptr kds_next;
183 uint32_t kds_bufindx;
184 uint32_t kds_bufcnt;
185 uint32_t kds_readlast;
186 boolean_t kds_lostevents;
187 uint64_t kds_timestamp;
188
189 kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
190 };
191
192 #define MAX_BUFFER_SIZE (1024 * 1024 * 128)
193 #define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
194
195 struct kd_storage_buffers {
196 struct kd_storage *kdsb_addr;
197 uint32_t kdsb_size;
198 };
199
200 #define KDS_PTR_NULL 0xffffffff
201 struct kd_storage_buffers *kd_bufs = NULL;
202 int n_storage_units = 0;
203 int n_storage_buffers = 0;
204 int n_storage_threshold = 0;
205 int kds_waiter = 0;
206
207 #pragma pack(0)
208 struct kd_bufinfo {
209 union kds_ptr kd_list_head;
210 union kds_ptr kd_list_tail;
211 boolean_t kd_lostevents;
212 uint32_t _pad;
213 uint64_t kd_prev_timebase;
214 uint32_t num_bufs;
215 } __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
216
217 struct kd_ctrl_page_t {
218 union kds_ptr kds_free_list;
219 uint32_t enabled :1;
220 uint32_t _pad0 :31;
221 int kds_inuse_count;
222 uint32_t kdebug_flags;
223 uint32_t kdebug_slowcheck;
224 /*
225 * The number of kd_bufinfo structs allocated may not match the current
226 * number of active cpus. We capture the iops list head at initialization
227 * which we could use to calculate the number of cpus we allocated data for,
228 * unless it happens to be null. To avoid that case, we explicitly also
229 * capture a cpu count.
230 */
231 kd_iop_t* kdebug_iops;
232 uint32_t kdebug_cpus;
233 } kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
234
235 #pragma pack()
236
237 struct kd_bufinfo *kdbip = NULL;
238
239 #define KDCOPYBUF_COUNT 8192
240 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
241 kd_buf *kdcopybuf = NULL;
242
243 boolean_t kdlog_bg_trace = FALSE;
244 boolean_t kdlog_bg_trace_running = FALSE;
245 unsigned int bg_nkdbufs = 0;
246
247 unsigned int nkdbufs = 0;
248 unsigned int kdlog_beg=0;
249 unsigned int kdlog_end=0;
250 unsigned int kdlog_value1=0;
251 unsigned int kdlog_value2=0;
252 unsigned int kdlog_value3=0;
253 unsigned int kdlog_value4=0;
254
255 static lck_spin_t * kdw_spin_lock;
256 static lck_spin_t * kds_spin_lock;
257 static lck_mtx_t * kd_trace_mtx_sysctl;
258 static lck_grp_t * kd_trace_mtx_sysctl_grp;
259 static lck_attr_t * kd_trace_mtx_sysctl_attr;
260 static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
261
262 static lck_grp_t *stackshot_subsys_lck_grp;
263 static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
264 static lck_attr_t *stackshot_subsys_lck_attr;
265 static lck_mtx_t stackshot_subsys_mutex;
266
267 void *stackshot_snapbuf = NULL;
268
269 int
270 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);
271
272 int
273 stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);
274 extern void
275 kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);
276
277 extern int
278 kdp_stack_snapshot_geterror(void);
279 extern unsigned int
280 kdp_stack_snapshot_bytes_traced(void);
281
282 kd_threadmap *kd_mapptr = 0;
283 unsigned int kd_mapsize = 0;
284 unsigned int kd_mapcount = 0;
285
286 off_t RAW_file_offset = 0;
287 int RAW_file_written = 0;
288
289 #define RAW_FLUSH_SIZE (2 * 1024 * 1024)
290
291 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
292
293 #define DBG_FUNC_MASK 0xfffffffc
294
295 /* TODO: move to kdebug.h */
296 #define CLASS_MASK 0xff000000
297 #define CLASS_OFFSET 24
298 #define SUBCLASS_MASK 0x00ff0000
299 #define SUBCLASS_OFFSET 16
300 #define CSC_MASK 0xffff0000 /* class and subclass mask */
301 #define CSC_OFFSET SUBCLASS_OFFSET
302
303 #define EXTRACT_CLASS(debugid) ( (uint8_t) ( ((debugid) & CLASS_MASK ) >> CLASS_OFFSET ) )
304 #define EXTRACT_SUBCLASS(debugid) ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
305 #define EXTRACT_CSC(debugid) ( (uint16_t)( ((debugid) & CSC_MASK ) >> CSC_OFFSET ) )
306
307 #define INTERRUPT 0x01050000
308 #define MACH_vmfault 0x01300008
309 #define BSC_SysCall 0x040c0000
310 #define MACH_SysCall 0x010c0000
311 #define DBG_SCALL_MASK 0xffff0000
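312 
313 /*
314  * Worked example (illustrative only, never compiled): the EXTRACT_* macros
315  * above pull the class and subclass fields out of a debugid. For
316  * MACH_vmfault (0x01300008) they yield the values asserted below; the
317  * typefilter path later in this file indexes its bitmap by EXTRACT_CSC().
318  */
#if 0
static void
kdbg_extract_example(void)
{
	uint32_t debugid = MACH_vmfault;		/* 0x01300008 */

	assert(EXTRACT_CLASS(debugid)    == 0x01);	/* DBG_MACH */
	assert(EXTRACT_SUBCLASS(debugid) == 0x30);	/* the VM subclass */
	assert(EXTRACT_CSC(debugid)      == 0x0130);	/* class + subclass together */
}
#endif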
312
313 /* task to string structure */
314 struct tts
315 {
316 task_t task; /* from procs task */
317 pid_t pid; /* from procs p_pid */
318 char task_comm[20]; /* from procs p_comm */
319 };
320
321 typedef struct tts tts_t;
322
323 struct krt
324 {
325 kd_threadmap *map; /* pointer to the map buffer */
326 int count;
327 int maxcount;
328 struct tts *atts;
329 };
330
331 typedef struct krt krt_t;
332
333 /* This is for the CHUD toolkit call */
334 typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
335 uintptr_t arg2, uintptr_t arg3,
336 uintptr_t arg4, uintptr_t arg5);
337
338 volatile kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
339
340 __private_extern__ void stackshot_lock_init( void );
341
342 static uint8_t *type_filter_bitmap;
343
344 /*
345 * This allows kperf to swap out the global state pid when kperf ownership is
346 * passed from one process to another. It checks the old global state pid so
347 * that kperf can't accidentally steal control of trace when a non-kperf trace user has
348 * control of trace.
349 */
350 void
351 kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);
352
353 void
354 kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
355 {
356 if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
357 return;
358
359 lck_mtx_lock(kd_trace_mtx_sysctl);
360
361 if (old_pid == global_state_pid)
362 global_state_pid = new_pid;
363
364 lck_mtx_unlock(kd_trace_mtx_sysctl);
365 }
366
367 static uint32_t
368 kdbg_cpu_count(boolean_t early_trace)
369 {
370 if (early_trace) {
371 /*
372 * we've started tracing before the IOKit has even
373 * started running... just use the static max value
374 */
375 return max_ncpus;
376 }
377
378 host_basic_info_data_t hinfo;
379 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
380 host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
381 assert(hinfo.logical_cpu_max > 0);
382 return hinfo.logical_cpu_max;
383 }
384
385 #if MACH_ASSERT
386 #endif /* MACH_ASSERT */
387
388 static void
389 kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
390 {
391 while (iop) {
392 iop->callback.func(iop->callback.context, type, arg);
393 iop = iop->next;
394 }
395 }
396
397 static void
398 kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
399 {
400 int s = ml_set_interrupts_enabled(FALSE);
401 lck_spin_lock(kds_spin_lock);
402 if (enabled) {
403 kdebug_enable |= trace_type;
404 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
405 kd_ctrl_page.enabled = 1;
406 commpage_update_kdebug_enable();
407 } else {
408 kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
409 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
410 kd_ctrl_page.enabled = 0;
411 commpage_update_kdebug_enable();
412 }
413 lck_spin_unlock(kds_spin_lock);
414 ml_set_interrupts_enabled(s);
415
416 if (enabled) {
417 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
418 } else {
419 /*
420 * If you do not flush the IOP trace buffers, they can linger
421 * for a considerable period; consider code which disables and
422 * deallocates without a final sync flush.
423 */
424 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
425 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
426 }
427 }
428
429 static void
430 kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
431 {
432 int s = ml_set_interrupts_enabled(FALSE);
433 lck_spin_lock(kds_spin_lock);
434
435 if (enabled) {
436 kd_ctrl_page.kdebug_slowcheck |= slowflag;
437 kdebug_enable |= enableflag;
438 } else {
439 kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
440 kdebug_enable &= ~enableflag;
441 }
442
443 lck_spin_unlock(kds_spin_lock);
444 ml_set_interrupts_enabled(s);
445 }
446
447 void
448 disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
449 {
450 int s = ml_set_interrupts_enabled(FALSE);
451 lck_spin_lock(kds_spin_lock);
452
453 *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
454 *old_flags = kd_ctrl_page.kdebug_flags;
455
456 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
457 kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
458
459 lck_spin_unlock(kds_spin_lock);
460 ml_set_interrupts_enabled(s);
461 }
462
463 void
464 enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
465 {
466 int s = ml_set_interrupts_enabled(FALSE);
467 lck_spin_lock(kds_spin_lock);
468
469 kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
470
471 if ( !(old_slowcheck & SLOW_NOLOG))
472 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
473
474 if (lostevents == TRUE)
475 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
476
477 lck_spin_unlock(kds_spin_lock);
478 ml_set_interrupts_enabled(s);
479 }
480
481 static int
482 create_buffers(boolean_t early_trace)
483 {
484 int i;
485 int p_buffer_size;
486 int f_buffer_size;
487 int f_buffers;
488 int error = 0;
489
490 /*
491 * For the duration of this allocation, trace code will only reference
492 * kdebug_iops. Any iops registered after this enabling will not be
493 * messaged until the buffers are reallocated.
494 *
495 * TLDR; Must read kd_iops once and only once!
496 */
497 kd_ctrl_page.kdebug_iops = kd_iops;
498
499
500 /*
501 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
502 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
503 * be the list head + 1.
504 */
505
506 kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);
507
508 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
509 error = ENOSPC;
510 goto out;
511 }
512
513 if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
514 n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
515 else
516 n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
517
518 nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
519
520 f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
521 n_storage_buffers = f_buffers;
522
523 f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
524 p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
525
526 if (p_buffer_size)
527 n_storage_buffers++;
528
529 kd_bufs = NULL;
530
531 if (kdcopybuf == 0) {
532 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
533 error = ENOSPC;
534 goto out;
535 }
536 }
537 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
538 error = ENOSPC;
539 goto out;
540 }
541 bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
542
543 for (i = 0; i < f_buffers; i++) {
544 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
545 error = ENOSPC;
546 goto out;
547 }
548 bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
549
550 kd_bufs[i].kdsb_size = f_buffer_size;
551 }
552 if (p_buffer_size) {
553 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
554 error = ENOSPC;
555 goto out;
556 }
557 bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
558
559 kd_bufs[i].kdsb_size = p_buffer_size;
560 }
561 n_storage_units = 0;
562
563 for (i = 0; i < n_storage_buffers; i++) {
564 struct kd_storage *kds;
565 int n_elements;
566 int n;
567
568 n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
569 kds = kd_bufs[i].kdsb_addr;
570
571 for (n = 0; n < n_elements; n++) {
572 kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
573 kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
574
575 kd_ctrl_page.kds_free_list.buffer_index = i;
576 kd_ctrl_page.kds_free_list.offset = n;
577 }
578 n_storage_units += n_elements;
579 }
580
581 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
582
583 for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
584 kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
585 kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
586 kdbip[i].kd_lostevents = FALSE;
587 kdbip[i].num_bufs = 0;
588 }
589
590 kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
591
592 kd_ctrl_page.kds_inuse_count = 0;
593 n_storage_threshold = n_storage_units / 2;
594 out:
595 if (error)
596 delete_buffers();
597
598 return(error);
599 }
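/*
 * Sizing note (approximate, assuming a 64-bit kernel where a kd_buf is
 * 64 bytes): each storage unit holds EVENTS_PER_STORAGE_UNIT (2048) events,
 * i.e. roughly 128KB plus a small header, so N_STORAGE_UNITS_PER_BUFFER
 * works out to about a thousand units per 128MB buffer. A request for
 * nkdbufs = 1,000,000 events on an 8-cpu system thus becomes
 * 1,000,000 / 2048 = 488 storage units (well above the 8 * 4 minimum),
 * nkdbufs is rounded down to 488 * 2048 = 999,424, and all of the units
 * fit in a single kd_storage_buffers allocation.
 */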
600
601 static void
602 delete_buffers(void)
603 {
604 int i;
605
606 if (kd_bufs) {
607 for (i = 0; i < n_storage_buffers; i++) {
608 if (kd_bufs[i].kdsb_addr) {
609 kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
610 }
611 }
612 kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
613
614 kd_bufs = NULL;
615 n_storage_buffers = 0;
616 }
617 if (kdcopybuf) {
618 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
619
620 kdcopybuf = NULL;
621 }
622 kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
623
624 if (kdbip) {
625 kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
626
627 kdbip = NULL;
628 }
629 kd_ctrl_page.kdebug_iops = NULL;
630 kd_ctrl_page.kdebug_cpus = 0;
631 kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
632 }
633
634 void
635 release_storage_unit(int cpu, uint32_t kdsp_raw)
636 {
637 int s = 0;
638 struct kd_storage *kdsp_actual;
639 struct kd_bufinfo *kdbp;
640 union kds_ptr kdsp;
641
642 kdsp.raw = kdsp_raw;
643
644 s = ml_set_interrupts_enabled(FALSE);
645 lck_spin_lock(kds_spin_lock);
646
647 kdbp = &kdbip[cpu];
648
649 if (kdsp.raw == kdbp->kd_list_head.raw) {
650 /*
651 * it's possible for the storage unit pointed to
652 * by kdsp to have already been stolen... so
653 * check to see if it's still the head of the list
654 * now that we're behind the lock that protects
655 * adding and removing from the queue...
656 * since we only ever release and steal units from
657 * that position, if it's no longer the head
658  * we have nothing to do in this context
659 */
660 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
661 kdbp->kd_list_head = kdsp_actual->kds_next;
662
663 kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
664 kd_ctrl_page.kds_free_list = kdsp;
665
666 kd_ctrl_page.kds_inuse_count--;
667 }
668 lck_spin_unlock(kds_spin_lock);
669 ml_set_interrupts_enabled(s);
670 }
671
672
673 boolean_t
674 allocate_storage_unit(int cpu)
675 {
676 union kds_ptr kdsp;
677 struct kd_storage *kdsp_actual, *kdsp_next_actual;
678 struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
679 uint64_t oldest_ts, ts;
680 boolean_t retval = TRUE;
681 int s = 0;
682
683 s = ml_set_interrupts_enabled(FALSE);
684 lck_spin_lock(kds_spin_lock);
685
686 kdbp = &kdbip[cpu];
687
688 /* If someone beat us to the allocate, return success */
689 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
690 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
691
692 if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
693 goto out;
694 }
695
696 if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
697 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
698 kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
699
700 kd_ctrl_page.kds_inuse_count++;
701 } else {
702 if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
703 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
704 kdbp->kd_lostevents = TRUE;
705 retval = FALSE;
706 goto out;
707 }
708 kdbp_vict = NULL;
709 oldest_ts = (uint64_t)-1;
710
711 for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
712
713 if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
714 /*
715 * no storage unit to steal
716 */
717 continue;
718 }
719
720 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
721
722 if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
723 /*
724 * make sure we don't steal the storage unit
725 * being actively recorded to... need to
726 * move on because we don't want an out-of-order
727 * set of events showing up later
728 */
729 continue;
730 }
731 ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
732
733 if (ts < oldest_ts) {
734 /*
735 * when 'wrapping', we want to steal the
736 * storage unit that has the 'earliest' time
737 * associated with it (first event time)
738 */
739 oldest_ts = ts;
740 kdbp_vict = kdbp_try;
741 }
742 }
743 if (kdbp_vict == NULL) {
744 kdebug_enable = 0;
745 kd_ctrl_page.enabled = 0;
746 commpage_update_kdebug_enable();
747 retval = FALSE;
748 goto out;
749 }
750 kdsp = kdbp_vict->kd_list_head;
751 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
752 kdbp_vict->kd_list_head = kdsp_actual->kds_next;
753
754 if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
755 kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
756 kdsp_next_actual->kds_lostevents = TRUE;
757 } else
758 kdbp_vict->kd_lostevents = TRUE;
759
760 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
761 }
762 kdsp_actual->kds_timestamp = mach_absolute_time();
763 kdsp_actual->kds_next.raw = KDS_PTR_NULL;
764 kdsp_actual->kds_bufcnt = 0;
765 kdsp_actual->kds_readlast = 0;
766
767 kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
768 kdbp->kd_lostevents = FALSE;
769 kdsp_actual->kds_bufindx = 0;
770
771 if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
772 kdbp->kd_list_head = kdsp;
773 else
774 POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
775 kdbp->kd_list_tail = kdsp;
776 out:
777 lck_spin_unlock(kds_spin_lock);
778 ml_set_interrupts_enabled(s);
779
780 return (retval);
781 }
782
783 int
784 kernel_debug_register_callback(kd_callback_t callback)
785 {
786 kd_iop_t* iop;
787 if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
788 memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
789
790 /*
791 * <rdar://problem/13351477> Some IOP clients are not providing a name.
792 *
793 * Remove when fixed.
794 */
795 {
796 boolean_t is_valid_name = FALSE;
797 for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
798 /* This is roughly isprintable(c) */
799 if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
800 continue;
801 if (callback.iop_name[length] == 0) {
802 if (length)
803 is_valid_name = TRUE;
804 break;
805 }
806 }
807
808 if (!is_valid_name) {
809 strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
810 }
811 }
812
813 iop->last_timestamp = 0;
814
815 do {
816 /*
817 * We use two pieces of state, the old list head
818 * pointer, and the value of old_list_head->cpu_id.
819 * If we read kd_iops more than once, it can change
820 * between reads.
821 *
822 * TLDR; Must not read kd_iops more than once per loop.
823 */
824 iop->next = kd_iops;
825 iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
826
827 /*
828 * Header says OSCompareAndSwapPtr has a memory barrier
829 */
830 } while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));
831
832 return iop->cpu_id;
833 }
834
835 return 0;
836 }
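/*
 * Registration sketch (illustrative only, never compiled; the my_iop_* names
 * are hypothetical, and the field names follow the kd_callback_t usage visible
 * in this file). A coprocessor driver registers once, keeps the returned
 * cpu_id, and stamps it on every event it later injects through
 * kernel_debug_enter() below. A return of 0 indicates the registration failed.
 */
#if 0
static uint32_t my_iop_cpu_id;		/* hypothetical driver state */

static void
my_iop_kd_callback(__unused void *context, kd_callback_type reason, __unused void *arg)
{
	if (reason == KD_CALLBACK_SYNC_FLUSH) {
		/* push any locally buffered events into xnu via kernel_debug_enter() */
	}
}

static void
my_iop_enable_tracing(void)
{
	kd_callback_t cb = {
		.func		= my_iop_kd_callback,
		.context	= NULL,
		.iop_name	= "MYIOP"
	};
	my_iop_cpu_id = kernel_debug_register_callback(cb);
}
#endif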
837
838 void
839 kernel_debug_enter(
840 uint32_t coreid,
841 uint32_t debugid,
842 uint64_t timestamp,
843 uintptr_t arg1,
844 uintptr_t arg2,
845 uintptr_t arg3,
846 uintptr_t arg4,
847 uintptr_t threadid
848 )
849 {
850 uint32_t bindx;
851 kd_buf *kd;
852 struct kd_bufinfo *kdbp;
853 struct kd_storage *kdsp_actual;
854 union kds_ptr kds_raw;
855
856 if (kd_ctrl_page.kdebug_slowcheck) {
857
858 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
859 goto out1;
860
861 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
862 if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
863 goto record_event;
864 goto out1;
865 }
866 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
867 if (debugid >= kdlog_beg && debugid <= kdlog_end)
868 goto record_event;
869 goto out1;
870 }
871 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
872 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
873 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
874 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
875 (debugid & DBG_FUNC_MASK) != kdlog_value4)
876 goto out1;
877 }
878 }
879
880 record_event:
881
882 disable_preemption();
883
884 if (kd_ctrl_page.enabled == 0)
885 goto out;
886
887 kdbp = &kdbip[coreid];
888 timestamp &= KDBG_TIMESTAMP_MASK;
889
890 #if KDEBUG_MOJO_TRACE
891 if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
892 kdebug_serial_print(coreid, debugid, timestamp,
893 arg1, arg2, arg3, arg4, threadid);
894 #endif
895
896 retry_q:
897 kds_raw = kdbp->kd_list_tail;
898
899 if (kds_raw.raw != KDS_PTR_NULL) {
900 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
901 bindx = kdsp_actual->kds_bufindx;
902 } else
903 kdsp_actual = NULL;
904
905 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
906 if (allocate_storage_unit(coreid) == FALSE) {
907 /*
908 * this can only happen if wrapping
909 * has been disabled
910 */
911 goto out;
912 }
913 goto retry_q;
914 }
915 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
916 goto retry_q;
917
918 // IOP entries can be allocated before xnu allocates and inits the buffer
919 if (timestamp < kdsp_actual->kds_timestamp)
920 kdsp_actual->kds_timestamp = timestamp;
921
922 kd = &kdsp_actual->kds_records[bindx];
923
924 kd->debugid = debugid;
925 kd->arg1 = arg1;
926 kd->arg2 = arg2;
927 kd->arg3 = arg3;
928 kd->arg4 = arg4;
929 kd->arg5 = threadid;
930
931 kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
932
933 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
934 out:
935 enable_preemption();
936 out1:
937 if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
938 boolean_t need_kds_wakeup = FALSE;
939 int s;
940
941 /*
942 * try to take the lock here to synchronize with the
943 * waiter entering the blocked state... use the try
944 * mode to prevent deadlocks caused by re-entering this
945 * routine due to various trace points triggered in the
946 * lck_spin_sleep_xxxx routines used to actually enter
947 * our wait condition... no problem if we fail,
948 * there will be lots of additional events coming in that
949 * will eventually succeed in grabbing this lock
950 */
951 s = ml_set_interrupts_enabled(FALSE);
952
953 if (lck_spin_try_lock(kdw_spin_lock)) {
954
955 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
956 kds_waiter = 0;
957 need_kds_wakeup = TRUE;
958 }
959 lck_spin_unlock(kdw_spin_lock);
960
961 ml_set_interrupts_enabled(s);
962
963 if (need_kds_wakeup == TRUE)
964 wakeup(&kds_waiter);
965 }
966 }
967 }
968
969
970
971 static void
972 kernel_debug_internal(
973 uint32_t debugid,
974 uintptr_t arg1,
975 uintptr_t arg2,
976 uintptr_t arg3,
977 uintptr_t arg4,
978 uintptr_t arg5)
979 {
980 struct proc *curproc;
981 uint64_t now;
982 uint32_t bindx;
983 boolean_t s;
984 kd_buf *kd;
985 int cpu;
986 struct kd_bufinfo *kdbp;
987 struct kd_storage *kdsp_actual;
988 union kds_ptr kds_raw;
989
990
991
992 if (kd_ctrl_page.kdebug_slowcheck) {
993
994 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
995 kd_chudhook_fn chudhook;
996 /*
997 * Mask interrupts to minimize the interval across
998 * which the driver providing the hook could be
999 * unloaded.
1000 */
1001 s = ml_set_interrupts_enabled(FALSE);
1002 chudhook = kdebug_chudhook;
1003 if (chudhook)
1004 chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
1005 ml_set_interrupts_enabled(s);
1006 }
1007 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
1008 goto out1;
1009
1010 if ( !ml_at_interrupt_context()) {
1011 if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
1012 /*
1013 * If kdebug flag is not set for current proc, return
1014 */
1015 curproc = current_proc();
1016
1017 if ((curproc && !(curproc->p_kdebug)) &&
1018 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1019 (debugid >> 24 != DBG_TRACE))
1020 goto out1;
1021 }
1022 else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
1023 /*
1024 * If kdebug flag is set for current proc, return
1025 */
1026 curproc = current_proc();
1027
1028 if ((curproc && curproc->p_kdebug) &&
1029 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1030 (debugid >> 24 != DBG_TRACE))
1031 goto out1;
1032 }
1033 }
1034
1035 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1036 /* Always record trace system info */
1037 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1038 goto record_event;
1039
1040 if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
1041 goto record_event;
1042 goto out1;
1043 }
1044 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
1045 /* Always record trace system info */
1046 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1047 goto record_event;
1048
1049 if (debugid < kdlog_beg || debugid > kdlog_end)
1050 goto out1;
1051 }
1052 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
1053 /* Always record trace system info */
1054 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1055 goto record_event;
1056
1057 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
1058 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
1059 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
1060 (debugid & DBG_FUNC_MASK) != kdlog_value4)
1061 goto out1;
1062 }
1063 }
1064 record_event:
1065 disable_preemption();
1066
1067 if (kd_ctrl_page.enabled == 0)
1068 goto out;
1069
1070 cpu = cpu_number();
1071 kdbp = &kdbip[cpu];
1072
1073 #if KDEBUG_MOJO_TRACE
1074 if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
1075 kdebug_serial_print(cpu, debugid,
1076 mach_absolute_time() & KDBG_TIMESTAMP_MASK,
1077 arg1, arg2, arg3, arg4, arg5);
1078 #endif
1079
1080 retry_q:
1081 kds_raw = kdbp->kd_list_tail;
1082
1083 if (kds_raw.raw != KDS_PTR_NULL) {
1084 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
1085 bindx = kdsp_actual->kds_bufindx;
1086 } else
1087 kdsp_actual = NULL;
1088
1089 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
1090 if (allocate_storage_unit(cpu) == FALSE) {
1091 /*
1092 * this can only happen if wrapping
1093 * has been disabled
1094 */
1095 goto out;
1096 }
1097 goto retry_q;
1098 }
1099 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
1100
1101 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
1102 goto retry_q;
1103
1104 kd = &kdsp_actual->kds_records[bindx];
1105
1106 kd->debugid = debugid;
1107 kd->arg1 = arg1;
1108 kd->arg2 = arg2;
1109 kd->arg3 = arg3;
1110 kd->arg4 = arg4;
1111 kd->arg5 = arg5;
1112
1113 kdbg_set_timestamp_and_cpu(kd, now, cpu);
1114
1115 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
1116 out:
1117 enable_preemption();
1118 out1:
1119 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1120 uint32_t etype;
1121 uint32_t stype;
1122
1123 etype = debugid & DBG_FUNC_MASK;
1124 stype = debugid & DBG_SCALL_MASK;
1125
1126 if (etype == INTERRUPT || etype == MACH_vmfault ||
1127 stype == BSC_SysCall || stype == MACH_SysCall) {
1128
1129 boolean_t need_kds_wakeup = FALSE;
1130
1131 /*
1132 * try to take the lock here to synchronize with the
1133 * waiter entering the blocked state... use the try
1134 * mode to prevent deadlocks caused by re-entering this
1135 * routine due to various trace points triggered in the
1136 * lck_spin_sleep_xxxx routines used to actually enter
1137 * one of our 2 wait conditions... no problem if we fail,
1138 * there will be lots of additional events coming in that
1139 * will eventually succeed in grabbing this lock
1140 */
1141 s = ml_set_interrupts_enabled(FALSE);
1142
1143 if (lck_spin_try_lock(kdw_spin_lock)) {
1144
1145 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1146 kds_waiter = 0;
1147 need_kds_wakeup = TRUE;
1148 }
1149 lck_spin_unlock(kdw_spin_lock);
1150 }
1151 ml_set_interrupts_enabled(s);
1152
1153 if (need_kds_wakeup == TRUE)
1154 wakeup(&kds_waiter);
1155 }
1156 }
1157 }
1158
1159 void
1160 kernel_debug(
1161 uint32_t debugid,
1162 uintptr_t arg1,
1163 uintptr_t arg2,
1164 uintptr_t arg3,
1165 uintptr_t arg4,
1166 __unused uintptr_t arg5)
1167 {
1168 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
1169 }
1170
1171 void
1172 kernel_debug1(
1173 uint32_t debugid,
1174 uintptr_t arg1,
1175 uintptr_t arg2,
1176 uintptr_t arg3,
1177 uintptr_t arg4,
1178 uintptr_t arg5)
1179 {
1180 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
1181 }
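/*
 * Usage sketch (illustrative only, never compiled; the event code below is
 * hypothetical). In-kernel callers normally emit events through the
 * KERNEL_DEBUG_CONSTANT() macro from <sys/kdebug.h>, which is compiled out at
 * lower kdebug levels and otherwise lands in kernel_debug() above. Wrapping an
 * operation in DBG_FUNC_START/DBG_FUNC_END lets trace tools pair the two
 * records and compute a duration.
 */
#if 0
static void
example_traced_operation(void)
{
	uint32_t code = KDBG_CODE(DBG_MISC, 0, 1);	/* hypothetical event code */

	KERNEL_DEBUG_CONSTANT(code | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* ... the work being measured ... */
	KERNEL_DEBUG_CONSTANT(code | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
#endif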
1182
1183 void
1184 kernel_debug_string(const char *message)
1185 {
1186 uintptr_t arg[4] = {0, 0, 0, 0};
1187
1188 /* Stuff the message string in the args and log it. */
1189 strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
1190 KERNEL_DEBUG_EARLY(
1191 TRACE_INFO_STRING,
1192 arg[0], arg[1], arg[2], arg[3]);
1193 }
1194
1195 extern int master_cpu; /* MACH_KERNEL_PRIVATE */
1196 /*
1197 * Used prior to start_kern_tracing() being called.
1198 * Log temporarily into a static buffer.
1199 */
1200 void
1201 kernel_debug_early(
1202 uint32_t debugid,
1203 uintptr_t arg1,
1204 uintptr_t arg2,
1205 uintptr_t arg3,
1206 uintptr_t arg4)
1207 {
1208 /* If tracing is already initialized, use it */
1209 if (nkdbufs) {
1210 KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
1211 return;
1212 }
1213
1214 /* Do nothing if the buffer is full or we're not on the boot cpu */
1215 kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
1216 if (kd_early_overflow ||
1217 cpu_number() != master_cpu)
1218 return;
1219
1220 kd_early_buffer[kd_early_index].debugid = debugid;
1221 kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
1222 kd_early_buffer[kd_early_index].arg1 = arg1;
1223 kd_early_buffer[kd_early_index].arg2 = arg2;
1224 kd_early_buffer[kd_early_index].arg3 = arg3;
1225 kd_early_buffer[kd_early_index].arg4 = arg4;
1226 kd_early_buffer[kd_early_index].arg5 = 0;
1227 kd_early_index++;
1228 }
1229
1230 /*
1231  * Transfer the contents of the temporary buffer into the trace buffers.
1232 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
1233 * when mach_absolute_time is set to 0.
1234 */
1235 static void
1236 kernel_debug_early_end(void)
1237 {
1238 int i;
1239
1240 if (cpu_number() != master_cpu)
1241 		panic("kernel_debug_early_end() not called on boot processor");
1242
1243 /* Fake sentinel marking the start of kernel time relative to TSC */
1244 kernel_debug_enter(
1245 0,
1246 TRACE_TIMESTAMPS,
1247 0,
1248 (uint32_t)(tsc_rebase_abs_time >> 32),
1249 (uint32_t)tsc_rebase_abs_time,
1250 0,
1251 0,
1252 0);
1253 for (i = 0; i < kd_early_index; i++) {
1254 kernel_debug_enter(
1255 0,
1256 kd_early_buffer[i].debugid,
1257 kd_early_buffer[i].timestamp,
1258 kd_early_buffer[i].arg1,
1259 kd_early_buffer[i].arg2,
1260 kd_early_buffer[i].arg3,
1261 kd_early_buffer[i].arg4,
1262 0);
1263 }
1264
1265 /* Cut events-lost event on overflow */
1266 if (kd_early_overflow)
1267 KERNEL_DEBUG_CONSTANT(
1268 TRACE_LOST_EVENTS, 0, 0, 0, 0, 0);
1269
1270 /* This trace marks the start of kernel tracing */
1271 kernel_debug_string("early trace done");
1272 }
1273
1274 /*
1275 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
1276 */
1277 int
1278 kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
1279 {
1280 struct kdebug_trace64_args uap64;
1281
1282 uap64.code = uap->code;
1283 uap64.arg1 = uap->arg1;
1284 uap64.arg2 = uap->arg2;
1285 uap64.arg3 = uap->arg3;
1286 uap64.arg4 = uap->arg4;
1287
1288 return kdebug_trace64(p, &uap64, retval);
1289 }
1290
1291 /*
1292 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
1293 */
1294 int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
1295 {
1296 uint8_t code_class;
1297
1298 /*
1299 	 * Not all classes are supported for injection from userspace, especially ones used by the core
1300 * kernel tracing infrastructure.
1301 */
1302 code_class = EXTRACT_CLASS(uap->code);
1303
1304 switch (code_class) {
1305 case DBG_TRACE:
1306 return EPERM;
1307 }
1308
1309 if ( __probable(kdebug_enable == 0) )
1310 return(0);
1311
1312 kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
1313
1314 return(0);
1315 }
1316
1317 static void
1318 kdbg_lock_init(void)
1319 {
1320 if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
1321 return;
1322
1323 /*
1324 * allocate lock group attribute and group
1325 */
1326 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
1327 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
1328
1329 /*
1330 * allocate the lock attribute
1331 */
1332 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
1333
1334
1335 /*
1336 * allocate and initialize mutex's
1337 */
1338 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1339 kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1340 kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1341
1342 kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
1343 }
1344
1345
1346 int
1347 kdbg_bootstrap(boolean_t early_trace)
1348 {
1349 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
1350
1351 return (create_buffers(early_trace));
1352 }
1353
1354 int
1355 kdbg_reinit(boolean_t early_trace)
1356 {
1357 int ret = 0;
1358
1359 /*
1360 * Disable trace collecting
1361 * First make sure we're not in
1362 * the middle of cutting a trace
1363 */
1364 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1365
1366 /*
1367 * make sure the SLOW_NOLOG is seen
1368 * by everyone that might be trying
1369 * to cut a trace..
1370 */
1371 IOSleep(100);
1372
1373 delete_buffers();
1374
1375 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
1376 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1377 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1378 kd_mapsize = 0;
1379 kd_mapptr = (kd_threadmap *) 0;
1380 kd_mapcount = 0;
1381 }
1382 ret = kdbg_bootstrap(early_trace);
1383
1384 RAW_file_offset = 0;
1385 RAW_file_written = 0;
1386
1387 return(ret);
1388 }
1389
1390 void
1391 kdbg_trace_data(struct proc *proc, long *arg_pid)
1392 {
1393 if (!proc)
1394 *arg_pid = 0;
1395 else
1396 *arg_pid = proc->p_pid;
1397 }
1398
1399
1400 void
1401 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1402 {
1403 char *dbg_nameptr;
1404 int dbg_namelen;
1405 long dbg_parms[4];
1406
1407 if (!proc) {
1408 *arg1 = 0;
1409 *arg2 = 0;
1410 *arg3 = 0;
1411 *arg4 = 0;
1412 return;
1413 }
1414 /*
1415 * Collect the pathname for tracing
1416 */
1417 dbg_nameptr = proc->p_comm;
1418 dbg_namelen = (int)strlen(proc->p_comm);
1419 dbg_parms[0]=0L;
1420 dbg_parms[1]=0L;
1421 dbg_parms[2]=0L;
1422 dbg_parms[3]=0L;
1423
1424 if(dbg_namelen > (int)sizeof(dbg_parms))
1425 dbg_namelen = (int)sizeof(dbg_parms);
1426
1427 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1428
1429 *arg1=dbg_parms[0];
1430 *arg2=dbg_parms[1];
1431 *arg3=dbg_parms[2];
1432 *arg4=dbg_parms[3];
1433 }
1434
1435 static void
1436 kdbg_resolve_map(thread_t th_act, void *opaque)
1437 {
1438 kd_threadmap *mapptr;
1439 krt_t *t = (krt_t *)opaque;
1440
1441 if (t->count < t->maxcount) {
1442 mapptr = &t->map[t->count];
1443 mapptr->thread = (uintptr_t)thread_tid(th_act);
1444
1445 (void) strlcpy (mapptr->command, t->atts->task_comm,
1446 sizeof(t->atts->task_comm));
1447 /*
1448 * Some kernel threads have no associated pid.
1449 * We still need to mark the entry as valid.
1450 */
1451 if (t->atts->pid)
1452 mapptr->valid = t->atts->pid;
1453 else
1454 mapptr->valid = 1;
1455
1456 t->count++;
1457 }
1458 }
1459
1460 /*
1461 *
1462 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
1463 *
1464 * You may provide a buffer and size, or if you set the buffer to NULL, a
1465 * buffer of sufficient size will be allocated.
1466 *
1467 * If you provide a buffer and it is too small, sets cpumap_size to the number
1468 * of bytes required and returns EINVAL.
1469 *
1470 * On success, if you provided a buffer, cpumap_size is set to the number of
1471 * bytes written. If you did not provide a buffer, cpumap is set to the newly
1472 * allocated buffer and cpumap_size is set to the number of bytes allocated.
1473 *
1474 * NOTE: It may seem redundant to pass both iops and a cpu_count.
1475 *
1476 * We may be reporting data from "now", or from the "past".
1477 *
1478 * The "now" data would be for something like kdbg_readcurcpumap().
1479 * The "past" data would be for kdbg_readcpumap().
1480 *
1481 * If we do not pass both iops and cpu_count, and iops is NULL, this function
1482 * will need to read "now" state to get the number of cpus, which would be in
1483 * error if we were reporting "past" state.
1484 */
1485
1486 int
1487 kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
1488 {
1489 assert(cpumap);
1490 assert(cpumap_size);
1491 assert(cpu_count);
1492 assert(!iops || iops->cpu_id + 1 == cpu_count);
1493
1494 uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
1495 uint32_t bytes_available = *cpumap_size;
1496 *cpumap_size = bytes_needed;
1497
1498 if (*cpumap == NULL) {
1499 if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
1500 return ENOMEM;
1501 }
1502 } else if (bytes_available < bytes_needed) {
1503 return EINVAL;
1504 }
1505
1506 kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
1507
1508 header->version_no = RAW_VERSION1;
1509 header->cpu_count = cpu_count;
1510
1511 kd_cpumap* cpus = (kd_cpumap*)&header[1];
1512
1513 int32_t index = cpu_count - 1;
1514 while (iops) {
1515 cpus[index].cpu_id = iops->cpu_id;
1516 cpus[index].flags = KDBG_CPUMAP_IS_IOP;
1517 bzero(cpus[index].name, sizeof(cpus->name));
1518 strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
1519
1520 iops = iops->next;
1521 index--;
1522 }
1523
1524 while (index >= 0) {
1525 cpus[index].cpu_id = index;
1526 cpus[index].flags = 0;
1527 bzero(cpus[index].name, sizeof(cpus->name));
1528 strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
1529
1530 index--;
1531 }
1532
1533 return KERN_SUCCESS;
1534 }
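/*
 * Calling-convention sketch (illustrative only, never compiled;
 * example_capture_cpumap is hypothetical). Per the block comment above,
 * passing *cpumap == NULL asks the routine to allocate a buffer of the
 * required size; the caller then owns it and must kmem_free() it, exactly as
 * kdbg_readcpumap() does further down. Only meaningful once the trace buffers
 * exist (KDBG_BUFINIT), since that is when the iops list and cpu count are
 * captured.
 */
#if 0
static int
example_capture_cpumap(uint8_t **map, uint32_t *size)
{
	*map = NULL;	/* request a kernel-allocated buffer */
	*size = 0;

	/* uses the iops list and cpu count captured when the buffers were created */
	return kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops,
					 kd_ctrl_page.kdebug_cpus, map, size);
}
#endif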
1535
1536 void
1537 kdbg_thrmap_init(void)
1538 {
1539 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
1540 return;
1541
1542 kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
1543
1544 if (kd_mapptr)
1545 kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
1546 }
1547
1548
1549 kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
1550 {
1551 kd_threadmap *mapptr;
1552 struct proc *p;
1553 struct krt akrt;
1554 int tts_count; /* number of task-to-string structures */
1555 struct tts *tts_mapptr;
1556 unsigned int tts_mapsize = 0;
1557 int i;
1558 vm_offset_t kaddr;
1559
1560 /*
1561 * need to use PROC_SCANPROCLIST with proc_iterate
1562 */
1563 proc_list_lock();
1564
1565 /*
1566 * Calculate the sizes of map buffers
1567 */
1568 for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
1569 *mapcount += get_task_numacts((task_t)p->task);
1570 tts_count++;
1571 }
1572 proc_list_unlock();
1573
1574 /*
1575 * The proc count could change during buffer allocation,
1576 * so introduce a small fudge factor to bump up the
1577 * buffer sizes. This gives new tasks some chance of
1578 	 * making it into the tables. Bump up by 25%.
1579 */
1580 *mapcount += *mapcount/4;
1581 tts_count += tts_count/4;
1582
1583 *mapsize = *mapcount * sizeof(kd_threadmap);
1584
1585 if (count && count < *mapcount)
1586 return (0);
1587
1588 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize) == KERN_SUCCESS)) {
1589 bzero((void *)kaddr, *mapsize);
1590 mapptr = (kd_threadmap *)kaddr;
1591 } else
1592 return (0);
1593
1594 tts_mapsize = tts_count * sizeof(struct tts);
1595
1596 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
1597 bzero((void *)kaddr, tts_mapsize);
1598 tts_mapptr = (struct tts *)kaddr;
1599 } else {
1600 kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
1601
1602 return (0);
1603 }
1604 /*
1605 * We need to save the procs command string
1606 * and take a reference for each task associated
1607 * with a valid process
1608 */
1609
1610 proc_list_lock();
1611
1612 /*
1613 * should use proc_iterate
1614 */
1615 for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
1616 if (p->p_lflag & P_LEXIT)
1617 continue;
1618
1619 if (p->task) {
1620 task_reference(p->task);
1621 tts_mapptr[i].task = p->task;
1622 tts_mapptr[i].pid = p->p_pid;
1623 (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
1624 i++;
1625 }
1626 }
1627 tts_count = i;
1628
1629 proc_list_unlock();
1630
1631 /*
1632 * Initialize thread map data
1633 */
1634 akrt.map = mapptr;
1635 akrt.count = 0;
1636 akrt.maxcount = *mapcount;
1637
1638 for (i = 0; i < tts_count; i++) {
1639 akrt.atts = &tts_mapptr[i];
1640 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
1641 task_deallocate((task_t) tts_mapptr[i].task);
1642 }
1643 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
1644
1645 *mapcount = akrt.count;
1646
1647 return (mapptr);
1648 }
1649
1650 static void
1651 kdbg_clear(void)
1652 {
1653 /*
1654 * Clean up the trace buffer
1655 * First make sure we're not in
1656 * the middle of cutting a trace
1657 */
1658 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1659
1660 /*
1661 * make sure the SLOW_NOLOG is seen
1662 * by everyone that might be trying
1663 * to cut a trace..
1664 */
1665 IOSleep(100);
1666
1667 global_state_pid = -1;
1668 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1669 kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
1670 kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
1671
1672 kdbg_disable_typefilter();
1673
1674 delete_buffers();
1675 nkdbufs = 0;
1676
1677 /* Clean up the thread map buffer */
1678 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1679 if (kd_mapptr) {
1680 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1681 kd_mapptr = (kd_threadmap *) 0;
1682 }
1683 kd_mapsize = 0;
1684 kd_mapcount = 0;
1685
1686 RAW_file_offset = 0;
1687 RAW_file_written = 0;
1688 }
1689
1690 int
1691 kdbg_setpid(kd_regtype *kdr)
1692 {
1693 pid_t pid;
1694 int flag, ret=0;
1695 struct proc *p;
1696
1697 pid = (pid_t)kdr->value1;
1698 flag = (int)kdr->value2;
1699
1700 if (pid > 0) {
1701 if ((p = proc_find(pid)) == NULL)
1702 ret = ESRCH;
1703 else {
1704 if (flag == 1) {
1705 /*
1706 * turn on pid check for this and all pids
1707 */
1708 kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
1709 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1710 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1711
1712 p->p_kdebug = 1;
1713 } else {
1714 /*
1715 * turn off pid check for this pid value
1716 * Don't turn off all pid checking though
1717 *
1718 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1719 */
1720 p->p_kdebug = 0;
1721 }
1722 proc_rele(p);
1723 }
1724 }
1725 else
1726 ret = EINVAL;
1727
1728 return(ret);
1729 }
1730
1731 /* This is for pid exclusion in the trace buffer */
1732 int
1733 kdbg_setpidex(kd_regtype *kdr)
1734 {
1735 pid_t pid;
1736 int flag, ret=0;
1737 struct proc *p;
1738
1739 pid = (pid_t)kdr->value1;
1740 flag = (int)kdr->value2;
1741
1742 if (pid > 0) {
1743 if ((p = proc_find(pid)) == NULL)
1744 ret = ESRCH;
1745 else {
1746 if (flag == 1) {
1747 /*
1748 * turn on pid exclusion
1749 */
1750 kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
1751 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1752 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1753
1754 p->p_kdebug = 1;
1755 }
1756 else {
1757 /*
1758 * turn off pid exclusion for this pid value
1759 * Don't turn off all pid exclusion though
1760 *
1761 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1762 */
1763 p->p_kdebug = 0;
1764 }
1765 proc_rele(p);
1766 }
1767 } else
1768 ret = EINVAL;
1769
1770 return(ret);
1771 }
1772
1773
1774 /*
1775 * This is for setting a maximum decrementer value
1776 */
1777 int
1778 kdbg_setrtcdec(kd_regtype *kdr)
1779 {
1780 int ret = 0;
1781 natural_t decval;
1782
1783 decval = (natural_t)kdr->value1;
1784
1785 if (decval && decval < KDBG_MINRTCDEC)
1786 ret = EINVAL;
1787 else
1788 ret = ENOTSUP;
1789
1790 return(ret);
1791 }
1792
1793 int
1794 kdbg_enable_typefilter(void)
1795 {
1796 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1797 /* free the old filter */
1798 kdbg_disable_typefilter();
1799 }
1800
1801 if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
1802 return ENOSPC;
1803 }
1804
1805 bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1806
1807 /* Turn off range and value checks */
1808 kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
1809
1810 /* Enable filter checking */
1811 kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
1812 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1813 return 0;
1814 }
1815
1816 int
1817 kdbg_disable_typefilter(void)
1818 {
1819 /* Disable filter checking */
1820 kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
1821
1822 /* Turn off slow checks unless pid checks are using them */
1823 if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
1824 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1825 else
1826 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1827
1828 if(type_filter_bitmap == NULL)
1829 return 0;
1830
1831 vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
1832 type_filter_bitmap = NULL;
1833
1834 kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1835 return 0;
1836 }
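/*
 * Filter-indexing sketch (illustrative only, never compiled;
 * example_allow_subclass is hypothetical). The bitmap allocated above carries
 * one bit per class/subclass pair, and the fast path in
 * kernel_debug_internal() tests it with
 * isset(type_filter_bitmap, EXTRACT_CSC(debugid)); the actual filter contents
 * arrive from userspace through the trace control interface. Marking a pair
 * as traced is just a setbit() on the same (class << 8) | subclass index.
 */
#if 0
static void
example_allow_subclass(uint8_t dbg_class, uint8_t dbg_subclass)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK)
		setbit(type_filter_bitmap, ((uint32_t)dbg_class << 8) | dbg_subclass);
}
#endif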
1837
1838 int
1839 kdbg_setreg(kd_regtype * kdr)
1840 {
1841 int ret=0;
1842 unsigned int val_1, val_2, val;
1843 switch (kdr->type) {
1844
1845 case KDBG_CLASSTYPE :
1846 val_1 = (kdr->value1 & 0xff);
1847 val_2 = (kdr->value2 & 0xff);
1848 kdlog_beg = (val_1<<24);
1849 kdlog_end = (val_2<<24);
1850 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1851 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1852 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1853 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1854 break;
1855 case KDBG_SUBCLSTYPE :
1856 val_1 = (kdr->value1 & 0xff);
1857 val_2 = (kdr->value2 & 0xff);
1858 val = val_2 + 1;
1859 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1860 kdlog_end = ((val_1<<24) | (val << 16));
1861 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1862 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1863 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1864 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1865 break;
1866 case KDBG_RANGETYPE :
1867 kdlog_beg = (kdr->value1);
1868 kdlog_end = (kdr->value2);
1869 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1870 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1871 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1872 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1873 break;
1874 case KDBG_VALCHECK:
1875 kdlog_value1 = (kdr->value1);
1876 kdlog_value2 = (kdr->value2);
1877 kdlog_value3 = (kdr->value3);
1878 kdlog_value4 = (kdr->value4);
1879 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1880 kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
1881 kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
1882 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1883 break;
1884 case KDBG_TYPENONE :
1885 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1886
1887 if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
1888 KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
1889 KDBG_TYPEFILTER_CHECK)) )
1890 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1891 else
1892 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1893
1894 kdlog_beg = 0;
1895 kdlog_end = 0;
1896 break;
1897 default :
1898 ret = EINVAL;
1899 break;
1900 }
1901 return(ret);
1902 }
1903
1904 int
1905 kdbg_getreg(__unused kd_regtype * kdr)
1906 {
1907 #if 0
1908 int i,j, ret=0;
1909 unsigned int val_1, val_2, val;
1910
1911 switch (kdr->type) {
1912 case KDBG_CLASSTYPE :
1913 val_1 = (kdr->value1 & 0xff);
1914 val_2 = val_1 + 1;
1915 kdlog_beg = (val_1<<24);
1916 kdlog_end = (val_2<<24);
1917 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1918 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1919 break;
1920 case KDBG_SUBCLSTYPE :
1921 val_1 = (kdr->value1 & 0xff);
1922 val_2 = (kdr->value2 & 0xff);
1923 val = val_2 + 1;
1924 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1925 kdlog_end = ((val_1<<24) | (val << 16));
1926 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1927 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1928 break;
1929 case KDBG_RANGETYPE :
1930 kdlog_beg = (kdr->value1);
1931 kdlog_end = (kdr->value2);
1932 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1933 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1934 break;
1935 case KDBG_TYPENONE :
1936 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1937 kdlog_beg = 0;
1938 kdlog_end = 0;
1939 break;
1940 default :
1941 ret = EINVAL;
1942 break;
1943 }
1944 #endif /* 0 */
1945 return(EINVAL);
1946 }
1947
1948 int
1949 kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
1950 {
1951 uint8_t* cpumap = NULL;
1952 uint32_t cpumap_size = 0;
1953 int ret = KERN_SUCCESS;
1954
1955 if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
1956 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
1957 if (user_cpumap) {
1958 size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
1959 if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
1960 ret = EFAULT;
1961 }
1962 }
1963 *user_cpumap_size = cpumap_size;
1964 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
1965 } else
1966 ret = EINVAL;
1967 } else
1968 ret = EINVAL;
1969
1970 return (ret);
1971 }
1972
1973 int
1974 kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
1975 {
1976 kd_threadmap *mapptr;
1977 unsigned int mapsize;
1978 unsigned int mapcount;
1979 unsigned int count = 0;
1980 int ret = 0;
1981
1982 count = *bufsize/sizeof(kd_threadmap);
1983 *bufsize = 0;
1984
1985 if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
1986 if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
1987 ret = EFAULT;
1988 else
1989 *bufsize = (mapcount * sizeof(kd_threadmap));
1990
1991 kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
1992 } else
1993 ret = EINVAL;
1994
1995 return (ret);
1996 }
1997
1998 int
1999 kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
2000 {
2001 int avail = *number;
2002 int ret = 0;
2003 uint32_t count = 0;
2004 unsigned int mapsize;
2005
2006 count = avail/sizeof (kd_threadmap);
2007
2008 mapsize = kd_mapcount * sizeof(kd_threadmap);
2009
2010 if (count && (count <= kd_mapcount))
2011 {
2012 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
2013 {
2014 if (*number < mapsize)
2015 ret = EINVAL;
2016 else
2017 {
2018 if (vp)
2019 {
2020 RAW_header header;
2021 clock_sec_t secs;
2022 clock_usec_t usecs;
2023 char *pad_buf;
2024 uint32_t pad_size;
2025 uint32_t extra_thread_count = 0;
2026 uint32_t cpumap_size;
2027
2028 /*
2029 * To write a RAW_VERSION1+ file, we
2030 * must embed a cpumap in the "padding"
2031 * used to page-align the events following
2032 * the threadmap. If the threadmap happens
2033 * to not require enough padding, we
2034 * artificially increase its footprint
2035 * until it needs enough padding.
2036 */
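/*
 * Worked example (hypothetical numbers): with 4K pages, if the header plus
 * threadmap ends 40 bytes short of a page boundary but the cpumap needs
 * 200 bytes, extra_thread_count below is bumped so the zero-filled filler
 * entries spill just past the boundary, after which the padding to the
 * next page boundary (nearly a full page) easily holds the cpumap.
 */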
2037
2038 pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
2039 cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
2040
2041 if (cpumap_size > pad_size) {
2042 /* Force an overflow onto the next page so that we get a full page of padding */
2043 extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
2044 }
2045
2046 header.version_no = RAW_VERSION1;
2047 header.thread_count = count + extra_thread_count;
2048
2049 clock_get_calendar_microtime(&secs, &usecs);
2050 header.TOD_secs = secs;
2051 header.TOD_usecs = usecs;
2052
2053 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
2054 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2055 if (ret)
2056 goto write_error;
2057 RAW_file_offset += sizeof(RAW_header);
2058
2059 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
2060 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2061 if (ret)
2062 goto write_error;
2063 RAW_file_offset += mapsize;
2064
2065 if (extra_thread_count) {
2066 pad_size = extra_thread_count * sizeof(kd_threadmap);
2067 pad_buf = (char *)kalloc(pad_size);
2068 memset(pad_buf, 0, pad_size);
2069
2070 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2071 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2072 kfree(pad_buf, pad_size);
2073
2074 if (ret)
2075 goto write_error;
2076 RAW_file_offset += pad_size;
2077
2078 }
2079
2080 pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
2081 if (pad_size) {
2082 pad_buf = (char *)kalloc(pad_size);
2083 memset(pad_buf, 0, pad_size);
2084
2085 /*
2086 * Embed a cpumap in the padding bytes.
2087 * Older code will skip this;
2088 * newer code will know how to read it.
2089 */
2090 uint32_t temp = pad_size;
2091 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
2092 memset(pad_buf, 0, pad_size);
2093 }
2094
2095 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2096 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2097 kfree(pad_buf, pad_size);
2098
2099 if (ret)
2100 goto write_error;
2101 RAW_file_offset += pad_size;
2102 }
2103 RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
2104
2105 } else {
2106 if (copyout(kd_mapptr, buffer, mapsize))
2107 ret = EINVAL;
2108 }
2109 }
2110 }
2111 else
2112 ret = EINVAL;
2113 }
2114 else
2115 ret = EINVAL;
2116
2117 if (ret && vp)
2118 {
2119 count = 0;
2120
2121 vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
2122 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2123 RAW_file_offset += sizeof(uint32_t);
2124 RAW_file_written += sizeof(uint32_t);
2125 }
2126 write_error:
2127 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
2128 {
2129 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
2130 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
2131 kd_mapsize = 0;
2132 kd_mapptr = (kd_threadmap *) 0;
2133 kd_mapcount = 0;
2134 }
2135 return(ret);
2136 }
2137
2138
2139 static int
2140 kdbg_set_nkdbufs(unsigned int value)
2141 {
2142 /*
2143 * We allow a maximum buffer size of 50% of either RAM or the max mapped address, whichever is smaller.
2144 * 'value' is the desired number of trace entries.
2145 */
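/*
 * Hypothetical example: with sane_size reporting 4GB and a 64-byte kd_buf
 * (sizes are illustrative), max_entries is (4GB / 2) / 64 = 33,554,432
 * events; any larger request is silently clamped to that.
 */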
2146 unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
2147
2148 if (value <= max_entries)
2149 return (value);
2150 else
2151 return (max_entries);
2152 }
2153
2154
2155 static int
2156 kdbg_enable_bg_trace(void)
2157 {
2158 int ret = 0;
2159
2160 if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
2161 nkdbufs = bg_nkdbufs;
2162 ret = kdbg_reinit(FALSE);
2163 if (0 == ret) {
2164 kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
2165 kdlog_bg_trace_running = TRUE;
2166 }
2167 }
2168 return ret;
2169 }
2170
2171 static void
2172 kdbg_disable_bg_trace(void)
2173 {
2174 if (kdlog_bg_trace_running == TRUE) {
2175 kdlog_bg_trace_running = FALSE;
2176 kdbg_clear();
2177 }
2178 }
2179
2180
2181
2182 /*
2183 * This function is provided for the CHUD toolkit only.
2184 * int val:
2185 * zero disables kdebug_chudhook function call
2186 * non-zero enables kdebug_chudhook function call
2187 * char *fn:
2188 * address of the enabled kdebug_chudhook function
2189 */
2190
2191 void
2192 kdbg_control_chud(int val, void *fn)
2193 {
2194 kdbg_lock_init();
2195
2196 if (val) {
2197 /* enable chudhook */
2198 kdebug_chudhook = fn;
2199 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
2200 }
2201 else {
2202 /* disable chudhook */
2203 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
2204 kdebug_chudhook = 0;
2205 }
2206 }
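/*
 * Illustrative sketch (not part of the original source): a CHUD-style client
 * would register and later remove its callback roughly as follows, where
 * my_chud_hook is a placeholder with the kdebug_chudhook signature:
 */
#if 0
	kdbg_control_chud(1, (void *) my_chud_hook);	/* enable the callback  */
	/* ... */
	kdbg_control_chud(0, NULL);			/* disable the callback */
#endif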
2207
2208
2209 int
2210 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
2211 {
2212 int ret = 0;
2213 size_t size = *sizep;
2214 unsigned int value = 0;
2215 kd_regtype kd_Reg;
2216 kbufinfo_t kd_bufinfo;
2217 pid_t curpid;
2218 proc_t p, curproc;
2219
2220 if (name[0] == KERN_KDGETENTROPY ||
2221 name[0] == KERN_KDWRITETR ||
2222 name[0] == KERN_KDWRITEMAP ||
2223 name[0] == KERN_KDEFLAGS ||
2224 name[0] == KERN_KDDFLAGS ||
2225 name[0] == KERN_KDENABLE ||
2226 name[0] == KERN_KDENABLE_BG_TRACE ||
2227 name[0] == KERN_KDSETBUF) {
2228
2229 if ( namelen < 2 )
2230 return(EINVAL);
2231 value = name[1];
2232 }
2233
2234 kdbg_lock_init();
2235
2236 if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
2237 return(ENOSPC);
2238
2239 lck_mtx_lock(kd_trace_mtx_sysctl);
2240
2241 switch(name[0]) {
2242 case KERN_KDGETBUF:
2243 /*
2244 * Does not alter the global_state_pid
2245 * This is a passive request.
2246 */
2247 if (size < sizeof(kd_bufinfo.nkdbufs)) {
2248 /*
2249 * There is not enough room to return even
2250 * the first element of the info structure.
2251 */
2252 ret = EINVAL;
2253 goto out;
2254 }
2255 kd_bufinfo.nkdbufs = nkdbufs;
2256 kd_bufinfo.nkdthreads = kd_mapcount;
2257
2258 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
2259 kd_bufinfo.nolog = 1;
2260 else
2261 kd_bufinfo.nolog = 0;
2262
2263 kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
2264 #if defined(__LP64__)
2265 kd_bufinfo.flags |= KDBG_LP64;
2266 #endif
2267 kd_bufinfo.bufid = global_state_pid;
2268
2269 if (size >= sizeof(kd_bufinfo)) {
2270 /*
2271 * Provide all the info we have
2272 */
2273 if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
2274 ret = EINVAL;
2275 } else {
2276 /*
2277 * For backwards compatibility, only provide
2278 * as much info as there is room for.
2279 */
2280 if (copyout(&kd_bufinfo, where, size))
2281 ret = EINVAL;
2282 }
2283 goto out;
2284
2285 case KERN_KDGETENTROPY: {
2286 /* Obsolescent - just fake with a random buffer */
2287 char *buffer = (char *) kalloc(size);
2288 read_frandom((void *) buffer, size);
2289 ret = copyout(buffer, where, size);
2290 kfree(buffer, size);
2291 goto out;
2292 }
2293
2294 case KERN_KDENABLE_BG_TRACE:
2295 bg_nkdbufs = kdbg_set_nkdbufs(value);
2296 kdlog_bg_trace = TRUE;
2297 ret = kdbg_enable_bg_trace();
2298 goto out;
2299
2300 case KERN_KDDISABLE_BG_TRACE:
2301 kdlog_bg_trace = FALSE;
2302 kdbg_disable_bg_trace();
2303 goto out;
2304 }
2305
2306 if ((curproc = current_proc()) != NULL)
2307 curpid = curproc->p_pid;
2308 else {
2309 ret = ESRCH;
2310 goto out;
2311 }
2312 if (global_state_pid == -1)
2313 global_state_pid = curpid;
2314 else if (global_state_pid != curpid) {
2315 if ((p = proc_find(global_state_pid)) == NULL) {
2316 /*
2317 * The global pid no longer exists
2318 */
2319 global_state_pid = curpid;
2320 } else {
2321 /*
2322 * The global pid exists, deny this request
2323 */
2324 proc_rele(p);
2325
2326 ret = EBUSY;
2327 goto out;
2328 }
2329 }
2330
2331 switch(name[0]) {
2332 case KERN_KDEFLAGS:
2333 kdbg_disable_bg_trace();
2334
2335 value &= KDBG_USERFLAGS;
2336 kd_ctrl_page.kdebug_flags |= value;
2337 break;
2338 case KERN_KDDFLAGS:
2339 kdbg_disable_bg_trace();
2340
2341 value &= KDBG_USERFLAGS;
2342 kd_ctrl_page.kdebug_flags &= ~value;
2343 break;
2344 case KERN_KDENABLE:
2345 /*
2346 * Enable tracing mechanism. Two types:
2347 * KDEBUG_TRACE is the standard one,
2348 * and KDEBUG_PPT, which is a carefully
2349 * chosen subset to avoid performance impact.
2350 */
2351 if (value) {
2352 /*
2353 * enable only if buffer is initialized
2354 */
2355 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
2356 !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
2357 ret = EINVAL;
2358 break;
2359 }
2360 kdbg_thrmap_init();
2361
2362 kdbg_set_tracing_enabled(TRUE, value);
2363 }
2364 else
2365 {
2366 kdbg_set_tracing_enabled(FALSE, 0);
2367 }
2368 break;
2369 case KERN_KDSETBUF:
2370 kdbg_disable_bg_trace();
2371
2372 nkdbufs = kdbg_set_nkdbufs(value);
2373 break;
2374 case KERN_KDSETUP:
2375 kdbg_disable_bg_trace();
2376
2377 ret = kdbg_reinit(FALSE);
2378 break;
2379 case KERN_KDREMOVE:
2380 kdbg_clear();
2381 ret = kdbg_enable_bg_trace();
2382 break;
2383 case KERN_KDSETREG:
2384 if(size < sizeof(kd_regtype)) {
2385 ret = EINVAL;
2386 break;
2387 }
2388 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2389 ret = EINVAL;
2390 break;
2391 }
2392 kdbg_disable_bg_trace();
2393
2394 ret = kdbg_setreg(&kd_Reg);
2395 break;
2396 case KERN_KDGETREG:
2397 if (size < sizeof(kd_regtype)) {
2398 ret = EINVAL;
2399 break;
2400 }
2401 ret = kdbg_getreg(&kd_Reg);
2402 if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
2403 ret = EINVAL;
2404 }
2405 kdbg_disable_bg_trace();
2406
2407 break;
2408 case KERN_KDREADTR:
2409 ret = kdbg_read(where, sizep, NULL, NULL);
2410 break;
2411 case KERN_KDWRITETR:
2412 case KERN_KDWRITEMAP:
2413 {
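/*
 * For both selectors, 'value' (name[1]) is a file descriptor to write to.
 * For KERN_KDWRITETR, *sizep is an optional timeout in milliseconds to
 * wait for the buffers to fill on the way in, and carries the number of
 * events written on the way out.
 */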
2414 struct vfs_context context;
2415 struct fileproc *fp;
2416 size_t number;
2417 vnode_t vp;
2418 int fd;
2419
2420 kdbg_disable_bg_trace();
2421
2422 if (name[0] == KERN_KDWRITETR) {
2423 int s;
2424 int wait_result = THREAD_AWAKENED;
2425 u_int64_t abstime;
2426 u_int64_t ns;
2427
2428 if (*sizep) {
2429 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2430 nanoseconds_to_absolutetime(ns, &abstime );
2431 clock_absolutetime_interval_to_deadline( abstime, &abstime );
2432 } else
2433 abstime = 0;
2434
2435 s = ml_set_interrupts_enabled(FALSE);
2436 lck_spin_lock(kdw_spin_lock);
2437
2438 while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2439
2440 kds_waiter = 1;
2441
2442 if (abstime)
2443 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2444 else
2445 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2446
2447 kds_waiter = 0;
2448 }
2449 lck_spin_unlock(kdw_spin_lock);
2450 ml_set_interrupts_enabled(s);
2451 }
2452 p = current_proc();
2453 fd = value;
2454
2455 proc_fdlock(p);
2456 if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
2457 proc_fdunlock(p);
2458 break;
2459 }
2460 context.vc_thread = current_thread();
2461 context.vc_ucred = fp->f_fglob->fg_cred;
2462
2463 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
2464 fp_drop(p, fd, fp, 1);
2465 proc_fdunlock(p);
2466
2467 ret = EBADF;
2468 break;
2469 }
2470 vp = (struct vnode *)fp->f_fglob->fg_data;
2471 proc_fdunlock(p);
2472
2473 if ((ret = vnode_getwithref(vp)) == 0) {
2474 RAW_file_offset = fp->f_fglob->fg_offset;
2475 if (name[0] == KERN_KDWRITETR) {
2476 number = nkdbufs * sizeof(kd_buf);
2477
2478 KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_START, 0, 0, 0, 0, 0);
2479 ret = kdbg_read(0, &number, vp, &context);
2480 KERNEL_DEBUG_CONSTANT(TRACE_WRITING_EVENTS | DBG_FUNC_END, number, 0, 0, 0, 0);
2481
2482 *sizep = number;
2483 } else {
2484 number = kd_mapcount * sizeof(kd_threadmap);
2485 kdbg_readthrmap(0, &number, vp, &context);
2486 }
2487 fp->f_fglob->fg_offset = RAW_file_offset;
2488 vnode_put(vp);
2489 }
2490 fp_drop(p, fd, fp, 0);
2491
2492 break;
2493 }
2494 case KERN_KDBUFWAIT:
2495 {
2496 /* WRITETR lite -- just block until there's data */
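/*
 * As with KERN_KDWRITETR, *sizep carries an optional timeout in
 * milliseconds on the way in; on the way out it reports (0 or 1) whether
 * the in-use storage count reached n_storage_threshold.
 */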
2497 int s;
2498 int wait_result = THREAD_AWAKENED;
2499 u_int64_t abstime;
2500 u_int64_t ns;
2501 size_t number = 0;
2502
2503 kdbg_disable_bg_trace();
2504
2505
2506 if (*sizep) {
2507 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2508 nanoseconds_to_absolutetime(ns, &abstime );
2509 clock_absolutetime_interval_to_deadline( abstime, &abstime );
2510 } else
2511 abstime = 0;
2512
2513 s = ml_set_interrupts_enabled(FALSE);
2514 if( !s )
2515 panic("trying to wait with interrupts off");
2516 lck_spin_lock(kdw_spin_lock);
2517
2518 /* drop the mutex so we don't exclude others
2519 * from accessing the trace buffers
2520 */
2521 lck_mtx_unlock(kd_trace_mtx_sysctl);
2522
2523 while (wait_result == THREAD_AWAKENED &&
2524 kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2525
2526 kds_waiter = 1;
2527
2528 if (abstime)
2529 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2530 else
2531 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2532
2533 kds_waiter = 0;
2534 }
2535
2536 /* check the count under the spinlock */
2537 number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
2538
2539 lck_spin_unlock(kdw_spin_lock);
2540 ml_set_interrupts_enabled(s);
2541
2542 /* pick the mutex back up again */
2543 lck_mtx_lock(kd_trace_mtx_sysctl);
2544
2545 /* write out whether we've exceeded the threshold */
2546 *sizep = number;
2547 break;
2548 }
2549 case KERN_KDPIDTR:
2550 if (size < sizeof(kd_regtype)) {
2551 ret = EINVAL;
2552 break;
2553 }
2554 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2555 ret = EINVAL;
2556 break;
2557 }
2558 kdbg_disable_bg_trace();
2559
2560 ret = kdbg_setpid(&kd_Reg);
2561 break;
2562 case KERN_KDPIDEX:
2563 if (size < sizeof(kd_regtype)) {
2564 ret = EINVAL;
2565 break;
2566 }
2567 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2568 ret = EINVAL;
2569 break;
2570 }
2571 kdbg_disable_bg_trace();
2572
2573 ret = kdbg_setpidex(&kd_Reg);
2574 break;
2575 case KERN_KDCPUMAP:
2576 ret = kdbg_readcpumap(where, sizep);
2577 break;
2578 case KERN_KDTHRMAP:
2579 ret = kdbg_readthrmap(where, sizep, NULL, NULL);
2580 break;
2581 case KERN_KDREADCURTHRMAP:
2582 ret = kdbg_readcurthrmap(where, sizep);
2583 break;
2584 case KERN_KDSETRTCDEC:
2585 if (size < sizeof(kd_regtype)) {
2586 ret = EINVAL;
2587 break;
2588 }
2589 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2590 ret = EINVAL;
2591 break;
2592 }
2593 kdbg_disable_bg_trace();
2594
2595 ret = kdbg_setrtcdec(&kd_Reg);
2596 break;
2597 case KERN_KDSET_TYPEFILTER:
2598 kdbg_disable_bg_trace();
2599
2600 if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
2601 if ((ret = kdbg_enable_typefilter()))
2602 break;
2603 }
2604
2605 if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
2606 ret = EINVAL;
2607 break;
2608 }
2609
2610 if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
2611 ret = EINVAL;
2612 break;
2613 }
2614 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
2615 break;
2616 default:
2617 ret = EINVAL;
2618 }
2619 out:
2620 lck_mtx_unlock(kd_trace_mtx_sysctl);
2621
2622 return(ret);
2623 }
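/*
 * Illustrative sketch (not part of the original source): a minimal user-space
 * trace session driving the selectors above through the kern.kdebug sysctl.
 * It assumes the conventional { CTL_KERN, KERN_KDEBUG, op, value } mib layout
 * used by the trace tools; error handling is omitted.
 */
#if 0	/* user-space sketch, for illustration only */
static int
kd_sysctl(int op, int value, void *buf, size_t *len)
{
	int mib[4] = { CTL_KERN, KERN_KDEBUG, op, value };

	return (sysctl(mib, 4, buf, len, NULL, 0));
}

static void
trace_session_sketch(void)
{
	size_t len;
	kd_buf *events = malloc(1000000 * sizeof(kd_buf));

	kd_sysctl(KERN_KDSETBUF, 1000000, NULL, NULL);		/* request ~1M events   */
	kd_sysctl(KERN_KDSETUP, 0, NULL, NULL);			/* allocate the buffers */
	kd_sysctl(KERN_KDENABLE, KDEBUG_ENABLE_TRACE, NULL, NULL); /* start tracing     */

	/* ... run the workload of interest ... */

	kd_sysctl(KERN_KDENABLE, 0, NULL, NULL);		/* stop tracing         */
	len = 1000000 * sizeof(kd_buf);				/* bytes in, events out */
	kd_sysctl(KERN_KDREADTR, 0, events, &len);
	kd_sysctl(KERN_KDREMOVE, 0, NULL, NULL);		/* release the buffers  */
}
#endif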
2624
2625
2626 /*
2627 * This code can run, for the most part, concurrently with kernel_debug_internal()...
2628 * 'release_storage_unit' will take the kds_spin_lock, which may cause us to briefly
2629 * synchronize with the recording side of this puzzle... otherwise, we are able to
2630 * move through the lists without taking any locks.
2631 */
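/*
 * In outline (a sketch of the loop below, not additional behavior), the read
 * path is an N-way merge by timestamp across the per-cpu buffers:
 *
 *	while there is room left in kdcopybuf:
 *		scan each cpu's oldest unread record and remember the one
 *		with the smallest timestamp;
 *		stop if nothing is left, if the record is newer than the
 *		IOP sync barrier, or if it is still being filled in;
 *		copy that record into kdcopybuf and advance that cpu's
 *		read cursor;
 *	then flush kdcopybuf to the file (vp) or copy it out to 'buffer'.
 */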
2632 int
2633 kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
2634 {
2635 unsigned int count;
2636 unsigned int cpu, min_cpu;
2637 uint64_t mintime, t, barrier = 0;
2638 int error = 0;
2639 kd_buf *tempbuf;
2640 uint32_t rcursor;
2641 kd_buf lostevent;
2642 union kds_ptr kdsp;
2643 struct kd_storage *kdsp_actual;
2644 struct kd_bufinfo *kdbp;
2645 struct kd_bufinfo *min_kdbp;
2646 uint32_t tempbuf_count;
2647 uint32_t tempbuf_number;
2648 uint32_t old_kdebug_flags;
2649 uint32_t old_kdebug_slowcheck;
2650 boolean_t lostevents = FALSE;
2651 boolean_t out_of_events = FALSE;
2652
2653 count = *number/sizeof(kd_buf);
2654 *number = 0;
2655
2656 if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
2657 return EINVAL;
2658
2659 memset(&lostevent, 0, sizeof(lostevent));
2660 lostevent.debugid = TRACE_LOST_EVENTS;
2661
2662 /* Capture timestamp. Only sort events that have occurred before the timestamp.
2663 * Since the IOP is being flushed here, it's possible that events occur on the AP
2664 * while running live tracing. If we are disabled, no new events should
2665 * occur on the AP.
2666 */
2667
2668 if (kd_ctrl_page.enabled)
2669 {
2670 // a non-zero timestamp serves as the sort barrier
2671 barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
2672 }
2673
2674 // Request each IOP to provide us with up to date entries before merging buffers together.
2675 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
2676
2677 /*
2678 * because we hold kd_trace_mtx_sysctl, no other control threads can
2679 * be playing with kdebug_flags... the code that cuts new events could
2680 * be running, but it grabs kds_spin_lock if it needs to acquire a new
2681 * storage chunk, which is where it examines kdebug_flags... if it's adding
2682 * to the same chunk we're reading from, no problem...
2683 */
2684
2685 disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
2686
2687 if (count > nkdbufs)
2688 count = nkdbufs;
2689
2690 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2691 tempbuf_count = KDCOPYBUF_COUNT;
2692
2693 while (count) {
2694 tempbuf = kdcopybuf;
2695 tempbuf_number = 0;
2696
2697 // While there is room left in the copy buffer
2698 while (tempbuf_count) {
2699 mintime = 0xffffffffffffffffULL;
2700 min_kdbp = NULL;
2701 min_cpu = 0;
2702
2703 // Check all CPUs
2704 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
2705
2706 // Find one with raw data
2707 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
2708 continue;
2709 /* Debugging aid: maintain a copy of the "kdsp"
2710 * index.
2711 */
2712 volatile union kds_ptr kdsp_shadow;
2713
2714 kdsp_shadow = kdsp;
2715
2716 // Get from cpu data to buffer header to buffer
2717 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2718
2719 volatile struct kd_storage *kdsp_actual_shadow;
2720
2721 kdsp_actual_shadow = kdsp_actual;
2722
2723 // See if there are actual data left in this buffer
2724 rcursor = kdsp_actual->kds_readlast;
2725
2726 if (rcursor == kdsp_actual->kds_bufindx)
2727 continue;
2728
2729 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
2730
2731 if ((t > barrier) && (barrier > 0)) {
2732 /*
2733 * Need to wait to flush iop again before we
2734 * sort any more data from the buffers
2735 */
2736 out_of_events = TRUE;
2737 break;
2738 }
2739 if (t < kdsp_actual->kds_timestamp) {
2740 /*
2741 * indicates we've not yet completed filling
2742 * in this event...
2743 * this should only occur when we're looking
2744 * at the buf that the record head is utilizing.
2745 * We'll pick these events up on the next
2746 * call to kdbg_read.
2747 * We bail at this point so that we don't
2748 * get an out-of-order timestream by continuing
2749 * to read events from the other CPUs' timestream(s).
2750 */
2751 out_of_events = TRUE;
2752 break;
2753 }
2754 if (t < mintime) {
2755 mintime = t;
2756 min_kdbp = kdbp;
2757 min_cpu = cpu;
2758 }
2759 }
2760 if (min_kdbp == NULL || out_of_events == TRUE) {
2761 /*
2762 * all buffers ran empty
2763 */
2764 out_of_events = TRUE;
2765 break;
2766 }
2767
2768 // Get data
2769 kdsp = min_kdbp->kd_list_head;
2770 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2771
2772 if (kdsp_actual->kds_lostevents == TRUE) {
2773 kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
2774 *tempbuf = lostevent;
2775
2776 kdsp_actual->kds_lostevents = FALSE;
2777 lostevents = TRUE;
2778
2779 goto nextevent;
2780 }
2781
2782 // Copy into buffer
2783 *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
2784
2785 if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
2786 release_storage_unit(min_cpu, kdsp.raw);
2787
2788 /*
2789 * Watch for out of order timestamps
2790 */
2791 if (mintime < min_kdbp->kd_prev_timebase) {
2792 /*
2793 * if so, use the previous timestamp + 1 cycle
2794 */
2795 min_kdbp->kd_prev_timebase++;
2796 kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
2797 } else
2798 min_kdbp->kd_prev_timebase = mintime;
2799 nextevent:
2800 tempbuf_count--;
2801 tempbuf_number++;
2802 tempbuf++;
2803
2804 if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
2805 break;
2806 }
2807 if (tempbuf_number) {
2808
2809 if (vp) {
2810 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
2811 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2812
2813 RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
2814
2815 if (RAW_file_written >= RAW_FLUSH_SIZE) {
2816 cluster_push(vp, 0);
2817
2818 RAW_file_written = 0;
2819 }
2820 } else {
2821 error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
2822 buffer += (tempbuf_number * sizeof(kd_buf));
2823 }
2824 if (error) {
2825 *number = 0;
2826 error = EINVAL;
2827 break;
2828 }
2829 count -= tempbuf_number;
2830 *number += tempbuf_number;
2831 }
2832 if (out_of_events == TRUE)
2833 /*
2834 * all trace buffers are empty
2835 */
2836 break;
2837
2838 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2839 tempbuf_count = KDCOPYBUF_COUNT;
2840 }
2841 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
2842 enable_wrap(old_kdebug_slowcheck, lostevents);
2843 }
2844 return (error);
2845 }
2846
2847
2848 unsigned char *getProcName(struct proc *proc);
2849 unsigned char *getProcName(struct proc *proc) {
2850
2851 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
2852
2853 }
2854
2855 #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
2856 #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
2857 #if defined(__i386__) || defined (__x86_64__)
2858 #define TRAP_DEBUGGER __asm__ volatile("int3");
2859 #else
2860 #error No TRAP_DEBUGGER definition for this architecture
2861 #endif
2862
2863 #define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
2864 #define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
2865
2866 /* Initialize the mutex governing access to the stack snapshot subsystem */
2867 __private_extern__ void
2868 stackshot_lock_init( void )
2869 {
2870 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
2871
2872 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
2873
2874 stackshot_subsys_lck_attr = lck_attr_alloc_init();
2875
2876 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
2877 }
2878
2879 /*
2880 * stack_snapshot: Obtains a coherent set of stack traces for all threads
2881 * on the system, tracing both kernel and user stacks
2882 * where available. Uses machine-specific trace routines
2883 * for ppc, ppc64 and x86.
2884 * Inputs: uap->pid - process id of process to be traced, or -1
2885 * for the entire system
2886 * uap->tracebuf - address of the user space destination
2887 * buffer
2888 * uap->tracebuf_size - size of the user space trace buffer
2889 * uap->options - various options, including the maximum
2890 * number of frames to trace.
2891 * Outputs: EPERM if the caller is not privileged
2892 * EINVAL if the supplied trace buffer isn't sanely sized
2893 * ENOMEM if we don't have enough memory to satisfy the
2894 * request
2895 * ENOENT if the target pid isn't found
2896 * ENOSPC if the supplied buffer is insufficient
2897 * *retval contains the number of bytes traced, if successful
2898 * and -1 otherwise. If the request failed due to
2899 * tracebuffer exhaustion, we copyout as much as possible.
2900 */
2901 int
2902 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
2903 int error = 0;
2904
2905 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
2906 return(error);
2907
2908 return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
2909 uap->flags, uap->dispatch_offset, retval);
2910 }
2911
2912 int
2913 stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
2914 {
2915 int error = 0;
2916 boolean_t istate;
2917
2918 if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
2919 return -1;
2920 }
2921
2922 /* cap an individual stackshot to SANE_TRACEBUF_SIZE */
2923 if (size > SANE_TRACEBUF_SIZE) {
2924 size = SANE_TRACEBUF_SIZE;
2925 }
2926
2927 /* Serialize tracing */
2928 STACKSHOT_SUBSYS_LOCK();
2929 istate = ml_set_interrupts_enabled(FALSE);
2930
2931
2932 /* Preload trace parameters */
2933 kdp_snapshot_preflight(pid, buf, size, flags, 0);
2934
2935 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
2936 * the trace buffer
2937 */
2938 TRAP_DEBUGGER;
2939
2940 ml_set_interrupts_enabled(istate);
2941
2942 *bytesTraced = kdp_stack_snapshot_bytes_traced();
2943
2944 error = kdp_stack_snapshot_geterror();
2945
2946 STACKSHOT_SUBSYS_UNLOCK();
2947
2948 return error;
2949
2950 }
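/*
 * Illustrative sketch (not part of the original source): an in-kernel caller
 * capturing a system-wide snapshot (pid -1, as documented above for the
 * syscall path).  The flag value and buffer size are placeholders.
 */
#if 0
static void
stackshot_from_kernel_sketch(void)
{
	unsigned bytes = 0;
	void *snapbuf = (void *) kalloc(SANE_TRACEBUF_SIZE);

	if (snapbuf != NULL &&
	    stack_snapshot_from_kernel(-1, snapbuf, SANE_TRACEBUF_SIZE, 0, &bytes) == 0) {
		/* snapbuf now holds 'bytes' bytes of stackshot data */
	}
	if (snapbuf != NULL)
		kfree(snapbuf, SANE_TRACEBUF_SIZE);
}
#endif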
2951
2952 int
2953 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
2954 {
2955 boolean_t istate;
2956 int error = 0;
2957 unsigned bytesTraced = 0;
2958
2959 #if CONFIG_TELEMETRY
2960 if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
2961 telemetry_global_ctl(1);
2962 *retval = 0;
2963 return (0);
2964 } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
2965 telemetry_global_ctl(0);
2966 *retval = 0;
2967 return (0);
2968 }
2969
2970 if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
2971 error = telemetry_enable_window();
2972
2973 if (error != KERN_SUCCESS) {
2974 /* We are probably out of memory */
2975 *retval = -1;
2976 return ENOMEM;
2977 }
2978
2979 *retval = 0;
2980 return (0);
2981 } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
2982 telemetry_disable_window();
2983 *retval = 0;
2984 return (0);
2985 }
2986 #endif
2987
2988 *retval = -1;
2989 /* Serialize tracing */
2990 STACKSHOT_SUBSYS_LOCK();
2991
2992 if (tracebuf_size <= 0) {
2993 error = EINVAL;
2994 goto error_exit;
2995 }
2996
2997 #if CONFIG_TELEMETRY
2998 if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
2999
3000 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3001 error = EINVAL;
3002 goto error_exit;
3003 }
3004
3005 bytesTraced = tracebuf_size;
3006 error = telemetry_gather(tracebuf, &bytesTraced,
3007 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
3008 if (error == KERN_NO_SPACE) {
3009 error = ENOSPC;
3010 }
3011
3012 *retval = (int)bytesTraced;
3013 goto error_exit;
3014 }
3015
3016 if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {
3017
3018 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3019 error = EINVAL;
3020 goto error_exit;
3021 }
3022
3023 bytesTraced = tracebuf_size;
3024 error = telemetry_gather_windowed(tracebuf, &bytesTraced);
3025 if (error == KERN_NO_SPACE) {
3026 error = ENOSPC;
3027 }
3028
3029 *retval = (int)bytesTraced;
3030 goto error_exit;
3031 }
3032
3033 if (flags & STACKSHOT_GET_BOOT_PROFILE) {
3034
3035 if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
3036 error = EINVAL;
3037 goto error_exit;
3038 }
3039
3040 bytesTraced = tracebuf_size;
3041 error = bootprofile_gather(tracebuf, &bytesTraced);
3042 if (error == KERN_NO_SPACE) {
3043 error = ENOSPC;
3044 }
3045
3046 *retval = (int)bytesTraced;
3047 goto error_exit;
3048 }
3049 #endif
3050
3051 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3052 error = EINVAL;
3053 goto error_exit;
3054 }
3055
3056 assert(stackshot_snapbuf == NULL);
3057 if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
3058 error = ENOMEM;
3059 goto error_exit;
3060 }
3061
3062 if (panic_active()) {
3063 error = ENOMEM;
3064 goto error_exit;
3065 }
3066
3067 istate = ml_set_interrupts_enabled(FALSE);
3068 /* Preload trace parameters */
3069 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
3070
3071 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
3072 * the trace buffer
3073 */
3074
3075 TRAP_DEBUGGER;
3076
3077 ml_set_interrupts_enabled(istate);
3078
3079 bytesTraced = kdp_stack_snapshot_bytes_traced();
3080
3081 if (bytesTraced > 0) {
3082 if ((error = copyout(stackshot_snapbuf, tracebuf,
3083 ((bytesTraced < tracebuf_size) ?
3084 bytesTraced : tracebuf_size))))
3085 goto error_exit;
3086 *retval = bytesTraced;
3087 }
3088 else {
3089 error = ENOENT;
3090 goto error_exit;
3091 }
3092
3093 error = kdp_stack_snapshot_geterror();
3094 if (error == -1) {
3095 error = ENOSPC;
3096 *retval = -1;
3097 goto error_exit;
3098 }
3099
3100 error_exit:
3101 if (stackshot_snapbuf != NULL)
3102 kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
3103 stackshot_snapbuf = NULL;
3104 STACKSHOT_SUBSYS_UNLOCK();
3105 return error;
3106 }
3107
3108 void
3109 start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
3110 {
3111
3112 if (!new_nkdbufs)
3113 return;
3114 nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
3115 kdbg_lock_init();
3116
3117 kernel_debug_string("start_kern_tracing");
3118
3119 if (0 == kdbg_reinit(TRUE)) {
3120
3121 if (need_map == TRUE) {
3122 uint32_t old1, old2;
3123
3124 kdbg_thrmap_init();
3125
3126 disable_wrap(&old1, &old2);
3127 }
3128
3129 /* Hold off interrupts until the early traces are cut */
3130 boolean_t s = ml_set_interrupts_enabled(FALSE);
3131
3132 kdbg_set_tracing_enabled(
3133 TRUE,
3134 kdebug_serial ?
3135 (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_SERIAL) :
3136 KDEBUG_ENABLE_TRACE);
3137
3138 /*
3139 * Transfer all very early events from the static buffer
3140 * into the real buffers.
3141 */
3142 kernel_debug_early_end();
3143
3144 ml_set_interrupts_enabled(s);
3145
3146 printf("kernel tracing started\n");
3147 #if KDEBUG_MOJO_TRACE
3148 if (kdebug_serial) {
3149 printf("serial output enabled with %lu named events\n",
3150 sizeof(kd_events)/sizeof(kd_event_t));
3151 }
3152 #endif
3153 } else {
3154 printf("error from kdbg_reinit, kernel tracing not started\n");
3155 }
3156 }
3157
3158 void
3159 start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
3160 boolean_t need_map,
3161 unsigned int typefilter)
3162 {
3163 /* startup tracing */
3164 start_kern_tracing(new_nkdbufs, need_map);
3165
3166 /* check that tracing was actually enabled */
3167 if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
3168 return;
3169
3170 /* setup the typefiltering */
3171 if (0 == kdbg_enable_typefilter())
3172 setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
3173 }
3174
3175 void
3176 kdbg_dump_trace_to_file(const char *filename)
3177 {
3178 vfs_context_t ctx;
3179 vnode_t vp;
3180 int error;
3181 size_t number;
3182
3183
3184 if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
3185 return;
3186
3187 if (global_state_pid != -1) {
3188 if ((proc_find(global_state_pid)) != NULL) {
3189 /*
3190 * The global pid exists; we're running
3191 * due to fs_usage, latency, etc...
3192 * Don't cut the panic/shutdown trace file.
3193 * Disable tracing from this point to avoid
3194 * perturbing state.
3195 */
3196 kdebug_enable = 0;
3197 kd_ctrl_page.enabled = 0;
3198 commpage_update_kdebug_enable();
3199 return;
3200 }
3201 }
3202 KERNEL_DEBUG_CONSTANT(TRACE_PANIC | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
3203
3204 kdebug_enable = 0;
3205 kd_ctrl_page.enabled = 0;
3206 commpage_update_kdebug_enable();
3207
3208 ctx = vfs_context_kernel();
3209
3210 if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
3211 return;
3212
3213 number = kd_mapcount * sizeof(kd_threadmap);
3214 kdbg_readthrmap(0, &number, vp, ctx);
3215
3216 number = nkdbufs*sizeof(kd_buf);
3217 kdbg_read(0, &number, vp, ctx);
3218
3219 vnode_close(vp, FWRITE, ctx);
3220
3221 sync(current_proc(), (void *)NULL, (int *)NULL);
3222 }
3223
3224 /* Helper function for filling in the BSD name for an address space.
3225 * Defined here because the machine bindings know only Mach threads
3226 * and nothing about BSD processes.
3227 *
3228 * FIXME: need to grab a lock during this?
3229 */
3230 void kdbg_get_task_name(char* name_buf, int len, task_t task)
3231 {
3232 proc_t proc;
3233
3234 /* Note: we can't use thread->task (and functions that rely on it) here
3235 * because it hasn't been initialized yet when this function is called.
3236 * We use the explicitly-passed task parameter instead.
3237 */
3238 proc = get_bsdtask_info(task);
3239 if (proc != PROC_NULL)
3240 snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
3241 else
3242 snprintf(name_buf, len, "%p [!bsd]", task);
3243 }
3244
3245 #if KDEBUG_MOJO_TRACE
3246 static kd_event_t *
3247 binary_search(uint32_t id)
3248 {
3249 int low, high, mid;
3250
3251 low = 0;
3252 high = sizeof(kd_events)/sizeof(kd_event_t) - 1;
3253
3254 while (TRUE)
3255 {
3256 mid = (low + high) / 2;
3257
3258 if (low > high)
3259 return NULL; /* failed */
3260 else if ( low + 1 >= high) {
3261 /* We have a match */
3262 if (kd_events[high].id == id)
3263 return &kd_events[high];
3264 else if (kd_events[low].id == id)
3265 return &kd_events[low];
3266 else
3267 return NULL; /* search failed */
3268 }
3269 else if (id < kd_events[mid].id)
3270 high = mid;
3271 else
3272 low = mid;
3273 }
3274 }
3275
3276 /*
3277 * Look up event id to get name string.
3278 * A per-cpu cache of a single entry is consulted
3279 * before resorting to a binary search of the full table.
3280 */
3281 #define NCACHE 1
3282 static kd_event_t *last_hit[MAX_CPUS];
3283 static kd_event_t *
3284 event_lookup_cache(uint32_t cpu, uint32_t id)
3285 {
3286 if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
3287 last_hit[cpu] = binary_search(id);
3288 return last_hit[cpu];
3289 }
3290
3291 static uint64_t kd_last_timstamp;
3292
3293 static void
3294 kdebug_serial_print(
3295 uint32_t cpunum,
3296 uint32_t debugid,
3297 uint64_t timestamp,
3298 uintptr_t arg1,
3299 uintptr_t arg2,
3300 uintptr_t arg3,
3301 uintptr_t arg4,
3302 uintptr_t threadid
3303 )
3304 {
3305 char kprintf_line[192];
3306 char event[40];
3307 uint64_t us = timestamp / NSEC_PER_USEC;
3308 uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100;
3309 uint64_t delta = timestamp - kd_last_timstamp;
3310 uint64_t delta_us = delta / NSEC_PER_USEC;
3311 uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
3312 uint32_t event_id = debugid & DBG_FUNC_MASK;
3313 const char *command;
3314 const char *bra;
3315 const char *ket;
3316 kd_event_t *ep;
3317
3318 /* event time and delta from last */
3319 snprintf(kprintf_line, sizeof(kprintf_line),
3320 "%11llu.%1llu %8llu.%1llu ",
3321 us, us_tenth, delta_us, delta_us_tenth);
3322
3323
3324 /* event (id or name) - start prefixed by "[", end postfixed by "]" */
3325 bra = (debugid & DBG_FUNC_START) ? "[" : " ";
3326 ket = (debugid & DBG_FUNC_END) ? "]" : " ";
3327 ep = event_lookup_cache(cpunum, event_id);
3328 if (ep) {
3329 if (strlen(ep->name) < sizeof(event) - 3)
3330 snprintf(event, sizeof(event), "%s%s%s",
3331 bra, ep->name, ket);
3332 else
3333 snprintf(event, sizeof(event), "%s%x(name too long)%s",
3334 bra, event_id, ket);
3335 } else {
3336 snprintf(event, sizeof(event), "%s%x%s",
3337 bra, event_id, ket);
3338 }
3339 snprintf(kprintf_line + strlen(kprintf_line),
3340 sizeof(kprintf_line) - strlen(kprintf_line),
3341 "%-40s ", event);
3342
3343 /* arg1 .. arg4 with special cases for strings */
3344 switch (event_id) {
3345 case VFS_LOOKUP:
3346 case VFS_LOOKUP_DONE:
3347 if (debugid & DBG_FUNC_START) {
3348 /* arg1 hex then arg2..arg4 chars */
3349 snprintf(kprintf_line + strlen(kprintf_line),
3350 sizeof(kprintf_line) - strlen(kprintf_line),
3351 "%-16lx %-8s%-8s%-8s ",
3352 arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
3353 break;
3354 }
3355 /* else fall through for arg1..arg4 chars */
3356 case TRACE_STRING_EXEC:
3357 case TRACE_STRING_NEWTHREAD:
3358 case TRACE_INFO_STRING:
3359 snprintf(kprintf_line + strlen(kprintf_line),
3360 sizeof(kprintf_line) - strlen(kprintf_line),
3361 "%-8s%-8s%-8s%-8s ",
3362 (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
3363 break;
3364 default:
3365 snprintf(kprintf_line + strlen(kprintf_line),
3366 sizeof(kprintf_line) - strlen(kprintf_line),
3367 "%-16lx %-16lx %-16lx %-16lx",
3368 arg1, arg2, arg3, arg4);
3369 }
3370
3371 /* threadid, cpu and command name */
3372 if (threadid == (uintptr_t)thread_tid(current_thread()) &&
3373 current_proc() &&
3374 current_proc()->p_comm)
3375 command = current_proc()->p_comm;
3376 else
3377 command = "-";
3378 snprintf(kprintf_line + strlen(kprintf_line),
3379 sizeof(kprintf_line) - strlen(kprintf_line),
3380 " %-16lx %-2d %s\n",
3381 threadid, cpunum, command);
3382
3383 kprintf("%s", kprintf_line);
3384 kd_last_timstamp = timestamp;
3385 }
3386 #endif