/* bsd/kern/kdebug.c -- apple/xnu, tag xnu-4570.1.46 */
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/ktrace.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>
#include <sys/random.h>

#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <machine/machine_routines.h>

#include <mach/machine.h>
#include <mach/vm_map.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#endif

#include <kern/clock.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/telemetry.h>
#include <kern/sched_prim.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>
#include <kperf/kperf.h>
#include <pexpert/device_tree.h>

#include <sys/malloc.h>
#include <sys/mcache.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/ubc.h>
#include <sys/param.h>			/* for isset() */

#include <mach/mach_host.h>		/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>
#include <machine/atomic.h>
/*
 * IOP(s)
 *
 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
 *
 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
 * They are registered dynamically. Each is assigned a cpu_id at registration.
 *
 * NOTE: IOP trace events may not use the same clock hardware as "normal"
 * cpus. There is an effort made to synchronize the IOP timebase with the
 * AP, but it should be understood that there may be discrepancies.
 *
 * Once registered, an IOP is permanent; it cannot be unloaded/unregistered.
 * The current implementation depends on this for thread safety.
 *
 * New registrations occur by allocating a kd_iop struct and assigning
 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
 * list_head pointer resolves any races.
 *
 * You may safely walk the kd_iops list at any time, without holding locks.
 *
 * When allocating buffers, the current kd_iops head is captured. Any operations
 * that depend on the buffer state (such as flushing IOP traces on reads,
 * etc.) should use the captured list head. This will allow registrations to
 * take place while trace is in use.
 */

typedef struct kd_iop {
	kd_callback_t	callback;
	uint32_t	cpu_id;
	uint64_t	last_timestamp; /* Prevent timer rollback */
	struct kd_iop*	next;
} kd_iop_t;

static kd_iop_t* kd_iops = NULL;

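/*
 * Illustrative sketch, not part of the original source: because
 * registration only CAS-pushes new nodes at the head and nodes are never
 * removed, readers may walk kd_iops without any locking. The hypothetical
 * helper below shows the shape of such a walk.
 */
#if 0 /* example only */
static uint32_t
kdbg_iop_count_sketch(void)
{
	uint32_t count = 0;
	for (kd_iop_t *iop = kd_iops; iop != NULL; iop = iop->next) {
		count++;
	}
	return count;
}
#endif
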
/*
 * Typefilter(s)
 *
 * A typefilter is an 8KB bitmap that is used to selectively filter events
 * being recorded. It is able to individually address every class & subclass.
 *
 * There is a shared typefilter in the kernel which is lazily allocated. Once
 * allocated, the shared typefilter is never deallocated. The shared typefilter
 * is also mapped on demand into userspace processes that invoke the
 * kdebug_trace API from Libsyscall. When mapped into a userspace process, the
 * memory is read only, and does not have a fixed address.
 *
 * It is a requirement that the kernel's shared typefilter always pass DBG_TRACE
 * events. This is enforced automatically, by having the needed bits set any
 * time the shared typefilter is mutated.
 */

typedef uint8_t* typefilter_t;

static typefilter_t kdbg_typefilter;
static mach_port_t  kdbg_typefilter_memory_entry;

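/*
 * Illustrative sketch, not part of the original source: a debugid's class
 * (top 8 bits) and subclass (next 8 bits) jointly select one of the
 * 256 * 256 = 65536 bits in the 8KB bitmap; this open-coded check mirrors
 * what typefilter_is_debugid_allowed() does via isset().
 */
#if 0 /* example only */
static bool
typefilter_check_sketch(const uint8_t *tf, uint32_t debugid)
{
	uint16_t csc = (uint16_t)(debugid >> 16);	/* class + subclass bits */
	return (tf[csc / 8] & (1 << (csc % 8))) != 0;
}
#endif
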
/*
 * There are 3 combinations of page sizes:
 *
 *  4KB /  4KB
 *  4KB / 16KB
 * 16KB / 16KB
 *
 * The typefilter is exactly 8KB. In the first two scenarios, we would like
 * to use 2 pages exactly; in the third scenario we must make certain that
 * a full page is allocated so we do not inadvertently expose 8KB of random
 * data to userspace. The round_page_32 macro rounds to kernel page size.
 */
#define TYPEFILTER_ALLOC_SIZE MAX(round_page_32(KDBG_TYPEFILTER_BITMAP_SIZE), KDBG_TYPEFILTER_BITMAP_SIZE)
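/*
 * Worked arithmetic, not part of the original source: with 4KB kernel
 * pages, round_page_32(8KB) == 8KB, so exactly two pages are used; with
 * 16KB kernel pages it rounds up to 16KB, and the trailing 8KB is zeroed
 * (as typefilter_create() does below) before the page is shared read-only.
 */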

static typefilter_t typefilter_create(void)
{
	typefilter_t tf;
	if (KERN_SUCCESS == kmem_alloc(kernel_map, (vm_offset_t*)&tf, TYPEFILTER_ALLOC_SIZE, VM_KERN_MEMORY_DIAG)) {
		memset(&tf[KDBG_TYPEFILTER_BITMAP_SIZE], 0, TYPEFILTER_ALLOC_SIZE - KDBG_TYPEFILTER_BITMAP_SIZE);
		return tf;
	}
	return NULL;
}

static void typefilter_deallocate(typefilter_t tf)
{
	assert(tf != NULL);
	assert(tf != kdbg_typefilter);
	kmem_free(kernel_map, (vm_offset_t)tf, TYPEFILTER_ALLOC_SIZE);
}

static void typefilter_copy(typefilter_t dst, typefilter_t src)
{
	assert(src != NULL);
	assert(dst != NULL);
	memcpy(dst, src, KDBG_TYPEFILTER_BITMAP_SIZE);
}

static void typefilter_reject_all(typefilter_t tf)
{
	assert(tf != NULL);
	memset(tf, 0, KDBG_TYPEFILTER_BITMAP_SIZE);
}

static void typefilter_allow_class(typefilter_t tf, uint8_t class)
{
	assert(tf != NULL);
	const uint32_t BYTES_PER_CLASS = 256 / 8; // 256 subclasses, 1 bit each
	memset(&tf[class * BYTES_PER_CLASS], 0xFF, BYTES_PER_CLASS);
}

static void typefilter_allow_csc(typefilter_t tf, uint16_t csc)
{
	assert(tf != NULL);
	setbit(tf, csc);
}

static bool typefilter_is_debugid_allowed(typefilter_t tf, uint32_t id)
{
	assert(tf != NULL);
	return isset(tf, KDBG_EXTRACT_CSC(id));
}

static mach_port_t typefilter_create_memory_entry(typefilter_t tf)
{
	assert(tf != NULL);

	mach_port_t memory_entry = MACH_PORT_NULL;
	memory_object_size_t size = TYPEFILTER_ALLOC_SIZE;

	mach_make_memory_entry_64(kernel_map,
				  &size,
				  (memory_object_offset_t)tf,
				  VM_PROT_READ,
				  &memory_entry,
				  MACH_PORT_NULL);

	return memory_entry;
}

static int kdbg_copyin_typefilter(user_addr_t addr, size_t size);
static void kdbg_enable_typefilter(void);
static void kdbg_disable_typefilter(void);

/*
 * External prototypes
 */

void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */
void commpage_update_kdebug_state(void); /* XXX sign */

extern int log_leaks;

/*
 * This flag is for testing purposes only -- it's highly experimental and tools
 * have not been updated to support it.
 */
static bool kdbg_continuous_time = false;

static inline uint64_t
kdbg_timestamp(void)
{
	if (kdbg_continuous_time) {
		return mach_continuous_time();
	} else {
		return mach_absolute_time();
	}
}

#if KDEBUG_MOJO_TRACE
#include <sys/kdebugevents.h>
static void kdebug_serial_print(	/* forward */
		uint32_t, uint32_t, uint64_t,
		uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#endif

int kdbg_control(int *, u_int, user_addr_t, size_t *);

static int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t, uint32_t);
static int kdbg_readcpumap(user_addr_t, size_t *);
static int kdbg_readthrmap_v3(user_addr_t, size_t, int);
static int kdbg_readcurthrmap(user_addr_t, size_t *);
static int kdbg_setreg(kd_regtype *);
static int kdbg_setpidex(kd_regtype *);
static int kdbg_setpid(kd_regtype *);
static void kdbg_thrmap_init(void);
static int kdbg_reinit(boolean_t);
static int kdbg_bootstrap(boolean_t);
static int kdbg_test(size_t flavor);

static int kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx);
static int kdbg_write_thread_map(vnode_t vp, vfs_context_t ctx);
static int kdbg_copyout_thread_map(user_addr_t buffer, size_t *buffer_size);
static void kdbg_clear_thread_map(void);

static boolean_t kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait);
static void kdbg_wakeup(void);

int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count,
		uint8_t** cpumap, uint32_t* cpumap_size);

static kd_threadmap *kdbg_thrmap_init_internal(unsigned int count,
		unsigned int *mapsize,
		unsigned int *mapcount);

static boolean_t kdebug_current_proc_enabled(uint32_t debugid);
static errno_t kdebug_check_trace_string(uint32_t debugid, uint64_t str_id);

int kdbg_write_v3_header(user_addr_t, size_t *, int);
int kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag,
		uint32_t sub_tag, uint64_t length,
		vnode_t vp, vfs_context_t ctx);

user_addr_t kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag,
		uint64_t length, vnode_t vp,
		vfs_context_t ctx);

// Helper functions

static int create_buffers(boolean_t);
static void delete_buffers(void);

extern int tasks_count;
extern int threads_count;
extern char *proc_best_name(proc_t p);
extern void IOSleep(int);

/* trace enable status */
unsigned int kdebug_enable = 0;

/* A static buffer to record events prior to the start of regular logging */

#define KD_EARLY_BUFFER_SIZE (16 * 1024)
#define KD_EARLY_BUFFER_NBUFS (KD_EARLY_BUFFER_SIZE / sizeof(kd_buf))
#if CONFIG_EMBEDDED
/*
 * On embedded, the space for this is carved out by osfmk/arm/data.s -- clang
 * has problems aligning to greater than 4K.
 */
extern kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS];
#else /* CONFIG_EMBEDDED */
__attribute__((aligned(KD_EARLY_BUFFER_SIZE)))
static kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS];
#endif /* !CONFIG_EMBEDDED */

static unsigned int kd_early_index = 0;
static bool kd_early_overflow = false;
static bool kd_early_done = false;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])

union kds_ptr {
	struct {
		uint32_t buffer_index:21;
		uint16_t offset:11;
	};
	uint32_t raw;
};

struct kd_storage {
	union	kds_ptr kds_next;
	uint32_t kds_bufindx;
	uint32_t kds_bufcnt;
	uint32_t kds_readlast;
	boolean_t kds_lostevents;
	uint64_t  kds_timestamp;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};

#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))
static_assert(N_STORAGE_UNITS_PER_BUFFER <= 0x7ff,
		"shouldn't overflow kds_ptr.offset");

struct kd_storage_buffers {
	struct	kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
unsigned int	n_storage_buffers = 0;
int	n_storage_threshold = 0;
int	kds_waiter = 0;
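
/*
 * Illustrative sketch, not part of the original source: a kds_ptr packs a
 * storage-unit reference into 32 bits. buffer_index selects an entry of
 * kd_bufs[] and offset selects a kd_storage within it, which is exactly
 * the lookup POINTER_FROM_KDS_PTR() performs.
 */
#if 0 /* example only */
static struct kd_storage *
kds_ptr_resolve_sketch(union kds_ptr p)
{
	if (p.raw == KDS_PTR_NULL) {
		return NULL;
	}
	return &kd_bufs[p.buffer_index].kdsb_addr[p.offset];
}
#endif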

#pragma pack(0)
struct kd_bufinfo {
	union  kds_ptr kd_list_head;
	union  kds_ptr kd_list_tail;
	boolean_t kd_lostevents;
	uint32_t _pad;
	uint64_t kd_prev_timebase;
	uint32_t num_bufs;
} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));


/*
 * In principle, this control block can be shared in DRAM with other
 * coprocessors and runtimes, for configuring what tracing is enabled.
 */
struct kd_ctrl_page_t {
	union kds_ptr kds_free_list;
	uint32_t enabled	:1;
	uint32_t _pad0		:31;
	int	kds_inuse_count;
	uint32_t kdebug_flags;
	uint32_t kdebug_slowcheck;
	uint64_t oldest_time;
	/*
	 * The number of kd_bufinfo structs allocated may not match the current
	 * number of active cpus. We capture the iops list head at initialization,
	 * which we could use to calculate the number of cpus we allocated data
	 * for, unless it happens to be null. To avoid that case, we explicitly
	 * also capture a cpu count.
	 */
	kd_iop_t* kdebug_iops;
	uint32_t kdebug_cpus;
} kd_ctrl_page = {
	.kds_free_list = {.raw = KDS_PTR_NULL},
	.kdebug_slowcheck = SLOW_NOLOG,
	.oldest_time = 0
};

#pragma pack()

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))

#define PAGE_4KB	4096
#define PAGE_16KB	16384

kd_buf *kdcopybuf = NULL;

unsigned int nkdbufs = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_spin_t * kdw_spin_lock;
static lck_spin_t * kds_spin_lock;

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;

off_t	RAW_file_offset = 0;
int	RAW_file_written = 0;

#define	RAW_FLUSH_SIZE	(2 * 1024 * 1024)

/*
 * A globally increasing counter for identifying strings in trace. Starts at
 * 1 because 0 is a reserved return value.
 */
__attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE)))
static uint64_t g_curr_str_id = 1;

#define STR_ID_SIG_OFFSET (48)
#define STR_ID_MASK       ((1ULL << STR_ID_SIG_OFFSET) - 1)
#define STR_ID_SIG_MASK   (~STR_ID_MASK)

/*
 * A bit pattern for identifying string IDs generated by
 * kdebug_trace_string(2).
 */
static uint64_t g_str_id_signature = (0x70acULL << STR_ID_SIG_OFFSET);
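
/*
 * Illustrative sketch, not part of the original source: a generated string
 * ID is the low 48 bits of the counter tagged with the 0x70ac signature in
 * the high bits, the same property kdebug_check_trace_string() verifies.
 */
#if 0 /* example only */
static bool
str_id_has_signature_sketch(uint64_t str_id)
{
	return (str_id & STR_ID_SIG_MASK) == g_str_id_signature;
}
#endif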

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000

/* task-to-string structure */
struct tts
{
	task_t    task;		/* from proc's task */
	pid_t     pid;		/* from proc's p_pid */
	char      task_comm[20];/* from proc's p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

static uint32_t
kdbg_cpu_count(boolean_t early_trace)
{
	if (early_trace) {
#if CONFIG_EMBEDDED
		return ml_get_cpu_count();
#else
		return max_ncpus;
#endif
	}

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	assert(hinfo.logical_cpu_max > 0);
	return hinfo.logical_cpu_max;
}

#if MACH_ASSERT
#if CONFIG_EMBEDDED
static boolean_t
kdbg_iop_list_is_valid(kd_iop_t* iop)
{
	if (iop) {
		/* Is list sorted by cpu_id? */
		kd_iop_t* temp = iop;
		do {
			assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1);
			assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE)));
		} while ((temp = temp->next));

		/* Does each entry have a function and a name? */
		temp = iop;
		do {
			assert(temp->callback.func);
			assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name));
		} while ((temp = temp->next));
	}

	return TRUE;
}

static boolean_t
kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id)
{
	while (list) {
		if (list->cpu_id == cpu_id)
			return TRUE;
		list = list->next;
	}

	return FALSE;
}
#endif /* CONFIG_EMBEDDED */
#endif /* MACH_ASSERT */

static void
kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
{
	while (iop) {
		iop->callback.func(iop->callback.context, type, arg);
		iop = iop->next;
	}
}

static void
kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);
	if (enabled) {
		/*
		 * The oldest valid time is now; reject old events from IOPs.
		 */
		kd_ctrl_page.oldest_time = kdbg_timestamp();
		kdebug_enable |= trace_type;
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
		kd_ctrl_page.enabled = 1;
		commpage_update_kdebug_state();
	} else {
		kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
		kd_ctrl_page.enabled = 0;
		commpage_update_kdebug_state();
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (enabled) {
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
	} else {
		/*
		 * If you do not flush the IOP trace buffers, they can linger
		 * for a considerable period; consider code which disables and
		 * deallocates without a final sync flush.
		 */
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
	}
}

static void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
	}

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

/*
 * Disable wrapping and return true if trace wrapped, false otherwise.
 */
boolean_t
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
	boolean_t wrapped;
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
	*old_flags = kd_ctrl_page.kdebug_flags;

	wrapped = kd_ctrl_page.kdebug_flags & KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return wrapped;
}

void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

	if ( !(old_slowcheck & SLOW_NOLOG))
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

	if (lostevents == TRUE)
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

static int
create_buffers(boolean_t early_trace)
{
	unsigned int i;
	unsigned int p_buffer_size;
	unsigned int f_buffer_size;
	unsigned int f_buffers;
	int error = 0;

	/*
	 * For the duration of this allocation, trace code will only reference
	 * kdebug_iops. Any iops registered after this enabling will not be
	 * messaged until the buffers are reallocated.
	 *
	 * TLDR; Must read kd_iops once and only once!
	 */
	kd_ctrl_page.kdebug_iops = kd_iops;

#if CONFIG_EMBEDDED
	assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops));
#endif

	/*
	 * If the list is valid, it is sorted, newest -> oldest. Each iop entry
	 * has a cpu_id of "the older entry + 1", so the highest cpu_id will
	 * be the list head + 1.
	 */

	kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}

	if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	kd_bufs = NULL;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)), VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	n_storage_units = 0;

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

			kd_ctrl_page.kds_free_list.buffer_index = i;
			kd_ctrl_page.kds_free_list.offset = n;
		}
		n_storage_units += n_elements;
	}

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

	for (i = 0; i < kd_ctrl_page.kdebug_cpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = FALSE;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

	kd_ctrl_page.kds_inuse_count = 0;
	n_storage_threshold = n_storage_units / 2;
out:
	if (error)
		delete_buffers();

	return(error);
}

static void
delete_buffers(void)
{
	unsigned int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
			}
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);

		kdbip = NULL;
	}
	kd_ctrl_page.kdebug_iops = NULL;
	kd_ctrl_page.kdebug_cpus = 0;
	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}

void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
	int s = 0;
	struct	kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	union kds_ptr kdsp;

	kdsp.raw = kdsp_raw;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * It's possible for the storage unit pointed to by kdsp to
		 * have already been stolen, so check to see if it's still the
		 * head of the list now that we're behind the lock that
		 * protects adding and removing from the queue. Since we only
		 * ever release and steal units from that position, if it's no
		 * longer the head we have nothing to do in this context.
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
		kd_ctrl_page.kds_free_list = kdsp;

		kd_ctrl_page.kds_inuse_count--;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}


boolean_t
allocate_storage_unit(int cpu)
{
	union	kds_ptr kdsp;
	struct	kd_storage *kdsp_actual, *kdsp_next_actual;
	struct  kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t	oldest_ts, ts;
	boolean_t	retval = TRUE;
	int s = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
			goto out;
	}

	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page.kds_inuse_count++;
	} else {
		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
			kdbp->kd_lostevents = TRUE;
			retval = FALSE;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = UINT64_MAX;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {

			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}

			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}

			/*
			 * When wrapping, steal the storage unit with the
			 * earliest timestamp on its last event, instead of the
			 * earliest timestamp on the first event. This allows a
			 * storage unit with more recent events to be preserved,
			 * even if the storage unit contains events that are
			 * older than those found in other CPUs.
			 */
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[EVENTS_PER_STORAGE_UNIT - 1]);
			if (ts < oldest_ts) {
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			commpage_update_kdebug_state();
			retval = FALSE;
			goto out;
		}
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
			kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
			kdsp_next_actual->kds_lostevents = TRUE;
		} else
			kdbp_vict->kd_lostevents = TRUE;

		kd_ctrl_page.oldest_time = oldest_ts;
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp_actual->kds_timestamp = kdbg_timestamp();
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt	  = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = FALSE;
	kdsp_actual->kds_bufindx = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
		kdbp->kd_list_head = kdsp;
	else
		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return (retval);
}

int
kernel_debug_register_callback(kd_callback_t callback)
{
	kd_iop_t* iop;
	if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t), VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) {
		memcpy(&iop->callback, &callback, sizeof(kd_callback_t));

		/*
		 * <rdar://problem/13351477> Some IOP clients are not providing a name.
		 *
		 * Remove when fixed.
		 */
		{
			boolean_t is_valid_name = FALSE;
			for (uint32_t length = 0; length < sizeof(callback.iop_name); ++length) {
				/* This is roughly isprintable(c) */
				if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
					continue;
				if (callback.iop_name[length] == 0) {
					if (length)
						is_valid_name = TRUE;
					break;
				}
			}

			if (!is_valid_name) {
				strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
			}
		}

		iop->last_timestamp = 0;

		do {
			/*
			 * We use two pieces of state, the old list head
			 * pointer, and the value of old_list_head->cpu_id.
			 * If we read kd_iops more than once, it can change
			 * between reads.
			 *
			 * TLDR; Must not read kd_iops more than once per loop.
			 */
			iop->next = kd_iops;
			iop->cpu_id = iop->next ? (iop->next->cpu_id + 1) : kdbg_cpu_count(FALSE);

			/*
			 * Header says OSCompareAndSwapPtr has a memory barrier
			 */
		} while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));

		return iop->cpu_id;
	}

	return 0;
}

void
kernel_debug_enter(
	uint32_t	coreid,
	uint32_t	debugid,
	uint64_t	timestamp,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	threadid
	)
{
	uint32_t	bindx;
	kd_buf		*kd;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {

		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
			goto out1;

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid))
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			if (debugid >= kdlog_beg && debugid <= kdlog_end)
				goto record_event;
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
				goto out1;
		}
	}

record_event:
	if (timestamp < kd_ctrl_page.oldest_time) {
		goto out1;
	}

#if CONFIG_EMBEDDED
	/*
	 * When start_kern_tracing is called by the kernel to trace very
	 * early kernel events, it saves data to a secondary buffer until
	 * it is possible to initialize ktrace, and then dumps the events
	 * into the ktrace buffer using this method. In this case, iops will
	 * be NULL, and the coreid will be zero. It is not possible to have
	 * a valid IOP coreid of zero, so pass if both iops is NULL and coreid
	 * is zero.
	 */
	assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid) || (kd_ctrl_page.kdebug_iops == NULL && coreid == 0));
#endif

	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	kdbp = &kdbip[coreid];
	timestamp &= KDBG_TIMESTAMP_MASK;

#if KDEBUG_MOJO_TRACE
	if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
		kdebug_serial_print(coreid, debugid, timestamp,
				    arg1, arg2, arg3, arg4, threadid);
#endif

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else {
		kdsp_actual = NULL;
		bindx = EVENTS_PER_STORAGE_UNIT;
	}

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(coreid) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	// IOP entries can be allocated before xnu allocates and inits the buffer
	if (timestamp < kdsp_actual->kds_timestamp)
		kdsp_actual->kds_timestamp = timestamp;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = threadid;

	kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
		kdbg_wakeup();
	}
}

static void
kernel_debug_internal(
	boolean_t only_filter,
	uint32_t  debugid,
	uintptr_t arg1,
	uintptr_t arg2,
	uintptr_t arg3,
	uintptr_t arg4,
	uintptr_t arg5)
{
	struct proc	*curproc;
	uint64_t	now;
	uint32_t	bindx;
	kd_buf		*kd;
	int		cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;
	union  kds_ptr kds_raw;

	if (kd_ctrl_page.kdebug_slowcheck) {
		if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) ||
		    !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT)))
		{
			goto out1;
		}

		if ( !ml_at_interrupt_context()) {
			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
				/*
				 * If kdebug flag is not set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && !(curproc->p_kdebug)) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				    (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
				/*
				 * If kdebug flag is set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && curproc->p_kdebug) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
				    (debugid >> 24 != DBG_TRACE))
					goto out1;
			}
		}

		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
			if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid))
				goto record_event;

			goto out1;
		} else if (only_filter == TRUE) {
			goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			/* Always record trace system info */
			if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if (debugid < kdlog_beg || debugid > kdlog_end)
				goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			/* Always record trace system info */
			if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE)
				goto record_event;

			if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
			    (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
				goto out1;
		}
	} else if (only_filter == TRUE) {
		goto out1;
	}

record_event:
	disable_preemption();

	if (kd_ctrl_page.enabled == 0)
		goto out;

	cpu = cpu_number();
	kdbp = &kdbip[cpu];

#if KDEBUG_MOJO_TRACE
	if (kdebug_enable & KDEBUG_ENABLE_SERIAL)
		kdebug_serial_print(cpu, debugid,
				    kdbg_timestamp() & KDBG_TIMESTAMP_MASK,
				    arg1, arg2, arg3, arg4, arg5);
#endif

retry_q:
	kds_raw = kdbp->kd_list_tail;

	if (kds_raw.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
		bindx = kdsp_actual->kds_bufindx;
	} else {
		kdsp_actual = NULL;
		bindx = EVENTS_PER_STORAGE_UNIT;
	}

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(cpu) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	now = kdbg_timestamp() & KDBG_TIMESTAMP_MASK;

	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);

#if KPERF
	kperf_kdebug_callback(debugid, __builtin_frame_address(0));
#endif
out:
	enable_preemption();
out1:
	if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
		uint32_t	etype;
		uint32_t	stype;

		etype = debugid & KDBG_EVENTID_MASK;
		stype = debugid & KDBG_CSC_MASK;

		if (etype == INTERRUPT || etype == MACH_vmfault ||
		    stype == BSC_SysCall || stype == MACH_SysCall) {
			kdbg_wakeup();
		}
	}
}

void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4,
			      (uintptr_t)thread_tid(current_thread()));
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4, arg5);
}

void
kernel_debug_filtered(
	uint32_t  debugid,
	uintptr_t arg1,
	uintptr_t arg2,
	uintptr_t arg3,
	uintptr_t arg4)
{
	kernel_debug_internal(TRUE, debugid, arg1, arg2, arg3, arg4,
			      (uintptr_t)thread_tid(current_thread()));
}

void
kernel_debug_string_early(const char *message)
{
	uintptr_t arg[4] = {0, 0, 0, 0};

	/* Stuff the message string in the args and log it. */
	strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
	KERNEL_DEBUG_EARLY(
		TRACE_INFO_STRING,
		arg[0], arg[1], arg[2], arg[3]);
}

#define SIMPLE_STR_LEN (64)
static_assert(SIMPLE_STR_LEN % sizeof(uintptr_t) == 0);

void
kernel_debug_string_simple(uint32_t eventid, const char *str)
{
	/* array of uintptr_ts simplifies emitting the string as arguments */
	uintptr_t str_buf[(SIMPLE_STR_LEN / sizeof(uintptr_t)) + 1] = { 0 };
	size_t len = strlcpy((char *)str_buf, str, SIMPLE_STR_LEN + 1);

	uintptr_t thread_id = (uintptr_t)thread_tid(current_thread());
	uint32_t debugid = eventid | DBG_FUNC_START;

	/* string can fit in a single tracepoint */
	if (len <= (4 * sizeof(uintptr_t))) {
		debugid |= DBG_FUNC_END;
	}

	kernel_debug_internal(FALSE, debugid, str_buf[0],
					      str_buf[1],
					      str_buf[2],
					      str_buf[3], thread_id);

	debugid &= KDBG_EVENTID_MASK;
	int i = 4;
	size_t written = 4 * sizeof(uintptr_t);

	for (; written < len; i += 4, written += 4 * sizeof(uintptr_t)) {
		/* if this is the last tracepoint to be emitted */
		if ((written + (4 * sizeof(uintptr_t))) >= len) {
			debugid |= DBG_FUNC_END;
		}
		kernel_debug_internal(FALSE, debugid, str_buf[i],
						      str_buf[i + 1],
						      str_buf[i + 2],
						      str_buf[i + 3], thread_id);
	}
}
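
/*
 * Worked example, not part of the original source: on K64 a 50-character
 * string does not fit in the 32 bytes carried by one tracepoint's four
 * arguments, so the first event above carries bytes 0-31 with
 * DBG_FUNC_START and the loop emits a second event with the remainder and
 * DBG_FUNC_END set.
 */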

extern int	master_cpu;		/* MACH_KERNEL_PRIVATE */
/*
 * Used prior to start_kern_tracing() being called.
 * Log temporarily into a static buffer.
 */
void
kernel_debug_early(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4)
{
	/* If early tracing is over, use the normal path. */
	if (kd_early_done) {
		KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
		return;
	}

	/* Do nothing if the buffer is full or we're not on the boot cpu. */
	kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_NBUFS;
	if (kd_early_overflow || cpu_number() != master_cpu) {
		return;
	}

	kd_early_buffer[kd_early_index].debugid = debugid;
	kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
	kd_early_buffer[kd_early_index].arg1 = arg1;
	kd_early_buffer[kd_early_index].arg2 = arg2;
	kd_early_buffer[kd_early_index].arg3 = arg3;
	kd_early_buffer[kd_early_index].arg4 = arg4;
	kd_early_buffer[kd_early_index].arg5 = 0;
	kd_early_index++;
}

/*
 * Transfer the contents of the temporary buffer into the trace buffers.
 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
 * when mach_absolute_time is set to 0.
 */
static void
kernel_debug_early_end(void)
{
	if (cpu_number() != master_cpu) {
		panic("kernel_debug_early_end() not called on boot processor");
	}

	/* reset the current oldest time to allow early events */
	kd_ctrl_page.oldest_time = 0;

#if !CONFIG_EMBEDDED
	/* Fake sentinel marking the start of kernel time relative to TSC */
	kernel_debug_enter(0,
			TRACE_TIMESTAMPS,
			0,
			(uint32_t)(tsc_rebase_abs_time >> 32),
			(uint32_t)tsc_rebase_abs_time,
			tsc_at_boot,
			0,
			0);
#endif
	for (unsigned int i = 0; i < kd_early_index; i++) {
		kernel_debug_enter(0,
				kd_early_buffer[i].debugid,
				kd_early_buffer[i].timestamp,
				kd_early_buffer[i].arg1,
				kd_early_buffer[i].arg2,
				kd_early_buffer[i].arg3,
				kd_early_buffer[i].arg4,
				0);
	}

	/* Cut events-lost event on overflow */
	if (kd_early_overflow) {
		KDBG_RELEASE(TRACE_LOST_EVENTS, 1);
	}

	kd_early_done = true;

	/* This trace marks the start of kernel tracing */
	kernel_debug_string_early("early trace done");
}

void
kernel_debug_disable(void)
{
	if (kdebug_enable) {
		kdbg_set_tracing_enabled(FALSE, 0);
	}
}

/*
 * Returns non-zero if debugid is in a reserved class.
 */
static int
kdebug_validate_debugid(uint32_t debugid)
{
	uint8_t debugid_class;

	debugid_class = KDBG_EXTRACT_CLASS(debugid);
	switch (debugid_class) {
		case DBG_TRACE:
			return EPERM;
	}

	return 0;
}

/*
 * Support syscall SYS_kdebug_typefilter.
 */
int
kdebug_typefilter(__unused struct proc* p,
		  struct kdebug_typefilter_args* uap,
		  __unused int *retval)
{
	int ret = KERN_SUCCESS;

	if (uap->addr == USER_ADDR_NULL ||
	    uap->size == USER_ADDR_NULL) {
		return EINVAL;
	}

	/*
	 * The atomic load is to close a race window with setting the typefilter
	 * and memory entry values. A description follows:
	 *
	 * Thread 1 (writer)
	 *
	 * Allocate Typefilter
	 * Allocate MemoryEntry
	 * Write Global MemoryEntry Ptr
	 * Atomic Store (Release) Global Typefilter Ptr
	 *
	 * Thread 2 (reader, AKA us)
	 *
	 * if ((Atomic Load (Acquire) Global Typefilter Ptr) == NULL)
	 *     return;
	 *
	 * Without the atomic store, it isn't guaranteed that the write of
	 * Global MemoryEntry Ptr is visible before we can see the write of
	 * Global Typefilter Ptr.
	 *
	 * Without the atomic load, it isn't guaranteed that the loads of
	 * Global MemoryEntry Ptr aren't speculated.
	 *
	 * The global pointers transition from NULL -> valid once and only once,
	 * and never change after becoming valid. This means that having passed
	 * the first atomic load test of Global Typefilter Ptr, this function
	 * can then safely use the remaining global state without atomic checks.
	 */
	if (!__c11_atomic_load((_Atomic typefilter_t *)&kdbg_typefilter, memory_order_acquire)) {
		return EINVAL;
	}

	assert(kdbg_typefilter_memory_entry);

	mach_vm_offset_t user_addr = 0;
	vm_map_t user_map = current_map();

	ret = mach_to_bsd_errno(
			mach_vm_map_kernel(user_map,			// target map
				    &user_addr,				// [in, out] target address
				    TYPEFILTER_ALLOC_SIZE,		// initial size
				    0,					// mask (alignment?)
				    VM_FLAGS_ANYWHERE,			// flags
				    VM_KERN_MEMORY_NONE,
				    kdbg_typefilter_memory_entry,	// port (memory entry!)
				    0,					// offset (in memory entry)
				    FALSE,				// should copy
				    VM_PROT_READ,			// cur_prot
				    VM_PROT_READ,			// max_prot
				    VM_INHERIT_SHARE));			// inherit behavior on fork

	if (ret == KERN_SUCCESS) {
		vm_size_t user_ptr_size = vm_map_is_64bit(user_map) ? 8 : 4;
		ret = copyout(CAST_DOWN(void *, &user_addr), uap->addr, user_ptr_size);

		if (ret != KERN_SUCCESS) {
			mach_vm_deallocate(user_map, user_addr, TYPEFILTER_ALLOC_SIZE);
		}
	}

	return ret;
}

/*
 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
 */
int
kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
{
	struct kdebug_trace64_args uap64;

	uap64.code = uap->code;
	uap64.arg1 = uap->arg1;
	uap64.arg2 = uap->arg2;
	uap64.arg3 = uap->arg3;
	uap64.arg4 = uap->arg4;

	return kdebug_trace64(p, &uap64, retval);
}

/*
 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated
 * to fit in 32-bit record format.
 *
 * It is intentional that error conditions are not checked until kdebug is
 * enabled. This is to match the userspace wrapper behavior, which is optimizing
 * for non-error case performance.
 */
int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
{
	int err;

	if ( __probable(kdebug_enable == 0) )
		return(0);

	if ((err = kdebug_validate_debugid(uap->code)) != 0) {
		return err;
	}

	kernel_debug_internal(FALSE, uap->code,
			      (uintptr_t)uap->arg1,
			      (uintptr_t)uap->arg2,
			      (uintptr_t)uap->arg3,
			      (uintptr_t)uap->arg4,
			      (uintptr_t)thread_tid(current_thread()));

	return(0);
}

/*
 * Adding enough padding to contain a full tracepoint for the last
 * portion of the string greatly simplifies the logic of splitting the
 * string between tracepoints. Full tracepoints can be generated using
 * the buffer itself, without having to manually add zeros to pad the
 * arguments.
 */

/* 2 string args in first tracepoint and 9 string data tracepoints */
#define STR_BUF_ARGS (2 + (9 * 4))
/* times the size of each arg on K64 */
#define MAX_STR_LEN  (STR_BUF_ARGS * sizeof(uint64_t))
/* on K32, ending straddles a tracepoint, so reserve blanks */
#define STR_BUF_SIZE (MAX_STR_LEN + (2 * sizeof(uint32_t)))
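/*
 * Worked arithmetic, not part of the original source: STR_BUF_ARGS is
 * 2 + 9 * 4 = 38 arguments, so MAX_STR_LEN is 38 * 8 = 304 bytes on K64;
 * STR_BUF_SIZE reserves two extra 32-bit blanks so a K32 string that
 * straddles the final tracepoint can still be emitted from the buffer.
 */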

/*
 * This function does no error checking and assumes that it is called with
 * the correct arguments, including that the buffer pointed to by str is at
 * least STR_BUF_SIZE bytes. However, str must be aligned to word-size and
 * be NUL-terminated. In cases where a string can fit evenly into a final
 * tracepoint without its NUL-terminator, this function will not end those
 * strings with a NUL in trace. It's up to clients to look at the function
 * qualifier for DBG_FUNC_END in this case, to end the string.
 */
static uint64_t
kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr,
			     size_t str_len)
{
	/* str must be word-aligned */
	uintptr_t *str = vstr;
	size_t written = 0;
	uintptr_t thread_id;
	int i;
	uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING,
	                                       TRACE_STRING_GLOBAL);

	thread_id = (uintptr_t)thread_tid(current_thread());

	/* if the ID is being invalidated, just emit that */
	if (str_id != 0 && str_len == 0) {
		kernel_debug_internal(FALSE, trace_debugid | DBG_FUNC_START | DBG_FUNC_END,
				      (uintptr_t)debugid, (uintptr_t)str_id, 0, 0,
				      thread_id);
		return str_id;
	}

	/* generate an ID, if necessary */
	if (str_id == 0) {
		str_id = OSIncrementAtomic64((SInt64 *)&g_curr_str_id);
		str_id = (str_id & STR_ID_MASK) | g_str_id_signature;
	}

	trace_debugid |= DBG_FUNC_START;
	/* string can fit in a single tracepoint */
	if (str_len <= (2 * sizeof(uintptr_t))) {
		trace_debugid |= DBG_FUNC_END;
	}

	kernel_debug_internal(FALSE, trace_debugid, (uintptr_t)debugid,
						    (uintptr_t)str_id, str[0],
						    str[1], thread_id);

	trace_debugid &= KDBG_EVENTID_MASK;
	i = 2;
	written += 2 * sizeof(uintptr_t);

	for (; written < str_len; i += 4, written += 4 * sizeof(uintptr_t)) {
		if ((written + (4 * sizeof(uintptr_t))) >= str_len) {
			trace_debugid |= DBG_FUNC_END;
		}
		kernel_debug_internal(FALSE, trace_debugid, str[i],
							    str[i + 1],
							    str[i + 2],
							    str[i + 3], thread_id);
	}

	return str_id;
}

/*
 * Returns true if the current process can emit events, and false otherwise.
 * Trace system and scheduling events circumvent this check, as do events
 * emitted in interrupt context.
 */
static boolean_t
kdebug_current_proc_enabled(uint32_t debugid)
{
	/* can't determine current process in interrupt context */
	if (ml_at_interrupt_context()) {
		return TRUE;
	}

	/* always emit trace system and scheduling events */
	if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE ||
	    (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0)))
	{
		return TRUE;
	}

	if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
		proc_t cur_proc = current_proc();

		/* only the process with the kdebug bit set is allowed */
		if (cur_proc && !(cur_proc->p_kdebug)) {
			return FALSE;
		}
	} else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
		proc_t cur_proc = current_proc();

		/* every process except the one with the kdebug bit set is allowed */
		if (cur_proc && cur_proc->p_kdebug) {
			return FALSE;
		}
	}

	return TRUE;
}

boolean_t
kdebug_debugid_enabled(uint32_t debugid)
{
	/* if no filtering is enabled */
	if (!kd_ctrl_page.kdebug_slowcheck) {
		return TRUE;
	}

	return kdebug_debugid_explicitly_enabled(debugid);
}

boolean_t
kdebug_debugid_explicitly_enabled(uint32_t debugid)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
		return typefilter_is_debugid_allowed(kdbg_typefilter, debugid);
	} else if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) {
		return TRUE;
	} else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
		if (debugid < kdlog_beg || debugid > kdlog_end) {
			return FALSE;
		}
	} else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 &&
		    (debugid & KDBG_EVENTID_MASK) != kdlog_value2 &&
		    (debugid & KDBG_EVENTID_MASK) != kdlog_value3 &&
		    (debugid & KDBG_EVENTID_MASK) != kdlog_value4)
		{
			return FALSE;
		}
	}

	return TRUE;
}

/*
 * Returns 0 if a string can be traced with these arguments. Returns an
 * errno value if an error occurred.
 */
static errno_t
kdebug_check_trace_string(uint32_t debugid, uint64_t str_id)
{
	/* if there are function qualifiers on the debugid */
	if (debugid & ~KDBG_EVENTID_MASK) {
		return EINVAL;
	}

	if (kdebug_validate_debugid(debugid)) {
		return EPERM;
	}

	if (str_id != 0 && (str_id & STR_ID_SIG_MASK) != g_str_id_signature) {
		return EINVAL;
	}

	return 0;
}

/*
 * Implementation of KPI kernel_debug_string.
 */
int
kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str)
{
	/* arguments to tracepoints must be word-aligned */
	__attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
	static_assert(sizeof(str_buf) > MAX_STR_LEN);
	vm_size_t len_copied;
	int err;

	assert(str_id);

	if (__probable(kdebug_enable == 0)) {
		return 0;
	}

	if (!kdebug_current_proc_enabled(debugid)) {
		return 0;
	}

	if (!kdebug_debugid_enabled(debugid)) {
		return 0;
	}

	if ((err = kdebug_check_trace_string(debugid, *str_id)) != 0) {
		return err;
	}

	if (str == NULL) {
		if (str_id == 0) {
			return EINVAL;
		}

		*str_id = kernel_debug_string_internal(debugid, *str_id, NULL, 0);
		return 0;
	}

	memset(str_buf, 0, sizeof(str_buf));
	len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1);
	*str_id = kernel_debug_string_internal(debugid, *str_id, str_buf,
					       len_copied);
	return 0;
}

/*
 * Support syscall kdebug_trace_string.
 */
int
kdebug_trace_string(__unused struct proc *p,
		    struct kdebug_trace_string_args *uap,
		    uint64_t *retval)
{
	__attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE];
	static_assert(sizeof(str_buf) > MAX_STR_LEN);
	size_t len_copied;
	int err;

	if (__probable(kdebug_enable == 0)) {
		return 0;
	}

	if (!kdebug_current_proc_enabled(uap->debugid)) {
		return 0;
	}

	if (!kdebug_debugid_enabled(uap->debugid)) {
		return 0;
	}

	if ((err = kdebug_check_trace_string(uap->debugid, uap->str_id)) != 0) {
		return err;
	}

	if (uap->str == USER_ADDR_NULL) {
		if (uap->str_id == 0) {
			return EINVAL;
		}

		*retval = kernel_debug_string_internal(uap->debugid, uap->str_id,
						       NULL, 0);
		return 0;
	}

	memset(str_buf, 0, sizeof(str_buf));
	err = copyinstr(uap->str, str_buf, MAX_STR_LEN + 1, &len_copied);

	/* it's alright to truncate the string, so allow ENAMETOOLONG */
	if (err == ENAMETOOLONG) {
		str_buf[MAX_STR_LEN] = '\0';
	} else if (err) {
		return err;
	}

	if (len_copied <= 1) {
		return EINVAL;
	}

	/* convert back to a length */
	len_copied--;

	*retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf,
					       len_copied);
	return 0;
}

static void
kdbg_lock_init(void)
{
	static lck_grp_attr_t *kdebug_lck_grp_attr = NULL;
	static lck_grp_t *kdebug_lck_grp = NULL;
	static lck_attr_t *kdebug_lck_attr = NULL;

	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) {
		return;
	}

	assert(kdebug_lck_grp_attr == NULL);
	kdebug_lck_grp_attr = lck_grp_attr_alloc_init();
	kdebug_lck_grp = lck_grp_alloc_init("kdebug", kdebug_lck_grp_attr);
	kdebug_lck_attr = lck_attr_alloc_init();

	kds_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);
	kdw_spin_lock = lck_spin_alloc_init(kdebug_lck_grp, kdebug_lck_attr);

	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}

int
kdbg_bootstrap(boolean_t early_trace)
{
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers(early_trace));
}

1923 int
1924 kdbg_reinit(boolean_t early_trace)
1925 {
1926 int ret = 0;
1927
1928 /*
1929 * Disable trace collecting
1930 * First make sure we're not in
1931 * the middle of cutting a trace
1932 */
1933 kernel_debug_disable();
1934
1935 /*
1936 * Make sure the SLOW_NOLOG is seen
1937 * by everyone who might be trying
1938 * to cut a trace.
1939 */
1940 IOSleep(100);
1941
1942 delete_buffers();
1943
1944 kdbg_clear_thread_map();
1945 ret = kdbg_bootstrap(early_trace);
1946
1947 RAW_file_offset = 0;
1948 RAW_file_written = 0;
1949
1950 return(ret);
1951 }
1952
1953 void
1954 kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid)
1955 {
1956 if (!proc) {
1957 *arg_pid = 0;
1958 *arg_uniqueid = 0;
1959 } else {
1960 *arg_pid = proc->p_pid;
1961 *arg_uniqueid = proc->p_uniqueid;
1962 if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) {
1963 *arg_uniqueid = 0;
1964 }
1965 }
1966 }
1967
1968
1969 void
1970 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1971 {
1972 char *dbg_nameptr;
1973 int dbg_namelen;
1974 long dbg_parms[4];
1975
1976 if (!proc) {
1977 *arg1 = 0;
1978 *arg2 = 0;
1979 *arg3 = 0;
1980 *arg4 = 0;
1981 return;
1982 }
1983 /*
1984 * Collect the pathname for tracing
1985 */
1986 dbg_nameptr = proc->p_comm;
1987 dbg_namelen = (int)strlen(proc->p_comm);
1988 dbg_parms[0]=0L;
1989 dbg_parms[1]=0L;
1990 dbg_parms[2]=0L;
1991 dbg_parms[3]=0L;
1992
1993 if(dbg_namelen > (int)sizeof(dbg_parms))
1994 dbg_namelen = (int)sizeof(dbg_parms);
1995
1996 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1997
1998 *arg1=dbg_parms[0];
1999 *arg2=dbg_parms[1];
2000 *arg3=dbg_parms[2];
2001 *arg4=dbg_parms[3];
2002 }
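
/*
 * Example (illustrative): dbg_parms acts as a 4-long character buffer, 32
 * bytes on LP64 (16 on ILP32). For a p_comm of "launchd", its seven bytes
 * are packed into dbg_parms in memory order; because the array is zeroed
 * first, short names come out effectively NUL-padded across arg1..arg4.
 */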
2003
2004 static void
2005 kdbg_resolve_map(thread_t th_act, void *opaque)
2006 {
2007 kd_threadmap *mapptr;
2008 krt_t *t = (krt_t *)opaque;
2009
2010 if (t->count < t->maxcount) {
2011 mapptr = &t->map[t->count];
2012 mapptr->thread = (uintptr_t)thread_tid(th_act);
2013
2014 (void) strlcpy (mapptr->command, t->atts->task_comm,
2015 sizeof(t->atts->task_comm));
2016 /*
2017 * Some kernel threads have no associated pid.
2018 * We still need to mark the entry as valid.
2019 */
2020 if (t->atts->pid)
2021 mapptr->valid = t->atts->pid;
2022 else
2023 mapptr->valid = 1;
2024
2025 t->count++;
2026 }
2027 }
2028
2029 /*
2030 *
2031 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
2032 *
2033 * You may provide a buffer and size, or if you set the buffer to NULL, a
2034 * buffer of sufficient size will be allocated.
2035 *
2036 * If you provide a buffer and it is too small, sets cpumap_size to the number
2037 * of bytes required and returns EINVAL.
2038 *
2039 * On success, if you provided a buffer, cpumap_size is set to the number of
2040 * bytes written. If you did not provide a buffer, cpumap is set to the newly
2041 * allocated buffer and cpumap_size is set to the number of bytes allocated.
2042 *
2043 * NOTE: It may seem redundant to pass both iops and a cpu_count.
2044 *
2045 * We may be reporting data from "now", or from the "past".
2046 *
2047 * The "past" data would be for kdbg_readcpumap().
2048 *
2049 * If we do not pass both iops and cpu_count, and iops is NULL, this function
2050 * will need to read "now" state to get the number of cpus, which would be in
2051 * error if we were reporting "past" state.
2052 */
2053
2054 int
2055 kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
2056 {
2057 assert(cpumap);
2058 assert(cpumap_size);
2059 assert(cpu_count);
2060 assert(!iops || iops->cpu_id + 1 == cpu_count);
2061
2062 uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
2063 uint32_t bytes_available = *cpumap_size;
2064 *cpumap_size = bytes_needed;
2065
2066 if (*cpumap == NULL) {
2067 if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
2068 return ENOMEM;
2069 }
2070 bzero(*cpumap, *cpumap_size);
2071 } else if (bytes_available < bytes_needed) {
2072 return EINVAL;
2073 }
2074
2075 kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
2076
2077 header->version_no = RAW_VERSION1;
2078 header->cpu_count = cpu_count;
2079
2080 kd_cpumap* cpus = (kd_cpumap*)&header[1];
2081
2082 int32_t index = cpu_count - 1;
2083 while (iops) {
2084 cpus[index].cpu_id = iops->cpu_id;
2085 cpus[index].flags = KDBG_CPUMAP_IS_IOP;
2086 strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
2087
2088 iops = iops->next;
2089 index--;
2090 }
2091
2092 while (index >= 0) {
2093 cpus[index].cpu_id = index;
2094 cpus[index].flags = 0;
2095 strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
2096
2097 index--;
2098 }
2099
2100 return KERN_SUCCESS;
2101 }
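
/*
 * Example of the two calling modes described above (illustrative sketch,
 * with iops/cpu_count standing in for the arguments callers in this file
 * pass):
 *
 *	uint8_t *cpumap = NULL;	// NULL asks the function to allocate
 *	uint32_t size = 0;
 *	if (kdbg_cpumap_init_internal(iops, cpu_count, &cpumap, &size) == KERN_SUCCESS) {
 *		// ... use cpumap/size ...
 *		kmem_free(kernel_map, (vm_offset_t)cpumap, size);
 *	}
 *
 * Passing a preallocated buffer instead returns EINVAL, with the required
 * byte count left in size, when the buffer is too small.
 */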
2102
2103 void
2104 kdbg_thrmap_init(void)
2105 {
2106 ktrace_assert_lock_held();
2107
2108 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
2109 return;
2110 }
2111
2112 kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
2113
2114 if (kd_mapptr) {
2115 kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
2116 }
2117 }
2118
2119 static kd_threadmap *
2120 kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
2121 {
2122 kd_threadmap *mapptr;
2123 proc_t p;
2124 struct krt akrt;
2125 int tts_count = 0; /* number of task-to-string structures */
2126 struct tts *tts_mapptr;
2127 unsigned int tts_mapsize = 0;
2128 vm_offset_t kaddr;
2129
2130 assert(mapsize != NULL);
2131 assert(mapcount != NULL);
2132
2133 *mapcount = threads_count;
2134 tts_count = tasks_count;
2135
2136 /*
2137 * The proc count could change during buffer allocation,
2138 * so introduce a small fudge factor to bump up the
2139 * buffer sizes. This gives new tasks some chance of
2140 * making it into the tables. Bump up by 25%.
2141 */
2142 *mapcount += *mapcount / 4;
2143 tts_count += tts_count / 4;
2144
2145 *mapsize = *mapcount * sizeof(kd_threadmap);
2146
2147 if (count && count < *mapcount) {
2148 return 0;
2149 }
2150
2151 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
2152 bzero((void *)kaddr, *mapsize);
2153 mapptr = (kd_threadmap *)kaddr;
2154 } else {
2155 return 0;
2156 }
2157
2158 tts_mapsize = tts_count * sizeof(struct tts);
2159
2160 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) {
2161 bzero((void *)kaddr, tts_mapsize);
2162 tts_mapptr = (struct tts *)kaddr;
2163 } else {
2164 kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
2165
2166 return 0;
2167 }
2168
2169 /*
2170 * Save the proc's name and take a reference for each task associated
2171 * with a valid process.
2172 */
2173 proc_list_lock();
2174
2175 int i = 0;
2176 ALLPROC_FOREACH(p) {
2177 if (i >= tts_count) {
2178 break;
2179 }
2180 if (p->p_lflag & P_LEXIT) {
2181 continue;
2182 }
2183 if (p->task) {
2184 task_reference(p->task);
2185 tts_mapptr[i].task = p->task;
2186 tts_mapptr[i].pid = p->p_pid;
2187 (void)strlcpy(tts_mapptr[i].task_comm, proc_best_name(p), sizeof(tts_mapptr[i].task_comm));
2188 i++;
2189 }
2190 }
2191 tts_count = i;
2192
2193 proc_list_unlock();
2194
2195 /*
2196 * Initialize thread map data
2197 */
2198 akrt.map = mapptr;
2199 akrt.count = 0;
2200 akrt.maxcount = *mapcount;
2201
2202 for (i = 0; i < tts_count; i++) {
2203 akrt.atts = &tts_mapptr[i];
2204 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
2205 task_deallocate((task_t)tts_mapptr[i].task);
2206 }
2207 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
2208
2209 *mapcount = akrt.count;
2210
2211 return mapptr;
2212 }
2213
2214 static void
2215 kdbg_clear(void)
2216 {
2217 /*
2218 * Clean up the trace buffer
2219 * First make sure we're not in
2220 * the middle of cutting a trace
2221 */
2222 kernel_debug_disable();
2223 kdbg_disable_typefilter();
2224
2225 /*
2226 * Make sure the SLOW_NOLOG is seen
2227 * by everyone who might be trying
2228 * to cut a trace.
2229 */
2230 IOSleep(100);
2231
2232 /* reset kdebug state for each process */
2233 if (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) {
2234 proc_list_lock();
2235 proc_t p;
2236 ALLPROC_FOREACH(p) {
2237 p->p_kdebug = 0;
2238 }
2239 proc_list_unlock();
2240 }
2241
2242 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2243 kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
2244 kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
2245
2246 kd_ctrl_page.oldest_time = 0;
2247
2248 delete_buffers();
2249 nkdbufs = 0;
2250
2251 /* Clean up the thread map buffer */
2252 kdbg_clear_thread_map();
2253
2254 RAW_file_offset = 0;
2255 RAW_file_written = 0;
2256 }
2257
2258 void
2259 kdebug_reset(void)
2260 {
2261 ktrace_assert_lock_held();
2262
2263 kdbg_lock_init();
2264
2265 kdbg_clear();
2266 if (kdbg_typefilter) {
2267 typefilter_reject_all(kdbg_typefilter);
2268 typefilter_allow_class(kdbg_typefilter, DBG_TRACE);
2269 }
2270 }
2271
2272 void
2273 kdebug_free_early_buf(void)
2274 {
2275 /* Must be done with the buffer, so release it back to the VM. */
2276 ml_static_mfree((vm_offset_t)&kd_early_buffer, sizeof(kd_early_buffer));
2277 }
2278
2279 int
2280 kdbg_setpid(kd_regtype *kdr)
2281 {
2282 pid_t pid;
2283 int flag, ret=0;
2284 struct proc *p;
2285
2286 pid = (pid_t)kdr->value1;
2287 flag = (int)kdr->value2;
2288
2289 if (pid >= 0) {
2290 if ((p = proc_find(pid)) == NULL)
2291 ret = ESRCH;
2292 else {
2293 if (flag == 1) {
2294 /*
2295 * turn on pid check for this and all pids
2296 */
2297 kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
2298 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
2299 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2300
2301 p->p_kdebug = 1;
2302 } else {
2303 /*
2304 * turn off pid check for this pid value
2305 * Don't turn off all pid checking though
2306 *
2307 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
2308 */
2309 p->p_kdebug = 0;
2310 }
2311 proc_rele(p);
2312 }
2313 }
2314 else
2315 ret = EINVAL;
2316
2317 return(ret);
2318 }
2319
2320 /* This is for pid exclusion in the trace buffer */
2321 int
2322 kdbg_setpidex(kd_regtype *kdr)
2323 {
2324 pid_t pid;
2325 int flag, ret=0;
2326 struct proc *p;
2327
2328 pid = (pid_t)kdr->value1;
2329 flag = (int)kdr->value2;
2330
2331 if (pid >= 0) {
2332 if ((p = proc_find(pid)) == NULL)
2333 ret = ESRCH;
2334 else {
2335 if (flag == 1) {
2336 /*
2337 * turn on pid exclusion
2338 */
2339 kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
2340 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
2341 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2342
2343 p->p_kdebug = 1;
2344 }
2345 else {
2346 /*
2347 * turn off pid exclusion for this pid value
2348 * Don't turn off all pid exclusion though
2349 *
2350 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
2351 */
2352 p->p_kdebug = 0;
2353 }
2354 proc_rele(p);
2355 }
2356 } else
2357 ret = EINVAL;
2358
2359 return(ret);
2360 }
2361
2362 /*
2363 * The following functions all operate on the "global" typefilter singleton.
2364 */
2365
2366 /*
2367 * The tf param is optional, you may pass either a valid typefilter or NULL.
2368 * If you pass a valid typefilter, you release ownership of that typefilter.
2369 */
2370 static int
2371 kdbg_initialize_typefilter(typefilter_t tf)
2372 {
2373 ktrace_assert_lock_held();
2374 assert(!kdbg_typefilter);
2375 assert(!kdbg_typefilter_memory_entry);
2376 typefilter_t deallocate_tf = NULL;
2377
2378 if (!tf && ((tf = deallocate_tf = typefilter_create()) == NULL)) {
2379 return ENOMEM;
2380 }
2381
2382 if ((kdbg_typefilter_memory_entry = typefilter_create_memory_entry(tf)) == MACH_PORT_NULL) {
2383 if (deallocate_tf) {
2384 typefilter_deallocate(deallocate_tf);
2385 }
2386 return ENOMEM;
2387 }
2388
2389 /*
2390 * The atomic store closes a race window with
2391 * the kdebug_typefilter syscall, which assumes
2392 * that any non-null kdbg_typefilter means a
2393 * valid memory_entry is available.
2394 */
2395 __c11_atomic_store(((_Atomic typefilter_t*)&kdbg_typefilter), tf, memory_order_release);
2396
2397 return KERN_SUCCESS;
2398 }
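
/*
 * Ownership example for the function above (illustrative). Per the contract
 * documented before it, a caller that passes in its own typefilter must not
 * touch it again, success or failure:
 *
 *	typefilter_t tf = typefilter_create();
 *	if (tf) {
 *		(void)kdbg_initialize_typefilter(tf);
 *		tf = NULL;	// ownership was released to the call
 *	}
 */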
2399
2400 static int
2401 kdbg_copyin_typefilter(user_addr_t addr, size_t size)
2402 {
2403 int ret = ENOMEM;
2404 typefilter_t tf;
2405
2406 ktrace_assert_lock_held();
2407
2408 if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
2409 return EINVAL;
2410 }
2411
2412 if ((tf = typefilter_create())) {
2413 if ((ret = copyin(addr, tf, KDBG_TYPEFILTER_BITMAP_SIZE)) == 0) {
2414 /* The kernel typefilter must always allow DBG_TRACE */
2415 typefilter_allow_class(tf, DBG_TRACE);
2416
2417 /*
2418 * If this is the first typefilter, claim it.
2419 * Otherwise copy and deallocate.
2420 *
2421 * Allocating a typefilter for the copyin allows
2422 * the kernel to hold the invariant that DBG_TRACE
2423 * must always be allowed.
2424 */
2425 if (!kdbg_typefilter) {
2426 if ((ret = kdbg_initialize_typefilter(tf))) {
2427 return ret;
2428 }
2429 tf = NULL;
2430 } else {
2431 typefilter_copy(kdbg_typefilter, tf);
2432 }
2433
2434 kdbg_enable_typefilter();
2435 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter);
2436 }
2437
2438 if (tf)
2439 typefilter_deallocate(tf);
2440 }
2441
2442 return ret;
2443 }
2444
2445 /*
2446 * Enable the flags in the control page for the typefilter. Assumes that
2447 * kdbg_typefilter has already been allocated, so events being written
2448 * don't see a bad typefilter.
2449 */
2450 static void
2451 kdbg_enable_typefilter(void)
2452 {
2453 assert(kdbg_typefilter);
2454 kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
2455 kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
2456 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2457 commpage_update_kdebug_state();
2458 }
2459
2460 /*
2461 * Disable the flags in the control page for the typefilter. The typefilter
2462 * may be safely deallocated shortly after this function returns.
2463 */
2464 static void
2465 kdbg_disable_typefilter(void)
2466 {
2467 kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
2468
2469 if ((kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE))) {
2470 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2471 } else {
2472 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
2473 }
2474 commpage_update_kdebug_state();
2475 }
2476
2477 uint32_t
2478 kdebug_commpage_state(void)
2479 {
2480 if (kdebug_enable) {
2481 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
2482 return KDEBUG_COMMPAGE_ENABLE_TYPEFILTER | KDEBUG_COMMPAGE_ENABLE_TRACE;
2483 }
2484
2485 return KDEBUG_COMMPAGE_ENABLE_TRACE;
2486 }
2487
2488 return 0;
2489 }
2490
2491 int
2492 kdbg_setreg(kd_regtype * kdr)
2493 {
2494 int ret=0;
2495 unsigned int val_1, val_2, val;
2496 switch (kdr->type) {
2497
2498 case KDBG_CLASSTYPE :
2499 val_1 = (kdr->value1 & 0xff);
2500 val_2 = (kdr->value2 & 0xff);
2501 kdlog_beg = (val_1<<24);
2502 kdlog_end = (val_2<<24);
2503 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2504 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2505 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
2506 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2507 break;
2508 case KDBG_SUBCLSTYPE :
2509 val_1 = (kdr->value1 & 0xff);
2510 val_2 = (kdr->value2 & 0xff);
2511 val = val_2 + 1;
2512 kdlog_beg = ((val_1<<24) | (val_2 << 16));
2513 kdlog_end = ((val_1<<24) | (val << 16));
2514 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2515 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2516 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
2517 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2518 break;
2519 case KDBG_RANGETYPE :
2520 kdlog_beg = (kdr->value1);
2521 kdlog_end = (kdr->value2);
2522 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2523 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
2524 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
2525 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2526 break;
2527 case KDBG_VALCHECK:
2528 kdlog_value1 = (kdr->value1);
2529 kdlog_value2 = (kdr->value2);
2530 kdlog_value3 = (kdr->value3);
2531 kdlog_value4 = (kdr->value4);
2532 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2533 kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
2534 kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
2535 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2536 break;
2537 case KDBG_TYPENONE :
2538 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
2539
2540 if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
2541 KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
2542 KDBG_TYPEFILTER_CHECK)) )
2543 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
2544 else
2545 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
2546
2547 kdlog_beg = 0;
2548 kdlog_end = 0;
2549 break;
2550 default :
2551 ret = EINVAL;
2552 break;
2553 }
2554 return(ret);
2555 }
2556
2557 static int
2558 kdbg_write_to_vnode(caddr_t buffer, size_t size, vnode_t vp, vfs_context_t ctx, off_t file_offset)
2559 {
2560 return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
2561 vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2562 }
2563
2564 int
2565 kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag, uint32_t sub_tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
2566 {
2567 int ret = KERN_SUCCESS;
2568 kd_chunk_header_v3 header = {
2569 .tag = tag,
2570 .sub_tag = sub_tag,
2571 .length = length,
2572 };
2573
2574 // Check that only one of them is valid
2575 assert(!buffer ^ !vp);
2576 assert((vp == NULL) || (ctx != NULL));
2577
2578 // Write the chunk header to the file or copy it out to userspace
2579 if (buffer || vp) {
2580 if (vp) {
2581 ret = kdbg_write_to_vnode((caddr_t)&header, sizeof(kd_chunk_header_v3), vp, ctx, RAW_file_offset);
2582 if (ret) {
2583 goto write_error;
2584 }
2585 RAW_file_offset += (sizeof(kd_chunk_header_v3));
2586 }
2587 else {
2588 ret = copyout(&header, buffer, sizeof(kd_chunk_header_v3));
2589 if (ret) {
2590 goto write_error;
2591 }
2592 }
2593 }
2594 write_error:
2595 return ret;
2596 }
2597
2598 int
2599 kdbg_write_v3_chunk_header_to_buffer(void * buffer, uint32_t tag, uint32_t sub_tag, uint64_t length)
2600 {
2601 kd_chunk_header_v3 header = {
2602 .tag = tag,
2603 .sub_tag = sub_tag,
2604 .length = length,
2605 };
2606
2607 if (!buffer) {
2608 return 0;
2609 }
2610
2611 memcpy(buffer, &header, sizeof(kd_chunk_header_v3));
2612
2613 return (sizeof(kd_chunk_header_v3));
2614 }
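
/*
 * Layout written by the two helpers above (sketch, assuming the
 * uint32_t/uint32_t/uint64_t field widths of kd_chunk_header_v3 in
 * sys/kdebug.h, 16 bytes of header in total):
 *
 *	+---------+-------------+------------+--------------------+
 *	| tag (4) | sub_tag (4) | length (8) | payload (length B) |
 *	+---------+-------------+------------+--------------------+
 */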
2615
2616 int
2617 kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void *payload, uint64_t payload_size, int fd)
2618 {
2619 proc_t p;
2620 struct vfs_context context;
2621 struct fileproc *fp;
2622 vnode_t vp;
2623 p = current_proc();
2624
2625 proc_fdlock(p);
2626 if ( (fp_lookup(p, fd, &fp, 1)) ) {
2627 proc_fdunlock(p);
2628 return EFAULT;
2629 }
2630
2631 context.vc_thread = current_thread();
2632 context.vc_ucred = fp->f_fglob->fg_cred;
2633
2634 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
2635 fp_drop(p, fd, fp, 1);
2636 proc_fdunlock(p);
2637 return EBADF;
2638 }
2639 vp = (struct vnode *) fp->f_fglob->fg_data;
2640 proc_fdunlock(p);
2641
2642 if ( (vnode_getwithref(vp)) == 0 ) {
2643 RAW_file_offset = fp->f_fglob->fg_offset;
2644
2645 kd_chunk_header_v3 chunk_header = {
2646 .tag = tag,
2647 .sub_tag = sub_tag,
2648 .length = length,
2649 };
2650
2651 int ret = kdbg_write_to_vnode((caddr_t) &chunk_header, sizeof(kd_chunk_header_v3), vp, &context, RAW_file_offset);
2652 if (!ret) {
2653 RAW_file_offset += sizeof(kd_chunk_header_v3);
2654 }
2655
2656 ret = kdbg_write_to_vnode((caddr_t) payload, (size_t) payload_size, vp, &context, RAW_file_offset);
2657 if (!ret) {
2658 RAW_file_offset += payload_size;
2659 }
2660
2661 fp->f_fglob->fg_offset = RAW_file_offset;
2662 vnode_put(vp);
2663 }
2664
2665 fp_drop(p, fd, fp, 0);
2666 return KERN_SUCCESS;
2667 }
2668
2669 user_addr_t
2670 kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag, uint64_t length, vnode_t vp, vfs_context_t ctx)
2671 {
2672 uint64_t future_chunk_timestamp = 0;
2673 length += sizeof(uint64_t);
2674
2675 if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) {
2676 return 0;
2677 }
2678 if (buffer) {
2679 buffer += sizeof(kd_chunk_header_v3);
2680 }
2681
2682 // Check that only one of them is valid
2683 assert(!buffer ^ !vp);
2684 assert((vp == NULL) || (ctx != NULL));
2685
2686 // Write the 8-byte future_chunk_timestamp field in the payload
2687 if (buffer || vp) {
2688 if (vp) {
2689 int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset);
2690 if (!ret) {
2691 RAW_file_offset += (sizeof(uint64_t));
2692 }
2693 }
2694 else {
2695 if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) {
2696 return 0;
2697 }
2698 }
2699 }
2700
2701 return (buffer + sizeof(uint64_t));
2702 }
2703
2704 int
2705 kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd)
2706 {
2707 int ret = KERN_SUCCESS;
2708
2709 uint8_t* cpumap = 0;
2710 uint32_t cpumap_size = 0;
2711 uint32_t thrmap_size = 0;
2712
2713 size_t bytes_needed = 0;
2714
2715 // Check that only one of them is valid
2716 assert(!user_header ^ !fd);
2717 assert(user_header_size);
2718
2719 if ( !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ) {
2720 ret = EINVAL;
2721 goto bail;
2722 }
2723
2724 if ( !(user_header || fd) ) {
2725 ret = EINVAL;
2726 goto bail;
2727 }
2728
2729 // Initialize the cpu map
2730 ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size);
2731 if (ret != KERN_SUCCESS) {
2732 goto bail;
2733 }
2734
2735 // Check if a thread map is initialized
2736 if ( !kd_mapptr ) {
2737 ret = EINVAL;
2738 goto bail;
2739 }
2740 thrmap_size = kd_mapcount * sizeof(kd_threadmap);
2741
2742 mach_timebase_info_data_t timebase = {0, 0};
2743 clock_timebase_info(&timebase);
2744
2745 // Set up the header.
2746 // See v3 header description in sys/kdebug.h for more information.
2747 kd_header_v3 header = {
2748 .tag = RAW_VERSION3,
2749 .sub_tag = V3_HEADER_VERSION,
2750 .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)),
2751 .timebase_numer = timebase.numer,
2752 .timebase_denom = timebase.denom,
2753 .timestamp = 0, /* FIXME rdar://problem/22053009 */
2754 .walltime_secs = 0,
2755 .walltime_usecs = 0,
2756 .timezone_minuteswest = 0,
2757 .timezone_dst = 0,
2758 #if defined(__LP64__)
2759 .flags = 1,
2760 #else
2761 .flags = 0,
2762 #endif
2763 };
2764
2765 // If it's a buffer, check if we have enough space to copy the header and the maps.
2766 if (user_header) {
2767 bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3));
2768 if (*user_header_size < bytes_needed) {
2769 ret = EINVAL;
2770 goto bail;
2771 }
2772 }
2773
2774 // Start writing the header
2775 if (fd) {
2776 void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3));
2777 size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3));
2778
2779 ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd);
2780 if (ret) {
2781 goto bail;
2782 }
2783 }
2784 else {
2785 if (copyout(&header, user_header, sizeof(kd_header_v3))) {
2786 ret = EFAULT;
2787 goto bail;
2788 }
2789 // Update the user pointer
2790 user_header += sizeof(kd_header_v3);
2791 }
2792
2793 // Write a cpu map. This is a sub chunk of the header
2794 cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header));
2795 size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header));
2796 if (fd) {
2797 ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd);
2798 if (ret) {
2799 goto bail;
2800 }
2801 }
2802 else {
2803 ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL);
2804 if (ret) {
2805 goto bail;
2806 }
2807 user_header += sizeof(kd_chunk_header_v3);
2808 if (copyout(cpumap, user_header, payload_size)) {
2809 ret = EFAULT;
2810 goto bail;
2811 }
2812 // Update the user pointer
2813 user_header += payload_size;
2814 }
2815
2816 // Write a thread map
2817 if (fd) {
2818 ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd);
2819 if (ret) {
2820 goto bail;
2821 }
2822 }
2823 else {
2824 ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL);
2825 if (ret) {
2826 goto bail;
2827 }
2828 user_header += sizeof(kd_chunk_header_v3);
2829 if (copyout(kd_mapptr, user_header, thrmap_size)) {
2830 ret = EFAULT;
2831 goto bail;
2832 }
2833 user_header += thrmap_size;
2834 }
2835
2836 if (fd) {
2837 RAW_file_written += bytes_needed;
2838 }
2839
2840 *user_header_size = bytes_needed;
2841 bail:
2842 if (cpumap) {
2843 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
2844 }
2845 return (ret);
2846 }
2847
2848 int
2849 kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
2850 {
2851 uint8_t* cpumap = NULL;
2852 uint32_t cpumap_size = 0;
2853 int ret = KERN_SUCCESS;
2854
2855 if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
2856 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
2857 if (user_cpumap) {
2858 size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
2859 if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
2860 ret = EFAULT;
2861 }
2862 }
2863 *user_cpumap_size = cpumap_size;
2864 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
2865 } else
2866 ret = EINVAL;
2867 } else
2868 ret = EINVAL;
2869
2870 return (ret);
2871 }
2872
2873 int
2874 kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
2875 {
2876 kd_threadmap *mapptr;
2877 unsigned int mapsize;
2878 unsigned int mapcount;
2879 unsigned int count = 0;
2880 int ret = 0;
2881
2882 count = *bufsize/sizeof(kd_threadmap);
2883 *bufsize = 0;
2884
2885 if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
2886 if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
2887 ret = EFAULT;
2888 else
2889 *bufsize = (mapcount * sizeof(kd_threadmap));
2890
2891 kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
2892 } else
2893 ret = EINVAL;
2894
2895 return (ret);
2896 }
2897
2898 static int
2899 kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx)
2900 {
2901 int ret = 0;
2902 RAW_header header;
2903 clock_sec_t secs;
2904 clock_usec_t usecs;
2905 char *pad_buf;
2906 uint32_t pad_size;
2907 uint32_t extra_thread_count = 0;
2908 uint32_t cpumap_size;
2909 size_t map_size = 0;
2910 size_t map_count = 0;
2911
2912 if (write_thread_map) {
2913 assert(kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
2914 map_count = kd_mapcount;
2915 map_size = map_count * sizeof(kd_threadmap);
2916 }
2917
2918 /*
2919 * Without the buffers initialized, we cannot construct a CPU map or a
2920 * thread map, and cannot write a header.
2921 */
2922 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
2923 return EINVAL;
2924 }
2925
2926 /*
2927 * To write a RAW_VERSION1+ file, we must embed a cpumap in the
2928 * "padding" used to page-align the events following the threadmap. If
2929 * the threadmap does not leave enough padding for the cpumap, we
2930 * artificially increase its footprint until it does.
2931 */
2932
2933 assert(vp);
2934 assert(ctx);
2935
2936 pad_size = PAGE_16KB - ((sizeof(RAW_header) + map_size) & PAGE_MASK_64);
2937 cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
2938
2939 if (cpumap_size > pad_size) {
2940 /* If the cpu map doesn't fit in the current available pad_size,
2941 * we increase the pad_size by 16K. We do this so that the event
2942 * data is always available on a page aligned boundary for both
2943 * 4k and 16k systems. We enforce this alignment for the event
2944 * data so that we can take advantage of optimized file/disk writes.
2945 */
2946 pad_size += PAGE_16KB;
2947 }
2948
2949 /* The way we silently embed a cpumap in the "padding" is by artificially
2950 * increasing the number of thread entries. However, we also need to ensure that
2951 * the cpumap lands in the last 4K page before the event data is expected. This
2952 * way the tools can read the data starting at the next page boundary on both
2953 * 4K and 16K systems, preserving compatibility with older versions of the tools.
2954 */
2955 if (pad_size > PAGE_4KB) {
2956 pad_size -= PAGE_4KB;
2957 extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
2958 }
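
/*
 * Worked example with hypothetical sizes: if the header plus thread map
 * stops 0x100 bytes short of a 16KB boundary but the cpumap needs 0x200
 * bytes, pad_size grows from 0x100 to 0x4100 above, then drops to 0x3100
 * here. Roughly that many bytes are emitted as extra zeroed thread-map
 * entries, the last 4K page of padding carries the cpumap, and event data
 * still begins on a page boundary.
 */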
2959
2960 memset(&header, 0, sizeof(header));
2961 header.version_no = RAW_VERSION1;
2962 header.thread_count = map_count + extra_thread_count;
2963
2964 clock_get_calendar_microtime(&secs, &usecs);
2965 header.TOD_secs = secs;
2966 header.TOD_usecs = usecs;
2967
2968 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
2969 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2970 if (ret) {
2971 goto write_error;
2972 }
2973 RAW_file_offset += sizeof(RAW_header);
2974 RAW_file_written += sizeof(RAW_header);
2975
2976 if (write_thread_map) {
2977 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, map_size, RAW_file_offset,
2978 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2979 if (ret) {
2980 goto write_error;
2981 }
2982
2983 RAW_file_offset += map_size;
2984 RAW_file_written += map_size;
2985 }
2986
2987 if (extra_thread_count) {
2988 pad_size = extra_thread_count * sizeof(kd_threadmap);
2989 pad_buf = kalloc(pad_size);
2990 if (!pad_buf) {
2991 ret = ENOMEM;
2992 goto write_error;
2993 }
2994 memset(pad_buf, 0, pad_size);
2995
2996 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2997 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2998 kfree(pad_buf, pad_size);
2999 if (ret) {
3000 goto write_error;
3001 }
3002
3003 RAW_file_offset += pad_size;
3004 RAW_file_written += pad_size;
3005 }
3006
3007 pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
3008 if (pad_size) {
3009 pad_buf = (char *)kalloc(pad_size);
3010 if (!pad_buf) {
3011 ret = ENOMEM;
3012 goto write_error;
3013 }
3014 memset(pad_buf, 0, pad_size);
3015
3016 /*
3017 * embed a cpumap in the padding bytes.
3018 * older code will skip this.
3019 * newer code will know how to read it.
3020 */
3021 uint32_t temp = pad_size;
3022 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
3023 memset(pad_buf, 0, pad_size);
3024 }
3025
3026 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
3027 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
3028 kfree(pad_buf, pad_size);
3029 if (ret) {
3030 goto write_error;
3031 }
3032
3033 RAW_file_offset += pad_size;
3034 RAW_file_written += pad_size;
3035 }
3036
3037 write_error:
3038 return ret;
3039 }
3040
3041 static void
3042 kdbg_clear_thread_map(void)
3043 {
3044 ktrace_assert_lock_held();
3045
3046 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
3047 assert(kd_mapptr != NULL);
3048 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
3049 kd_mapptr = NULL;
3050 kd_mapsize = 0;
3051 kd_mapcount = 0;
3052 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
3053 }
3054 }
3055
3056 /*
3057 * Write out a version 1 header and the thread map, if it is initialized, to a
3058 * vnode. Used by KDWRITEMAP and kdbg_dump_trace_to_file.
3059 *
3060 * Returns write errors from vn_rdwr if a write fails. Returns ENODATA if the
3061 * thread map has not been initialized, but the header will still be written.
3062 * Returns ENOMEM if padding could not be allocated. Returns 0 otherwise.
3063 */
3064 static int
3065 kdbg_write_thread_map(vnode_t vp, vfs_context_t ctx)
3066 {
3067 int ret = 0;
3068 boolean_t map_initialized;
3069
3070 ktrace_assert_lock_held();
3071 assert(ctx != NULL);
3072
3073 map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
3074
3075 ret = kdbg_write_v1_header(map_initialized, vp, ctx);
3076 if (ret == 0) {
3077 if (map_initialized) {
3078 kdbg_clear_thread_map();
3079 } else {
3080 ret = ENODATA;
3081 }
3082 }
3083
3084 return ret;
3085 }
3086
3087 /*
3088 * Copy out the thread map to a user space buffer. Used by KDTHRMAP.
3089 *
3090 * Returns copyout errors if the copyout fails. Returns ENODATA if the thread
3091 * map has not been initialized. Returns EINVAL if the buffer provided is not
3092 * large enough for the entire thread map. Returns 0 otherwise.
3093 */
3094 static int
3095 kdbg_copyout_thread_map(user_addr_t buffer, size_t *buffer_size)
3096 {
3097 boolean_t map_initialized;
3098 size_t map_size;
3099 int ret = 0;
3100
3101 ktrace_assert_lock_held();
3102 assert(buffer_size != NULL);
3103
3104 map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
3105 if (!map_initialized) {
3106 return ENODATA;
3107 }
3108
3109 map_size = kd_mapcount * sizeof(kd_threadmap);
3110 if (*buffer_size < map_size) {
3111 return EINVAL;
3112 }
3113
3114 ret = copyout(kd_mapptr, buffer, map_size);
3115 if (ret == 0) {
3116 kdbg_clear_thread_map();
3117 }
3118
3119 return ret;
3120 }
3121
3122 int
3123 kdbg_readthrmap_v3(user_addr_t buffer, size_t buffer_size, int fd)
3124 {
3125 int ret = 0;
3126 boolean_t map_initialized;
3127 size_t map_size;
3128
3129 ktrace_assert_lock_held();
3130
3131 if ((!fd && !buffer) || (fd && buffer)) {
3132 return EINVAL;
3133 }
3134
3135 map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
3136 map_size = kd_mapcount * sizeof(kd_threadmap);
3137
3138 if (map_initialized && (buffer_size >= map_size))
3139 {
3140 ret = kdbg_write_v3_header(buffer, &buffer_size, fd);
3141
3142 if (ret == 0) {
3143 kdbg_clear_thread_map();
3144 }
3145 } else {
3146 ret = EINVAL;
3147 }
3148
3149 return ret;
3150 }
3151
3152 static void
3153 kdbg_set_nkdbufs(unsigned int value)
3154 {
3155 /*
3156 * We allow a maximum buffer size of 50% of either ram or max mapped
3157 * address, whichever is smaller. 'value' is the desired number of trace
3158 * entries.
3159 */
3160 unsigned int max_entries = (sane_size / 2) / sizeof(kd_buf);
3161
3162 if (value <= max_entries) {
3163 nkdbufs = value;
3164 } else {
3165 nkdbufs = max_entries;
3166 }
3167 }
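
/*
 * Worked example (hypothetical): with sane_size = 8GB and a 64-byte kd_buf
 * (its LP64 size), max_entries = (8GB / 2) / 64 = 67,108,864, so any larger
 * request is silently clamped to that many events.
 */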
3168
3169 /*
3170 * Block until there are `n_storage_threshold` storage units filled with
3171 * events or `timeout_ms` milliseconds have passed. If `locked_wait` is true,
3172 * `ktrace_lock` is held while waiting. This is necessary while waiting to
3173 * write events out of the buffers.
3174 *
3175 * Returns true if the threshold was reached and false otherwise.
3176 *
3177 * Called with `ktrace_lock` locked and interrupts enabled.
3178 */
3179 static boolean_t
3180 kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait)
3181 {
3182 int wait_result = THREAD_AWAKENED;
3183 uint64_t abstime = 0;
3184
3185 ktrace_assert_lock_held();
3186
3187 if (timeout_ms != 0) {
3188 uint64_t ns = timeout_ms * NSEC_PER_MSEC;
3189 nanoseconds_to_absolutetime(ns, &abstime);
3190 clock_absolutetime_interval_to_deadline(abstime, &abstime);
3191 }
3192
3193 boolean_t s = ml_set_interrupts_enabled(FALSE);
3194 if (!s) {
3195 panic("kdbg_wait() called with interrupts disabled");
3196 }
3197 lck_spin_lock(kdw_spin_lock);
3198
3199 if (!locked_wait) {
3200 /* drop the mutex to allow others to access trace */
3201 ktrace_unlock();
3202 }
3203
3204 while (wait_result == THREAD_AWAKENED &&
3205 kd_ctrl_page.kds_inuse_count < n_storage_threshold)
3206 {
3207 kds_waiter = 1;
3208
3209 if (abstime) {
3210 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
3211 } else {
3212 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
3213 }
3214
3215 kds_waiter = 0;
3216 }
3217
3218 /* check the count under the spinlock */
3219 boolean_t threshold_exceeded = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
3220
3221 lck_spin_unlock(kdw_spin_lock);
3222 ml_set_interrupts_enabled(s);
3223
3224 if (!locked_wait) {
3225 /* pick the mutex back up again */
3226 ktrace_lock();
3227 }
3228
3229 /* write out whether we've exceeded the threshold */
3230 return threshold_exceeded;
3231 }
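
/*
 * Usage sketch: the KERN_KDWRITETR path in kdbg_control below waits with
 * the lock held before draining events to a file:
 *
 *	(void)kdbg_wait(size, TRUE);	// block until enough events buffered
 *	// ... then kdbg_read(0, &number, vp, &context, RAW_VERSION1) ...
 */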
3232
3233 /*
3234 * Wakeup a thread waiting using `kdbg_wait` if there are at least
3235 * `n_storage_threshold` storage units in use.
3236 */
3237 static void
3238 kdbg_wakeup(void)
3239 {
3240 boolean_t need_kds_wakeup = FALSE;
3241
3242 /*
3243 * Try to take the lock here to synchronize with the waiter entering
3244 * the blocked state. Use the try mode to prevent deadlocks caused by
3245 * re-entering this routine due to various trace points triggered in the
3246 * lck_spin_sleep_xxxx routines used to actually enter one of our 2 wait
3247 * conditions. No problem if we fail, there will be lots of additional
3248 * events coming in that will eventually succeed in grabbing this lock.
3249 */
3250 boolean_t s = ml_set_interrupts_enabled(FALSE);
3251
3252 if (lck_spin_try_lock(kdw_spin_lock)) {
3253 if (kds_waiter &&
3254 (kd_ctrl_page.kds_inuse_count >= n_storage_threshold))
3255 {
3256 kds_waiter = 0;
3257 need_kds_wakeup = TRUE;
3258 }
3259 lck_spin_unlock(kdw_spin_lock);
3260 }
3261
3262 ml_set_interrupts_enabled(s);
3263
3264 if (need_kds_wakeup == TRUE) {
3265 wakeup(&kds_waiter);
3266 }
3267 }
3268
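/*
 * The handler below is reached from userspace via sysctl(2). Illustrative
 * sketch of a caller (mib layout inferred from the name[]/namelen handling
 * that follows):
 *
 *	int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, 8192 };
 *	sysctl(mib, 4, NULL, NULL, NULL, 0);	// ask for 8192 kd_bufs
 */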
3269 int
3270 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
3271 {
3272 int ret = 0;
3273 size_t size = *sizep;
3274 unsigned int value = 0;
3275 kd_regtype kd_Reg;
3276 kbufinfo_t kd_bufinfo;
3277 proc_t p;
3278
3279 if (name[0] == KERN_KDWRITETR ||
3280 name[0] == KERN_KDWRITETR_V3 ||
3281 name[0] == KERN_KDWRITEMAP ||
3282 name[0] == KERN_KDWRITEMAP_V3 ||
3283 name[0] == KERN_KDEFLAGS ||
3284 name[0] == KERN_KDDFLAGS ||
3285 name[0] == KERN_KDENABLE ||
3286 name[0] == KERN_KDSETBUF)
3287 {
3288 if (namelen < 2) {
3289 return EINVAL;
3290 }
3291 value = name[1];
3292 }
3293
3294 kdbg_lock_init();
3295 assert(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT);
3296
3297 ktrace_lock();
3298
3299 /*
3300 * Some requests only require "read" access to kdebug trace. Regardless,
3301 * tell ktrace that a configuration or read is occurring (and see if it's
3302 * allowed).
3303 */
3304 if (name[0] != KERN_KDGETBUF &&
3305 name[0] != KERN_KDGETREG &&
3306 name[0] != KERN_KDREADCURTHRMAP)
3307 {
3308 if ((ret = ktrace_configure(KTRACE_KDEBUG))) {
3309 goto out;
3310 }
3311 } else {
3312 if ((ret = ktrace_read_check())) {
3313 goto out;
3314 }
3315 }
3316
3317 switch(name[0]) {
3318 case KERN_KDGETBUF:
3319 if (size < sizeof(kd_bufinfo.nkdbufs)) {
3320 /*
3321 * There is not enough room to return even
3322 * the first element of the info structure.
3323 */
3324 ret = EINVAL;
3325 break;
3326 }
3327
3328 memset(&kd_bufinfo, 0, sizeof(kd_bufinfo));
3329
3330 kd_bufinfo.nkdbufs = nkdbufs;
3331 kd_bufinfo.nkdthreads = kd_mapcount;
3332
3333 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
3334 kd_bufinfo.nolog = 1;
3335 else
3336 kd_bufinfo.nolog = 0;
3337
3338 kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
3339 #if defined(__LP64__)
3340 kd_bufinfo.flags |= KDBG_LP64;
3341 #endif
3342 {
3343 int pid = ktrace_get_owning_pid();
3344 kd_bufinfo.bufid = (pid == 0 ? -1 : pid);
3345 }
3346
3347 if (size >= sizeof(kd_bufinfo)) {
3348 /*
3349 * Provide all the info we have
3350 */
3351 if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
3352 ret = EINVAL;
3353 } else {
3354 /*
3355 * For backwards compatibility, only provide
3356 * as much info as there is room for.
3357 */
3358 if (copyout(&kd_bufinfo, where, size))
3359 ret = EINVAL;
3360 }
3361 break;
3362
3363 case KERN_KDREADCURTHRMAP:
3364 ret = kdbg_readcurthrmap(where, sizep);
3365 break;
3366
3367 case KERN_KDEFLAGS:
3368 value &= KDBG_USERFLAGS;
3369 kd_ctrl_page.kdebug_flags |= value;
3370 break;
3371
3372 case KERN_KDDFLAGS:
3373 value &= KDBG_USERFLAGS;
3374 kd_ctrl_page.kdebug_flags &= ~value;
3375 break;
3376
3377 case KERN_KDENABLE:
3378 /*
3379 * Enable tracing mechanism. Two types:
3380 * KDEBUG_TRACE is the standard one,
3381 * and KDEBUG_PPT, which is a carefully
3382 * chosen subset to avoid performance impact.
3383 */
3384 if (value) {
3385 /*
3386 * enable only if buffer is initialized
3387 */
3388 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
3389 !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
3390 ret = EINVAL;
3391 break;
3392 }
3393 kdbg_thrmap_init();
3394
3395 kdbg_set_tracing_enabled(TRUE, value);
3396 }
3397 else
3398 {
3399 if (!kdebug_enable) {
3400 break;
3401 }
3402
3403 kernel_debug_disable();
3404 }
3405 break;
3406
3407 case KERN_KDSETBUF:
3408 kdbg_set_nkdbufs(value);
3409 break;
3410
3411 case KERN_KDSETUP:
3412 ret = kdbg_reinit(FALSE);
3413 break;
3414
3415 case KERN_KDREMOVE:
3416 ktrace_reset(KTRACE_KDEBUG);
3417 break;
3418
3419 case KERN_KDSETREG:
3420 if(size < sizeof(kd_regtype)) {
3421 ret = EINVAL;
3422 break;
3423 }
3424 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
3425 ret = EINVAL;
3426 break;
3427 }
3428
3429 ret = kdbg_setreg(&kd_Reg);
3430 break;
3431
3432 case KERN_KDGETREG:
3433 ret = EINVAL;
3434 break;
3435
3436 case KERN_KDREADTR:
3437 ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1);
3438 break;
3439
3440 case KERN_KDWRITETR:
3441 case KERN_KDWRITETR_V3:
3442 case KERN_KDWRITEMAP:
3443 case KERN_KDWRITEMAP_V3:
3444 {
3445 struct vfs_context context;
3446 struct fileproc *fp;
3447 size_t number;
3448 vnode_t vp;
3449 int fd;
3450
3451 if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
3452 (void)kdbg_wait(size, TRUE);
3453 }
3454 p = current_proc();
3455 fd = value;
3456
3457 proc_fdlock(p);
3458 if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
3459 proc_fdunlock(p);
3460 break;
3461 }
3462 context.vc_thread = current_thread();
3463 context.vc_ucred = fp->f_fglob->fg_cred;
3464
3465 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
3466 fp_drop(p, fd, fp, 1);
3467 proc_fdunlock(p);
3468
3469 ret = EBADF;
3470 break;
3471 }
3472 vp = (struct vnode *)fp->f_fglob->fg_data;
3473 proc_fdunlock(p);
3474
3475 if ((ret = vnode_getwithref(vp)) == 0) {
3476 RAW_file_offset = fp->f_fglob->fg_offset;
3477 if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) {
3478 number = nkdbufs * sizeof(kd_buf);
3479
3480 KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_START);
3481 if (name[0] == KERN_KDWRITETR_V3)
3482 ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3);
3483 else
3484 ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1);
3485 KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_END, number);
3486
3487 *sizep = number;
3488 } else {
3489 number = kd_mapcount * sizeof(kd_threadmap);
3490 if (name[0] == KERN_KDWRITEMAP_V3) {
3491 ret = kdbg_readthrmap_v3(0, number, fd);
3492 } else {
3493 ret = kdbg_write_thread_map(vp, &context);
3494 }
3495 }
3496 fp->f_fglob->fg_offset = RAW_file_offset;
3497 vnode_put(vp);
3498 }
3499 fp_drop(p, fd, fp, 0);
3500
3501 break;
3502 }
3503 case KERN_KDBUFWAIT:
3504 *sizep = kdbg_wait(size, FALSE);
3505 break;
3506
3507 case KERN_KDPIDTR:
3508 if (size < sizeof(kd_regtype)) {
3509 ret = EINVAL;
3510 break;
3511 }
3512 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
3513 ret = EINVAL;
3514 break;
3515 }
3516
3517 ret = kdbg_setpid(&kd_Reg);
3518 break;
3519
3520 case KERN_KDPIDEX:
3521 if (size < sizeof(kd_regtype)) {
3522 ret = EINVAL;
3523 break;
3524 }
3525 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
3526 ret = EINVAL;
3527 break;
3528 }
3529
3530 ret = kdbg_setpidex(&kd_Reg);
3531 break;
3532
3533 case KERN_KDCPUMAP:
3534 ret = kdbg_readcpumap(where, sizep);
3535 break;
3536
3537 case KERN_KDTHRMAP:
3538 ret = kdbg_copyout_thread_map(where, sizep);
3539 break;
3540
3541 case KERN_KDSET_TYPEFILTER: {
3542 ret = kdbg_copyin_typefilter(where, size);
3543 break;
3544 }
3545
3546 case KERN_KDTEST:
3547 ret = kdbg_test(size);
3548 break;
3549
3550 default:
3551 ret = EINVAL;
3552 break;
3553 }
3554 out:
3555 ktrace_unlock();
3556
3557 return ret;
3558 }
3559
3560
3561 /*
3562 * This code can run for the most part concurrently with kernel_debug_internal()...
3563 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
3564 * synchronize with the recording side of this puzzle... otherwise, we are able to
3565 * move through the lists w/o use of any locks
3566 */
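
/*
 * In outline (descriptive sketch of the loop below), kdbg_read is a k-way
 * merge of the per-CPU storage lists ordered by timestamp:
 *
 *	while (events remain and count allows) {
 *		for each cpu: peek the oldest unread event;
 *		copy the globally oldest event into kdcopybuf;
 *		when kdcopybuf fills, flush it to the vnode or copyout();
 *	}
 */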
3567 int
3568 kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uint32_t file_version)
3569 {
3570 unsigned int count;
3571 unsigned int cpu, min_cpu;
3572 uint64_t barrier_min = 0, barrier_max = 0, t, earliest_time;
3573 int error = 0;
3574 kd_buf *tempbuf;
3575 uint32_t rcursor;
3576 kd_buf lostevent;
3577 union kds_ptr kdsp;
3578 bool traced_retrograde = false;
3579 struct kd_storage *kdsp_actual;
3580 struct kd_bufinfo *kdbp;
3581 struct kd_bufinfo *min_kdbp;
3582 uint32_t tempbuf_count;
3583 uint32_t tempbuf_number;
3584 uint32_t old_kdebug_flags;
3585 uint32_t old_kdebug_slowcheck;
3586 boolean_t lostevents = FALSE;
3587 boolean_t out_of_events = FALSE;
3588 boolean_t wrapped = FALSE;
3589
3590 assert(number);
3591 count = *number/sizeof(kd_buf);
3592 *number = 0;
3593
3594 ktrace_assert_lock_held();
3595
3596 if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
3597 return EINVAL;
3598
3599 thread_set_eager_preempt(current_thread());
3600
3601 memset(&lostevent, 0, sizeof(lostevent));
3602 lostevent.debugid = TRACE_LOST_EVENTS;
3603
3604 /*
3605 * Capture the current time. Only sort events that have occurred
3606 * before now. Since the IOPs are being flushed here, it is possible
3607 * that events occur on the AP while running live tracing. If we are
3608 * disabled, no new events should occur on the AP.
3609 */
3610 if (kd_ctrl_page.enabled) {
3611 barrier_max = kdbg_timestamp() & KDBG_TIMESTAMP_MASK;
3612 }
3613
3614 /*
3615 * Request each IOP to provide us with up to date entries before merging
3616 * buffers together.
3617 */
3618 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
3619
3620 /*
3621 * Disable wrap so storage units cannot be stolen out from underneath us
3622 * while merging events.
3623 *
3624 * Because we hold ktrace_lock, no other control threads can be playing
3625 * with kdebug_flags. The code that emits new events could be running,
3626 * but it grabs kds_spin_lock if it needs to acquire a new storage
3627 * chunk, which is where it examines kdebug_flags. If it is adding to
3628 * the same chunk we're reading from, check for that below.
3629 */
3630 wrapped = disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
3631
3632 if (count > nkdbufs)
3633 count = nkdbufs;
3634
3635 if ((tempbuf_count = count) > KDCOPYBUF_COUNT) {
3636 tempbuf_count = KDCOPYBUF_COUNT;
3637 }
3638
3639 /*
3640 * If the buffers have wrapped, capture the earliest time where there
3641 * are events for all CPUs and do not emit additional lost events for
3642 * oldest storage units.
3643 */
3644 if (wrapped) {
3645 barrier_min = kd_ctrl_page.oldest_time;
3646 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
3647 kd_ctrl_page.oldest_time = 0;
3648
3649 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
3650 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
3651 continue;
3652 }
3653 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
3654 kdsp_actual->kds_lostevents = FALSE;
3655 }
3656 }
3657
3658 while (count) {
3659 tempbuf = kdcopybuf;
3660 tempbuf_number = 0;
3661
3662 if (wrapped) {
3663 /* Trace a single lost events event for wrapping. */
3664 kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, 0);
3665 *tempbuf = lostevent;
3666 wrapped = FALSE;
3667 goto nextevent;
3668 }
3669
3670 /* While space left in merged events scratch buffer. */
3671 while (tempbuf_count) {
3672 earliest_time = UINT64_MAX;
3673 min_kdbp = NULL;
3674 min_cpu = 0;
3675
3676 /* Check each CPU's buffers. */
3677 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
3678 /* Skip CPUs without data. */
3679 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
3680 next_cpu:
3681 continue;
3682 }
3683 /* Debugging aid: maintain a copy of the "kdsp"
3684 * index.
3685 */
3686 volatile union kds_ptr kdsp_shadow;
3687
3688 kdsp_shadow = kdsp;
3689
3690 /* From CPU data to buffer header to buffer. */
3691 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
3692
3693 volatile struct kd_storage *kdsp_actual_shadow;
3694
3695 kdsp_actual_shadow = kdsp_actual;
3696
3697 /* Skip buffer if there are no events left. */
3698 rcursor = kdsp_actual->kds_readlast;
3699
3700 if (rcursor == kdsp_actual->kds_bufindx) {
3701 continue;
3702 }
3703
3704 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
3705
3706 /* Ignore events that have aged out due to wrapping. */
3707 while (t < barrier_min) {
3708 rcursor = ++kdsp_actual->kds_readlast;
3709
3710 if (rcursor >= EVENTS_PER_STORAGE_UNIT) {
3711 release_storage_unit(cpu, kdsp.raw);
3712
3713 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) {
3714 goto next_cpu;
3715 }
3716 kdsp_shadow = kdsp;
3717 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
3718 kdsp_actual_shadow = kdsp_actual;
3719 rcursor = kdsp_actual->kds_readlast;
3720 }
3721
3722 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
3723 }
3724
3725 if ((t > barrier_max) && (barrier_max > 0)) {
3726 /*
3727 * Need to flush IOPs again before we
3728 * can sort any more data from the
3729 * buffers.
3730 */
3731 out_of_events = TRUE;
3732 break;
3733 }
3734 if (t < kdsp_actual->kds_timestamp) {
3735 /*
3736 * indicates we've not yet completed filling
3737 * in this event...
3738 * this should only occur when we're looking
3739 * at the buf that the record head is utilizing
3740 * we'll pick these events up on the next
3741 * call to kdbg_read
3742 * we bail at this point so that we don't
3743 * get an out-of-order timestream by continuing
3744 * to read events from the other CPUs' timestream(s)
3745 */
3746 out_of_events = TRUE;
3747 break;
3748 }
3749 if (t < earliest_time) {
3750 earliest_time = t;
3751 min_kdbp = kdbp;
3752 min_cpu = cpu;
3753 }
3754 }
3755 if (min_kdbp == NULL || out_of_events == TRUE) {
3756 /*
3757 * all buffers ran empty
3758 */
3759 out_of_events = TRUE;
3760 break;
3761 }
3762
3763 kdsp = min_kdbp->kd_list_head;
3764 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
3765
3766 /* Copy earliest event into merged events scratch buffer. */
3767 *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
3768
3769 if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
3770 release_storage_unit(min_cpu, kdsp.raw);
3771
3772 /*
3773 * Watch for out of order timestamps
3774 */
3775 if (earliest_time < min_kdbp->kd_prev_timebase) {
3776 /*
3777 * If we haven't already, emit a retrograde events event.
3778 */
3779 if (traced_retrograde) {
3780 continue;
3781 }
3782
3783 kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
3784 tempbuf->arg1 = tempbuf->debugid;
3785 tempbuf->arg2 = earliest_time;
3786 tempbuf->arg3 = 0;
3787 tempbuf->arg4 = 0;
3788 tempbuf->debugid = TRACE_RETROGRADE_EVENTS;
3789 traced_retrograde = true;
3790 } else {
3791 min_kdbp->kd_prev_timebase = earliest_time;
3792 }
3793 nextevent:
3794 tempbuf_count--;
3795 tempbuf_number++;
3796 tempbuf++;
3797
3798 if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
3799 break;
3800 }
3801 if (tempbuf_number) {
3802 if (file_version == RAW_VERSION3) {
3803 if ( !(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) {
3804 error = EFAULT;
3805 goto check_error;
3806 }
3807 if (buffer)
3808 buffer += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
3809
3810 assert(count >= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t)));
3811 count -= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
3812 *number += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t));
3813 }
3814 if (vp) {
3815 size_t write_size = tempbuf_number * sizeof(kd_buf);
3816 error = kdbg_write_to_vnode((caddr_t)kdcopybuf, write_size, vp, ctx, RAW_file_offset);
3817 if (!error)
3818 RAW_file_offset += write_size;
3819
3820 if (RAW_file_written >= RAW_FLUSH_SIZE) {
3821 error = VNOP_FSYNC(vp, MNT_NOWAIT, ctx);
3822
3823 RAW_file_written = 0;
3824 }
3825 } else {
3826 error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
3827 buffer += (tempbuf_number * sizeof(kd_buf));
3828 }
3829 check_error:
3830 if (error) {
3831 *number = 0;
3832 error = EINVAL;
3833 break;
3834 }
3835 count -= tempbuf_number;
3836 *number += tempbuf_number;
3837 }
3838 if (out_of_events == TRUE)
3839 /*
3840 * all trace buffers are empty
3841 */
3842 break;
3843
3844 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
3845 tempbuf_count = KDCOPYBUF_COUNT;
3846 }
3847 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
3848 enable_wrap(old_kdebug_slowcheck, lostevents);
3849 }
3850 thread_clear_eager_preempt(current_thread());
3851 return (error);
3852 }
3853
3854 static int
3855 kdbg_test(size_t flavor)
3856 {
3857 int code = 0;
3858 int dummy_iop = 0;
3859
3860 #define KDEBUG_TEST_CODE(code) BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, (code))
3861 switch (flavor) {
3862 case 1:
3863 /* try each macro */
3864 KDBG(KDEBUG_TEST_CODE(code)); code++;
3865 KDBG(KDEBUG_TEST_CODE(code), 1); code++;
3866 KDBG(KDEBUG_TEST_CODE(code), 1, 2); code++;
3867 KDBG(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
3868 KDBG(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
3869
3870 KDBG_RELEASE(KDEBUG_TEST_CODE(code)); code++;
3871 KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1); code++;
3872 KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2); code++;
3873 KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
3874 KDBG_RELEASE(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
3875
3876 KDBG_FILTERED(KDEBUG_TEST_CODE(code)); code++;
3877 KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1); code++;
3878 KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2); code++;
3879 KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
3880 KDBG_FILTERED(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
3881
3882 KDBG_DEBUG(KDEBUG_TEST_CODE(code)); code++;
3883 KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1); code++;
3884 KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2); code++;
3885 KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2, 3); code++;
3886 KDBG_DEBUG(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++;
3887 break;
3888
3889 case 2:
3890 if (kd_ctrl_page.kdebug_iops) {
3891 /* avoid the assertion in kernel_debug_enter for a valid IOP */
3892 dummy_iop = kd_ctrl_page.kdebug_iops[0].cpu_id;
3893 }
3894
3895 /* ensure old timestamps are not emitted from kernel_debug_enter */
3896 kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code),
3897 100 /* very old timestamp */, 0, 0, 0,
3898 0, (uintptr_t)thread_tid(current_thread()));
3899 code++;
3900 kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code),
3901 kdbg_timestamp(), 0, 0, 0, 0,
3902 (uintptr_t)thread_tid(current_thread()));
3903 code++;
3904 break;
3905 default:
3906 return ENOTSUP;
3907 }
3908 #undef KDEBUG_TEST_CODE
3909
3910 return 0;
3911 }
3912
3913 void
3914 kdebug_init(unsigned int n_events, char *filter_desc)
3915 {
3916 assert(filter_desc != NULL);
3917
3918 #if defined(__x86_64__)
3919 /* only trace MACH events when outputting kdebug to serial */
3920 if (kdebug_serial) {
3921 n_events = 1;
3922 if (filter_desc[0] == '\0') {
3923 filter_desc[0] = 'C';
3924 filter_desc[1] = '1';
3925 filter_desc[2] = '\0';
3926 }
3927 }
3928 #endif /* defined(__x86_64__) */
3929
3930 if (log_leaks && n_events == 0) {
3931 n_events = 200000;
3932 }
3933
3934 kdebug_trace_start(n_events, filter_desc, FALSE);
3935 }
3936
3937 static void
3938 kdbg_set_typefilter_string(const char *filter_desc)
3939 {
3940 char *end = NULL;
3941
3942 ktrace_assert_lock_held();
3943
3944 assert(filter_desc != NULL);
3945
3946 typefilter_reject_all(kdbg_typefilter);
3947 typefilter_allow_class(kdbg_typefilter, DBG_TRACE);
3948
3949 /* if the filter description starts with a number, assume it's a csc */
3950 if (filter_desc[0] >= '0' && filter_desc[0] <= '9') {
3951 unsigned long csc = strtoul(filter_desc, &end, 0);
3952 if (filter_desc != end && csc <= KDBG_CSC_MAX) {
3953 typefilter_allow_csc(kdbg_typefilter, csc);
3954 }
3955 return;
3956 }
3957
3958 while (filter_desc[0] != '\0') {
3959 unsigned long allow_value;
3960
3961 char filter_type = filter_desc[0];
3962 if (filter_type != 'C' && filter_type != 'S') {
3963 return;
3964 }
3965 filter_desc++;
3966
3967 allow_value = strtoul(filter_desc, &end, 0);
3968 if (filter_desc == end) {
3969 /* cannot parse as integer */
3970 return;
3971 }
3972
3973 switch (filter_type) {
3974 case 'C':
3975 if (allow_value <= KDBG_CLASS_MAX) {
3976 typefilter_allow_class(kdbg_typefilter, allow_value);
3977 } else {
3978 /* illegal class */
3979 return;
3980 }
3981 break;
3982 case 'S':
3983 if (allow_value <= KDBG_CSC_MAX) {
3984 typefilter_allow_csc(kdbg_typefilter, allow_value);
3985 } else {
3986 /* illegal class subclass */
3987 return;
3988 }
3989 break;
3990 default:
3991 return;
3992 }
3993
3994 /* advance to next filter entry */
3995 filter_desc = end;
3996 if (filter_desc[0] == ',') {
3997 filter_desc++;
3998 }
3999 }
4000 }
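
/*
 * Worked example of the parser above: given "C1,S0x0510", the first pass
 * sees 'C' and strtoul() consumes "1", allowing class 1; the ',' is
 * skipped; the second pass sees 'S' and strtoul() consumes "0x0510",
 * allowing that class-subclass.  A description beginning with a digit,
 * such as "0x0510" on its own, takes the early csc path instead.
 */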
4001
4002 /*
4003 * This function is meant to be called from the bootstrap thread or coming out
4004 * of acpi_idle_kernel.
4005 */
4006 void
4007 kdebug_trace_start(unsigned int n_events, const char *filter_desc,
4008 boolean_t at_wake)
4009 {
4010 uint32_t old1, old2;
4011
4012 if (!n_events) {
4013 kd_early_done = true;
4014 return;
4015 }
4016
4017 ktrace_start_single_threaded();
4018
4019 kdbg_lock_init();
4020
4021 ktrace_kernel_configure(KTRACE_KDEBUG);
4022
4023 kdbg_set_nkdbufs(n_events);
4024
4025 kernel_debug_string_early("start_kern_tracing");
4026
4027 if (kdbg_reinit(TRUE)) {
4028 printf("error from kdbg_reinit, kernel tracing not started\n");
4029 goto out;
4030 }
4031
4032 /*
4033 * Wrapping is disabled because boot and wake tracing is interested in
4034 * the earliest events, at the expense of later ones.
4035 */
4036 (void)disable_wrap(&old1, &old2);
4037
4038 if (filter_desc && filter_desc[0] != '\0') {
4039 if (kdbg_initialize_typefilter(NULL) == KERN_SUCCESS) {
4040 kdbg_set_typefilter_string(filter_desc);
4041 kdbg_enable_typefilter();
4042 }
4043 }
4044
4045 	/*
4046 	 * Hold off interrupts from the time the thread map is captured until
4047 	 * tracing is enabled and the early traces have been recorded.
4048 	 */
4049 boolean_t s = ml_set_interrupts_enabled(FALSE);
4050
4051 if (at_wake) {
4052 kdbg_thrmap_init();
4053 }
4054
4055 kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE | (kdebug_serial ?
4056 KDEBUG_ENABLE_SERIAL : 0));
4057
4058 if (!at_wake) {
4059 /*
4060 * Transfer all very early events from the static buffer into the real
4061 * buffers.
4062 */
4063 kernel_debug_early_end();
4064 }
4065
4066 ml_set_interrupts_enabled(s);
4067
4068 printf("kernel tracing started with %u events\n", n_events);
4069
4070 #if KDEBUG_MOJO_TRACE
4071 if (kdebug_serial) {
4072 printf("serial output enabled with %lu named events\n",
4073 sizeof(kd_events)/sizeof(kd_event_t));
4074 }
4075 #endif /* KDEBUG_MOJO_TRACE */
4076
4077 out:
4078 ktrace_end_single_threaded();
4079 }
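
/*
 * Callers, for orientation (hedged): kdebug_init() above passes
 * at_wake == FALSE at boot, so the static early-event buffer is drained
 * into the real buffers here; the platform wake path re-arms tracing
 * with something like kdebug_trace_start(wake_nkdbufs, NULL, TRUE),
 * which instead rebuilds the thread map for the threads alive at wake.
 */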
4080
4081 void
4082 kdbg_dump_trace_to_file(const char *filename)
4083 {
4084 vfs_context_t ctx;
4085 vnode_t vp;
4086 size_t write_size;
4087 int ret;
4088
4089 ktrace_lock();
4090
4091 if (!(kdebug_enable & KDEBUG_ENABLE_TRACE)) {
4092 goto out;
4093 }
4094
4095 if (ktrace_get_owning_pid() != 0) {
4096 		/*
4097 		 * Another process owns ktrace and is still active; disable tracing to
4098 		 * prevent wrapping over its events.
4099 		 */
4100 kdebug_enable = 0;
4101 kd_ctrl_page.enabled = 0;
4102 commpage_update_kdebug_state();
4103 goto out;
4104 }
4105
4106 KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_START);
4107
4108 kdebug_enable = 0;
4109 kd_ctrl_page.enabled = 0;
4110 commpage_update_kdebug_state();
4111
4112 ctx = vfs_context_kernel();
4113
4114 if (vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)) {
4115 goto out;
4116 }
4117
4118 kdbg_write_thread_map(vp, ctx);
4119
4120 write_size = nkdbufs * sizeof(kd_buf);
4121 ret = kdbg_read(0, &write_size, vp, ctx, RAW_VERSION1);
4122 if (ret) {
4123 goto out_close;
4124 }
4125
4126 /*
4127 * Wait to synchronize the file to capture the I/O in the
4128 * TRACE_WRITING_EVENTS interval.
4129 */
4130 ret = VNOP_FSYNC(vp, MNT_WAIT, ctx);
4131
4132 /*
4133 * Balance the starting TRACE_WRITING_EVENTS tracepoint manually.
4134 */
4135 kd_buf end_event = {
4136 .debugid = TRACE_WRITING_EVENTS | DBG_FUNC_END,
4137 .arg1 = write_size,
4138 .arg2 = ret,
4139 .arg5 = thread_tid(current_thread()),
4140 };
4141 kdbg_set_timestamp_and_cpu(&end_event, kdbg_timestamp(),
4142 cpu_number());
4143
4144 /* this is best effort -- ignore any errors */
4145 (void)kdbg_write_to_vnode((caddr_t)&end_event, sizeof(kd_buf), vp, ctx,
4146 RAW_file_offset);
4147
4148 out_close:
4149 vnode_close(vp, FWRITE, ctx);
4150 sync(current_proc(), (void *)NULL, (int *)NULL);
4151
4152 out:
4153 ktrace_unlock();
4154 }
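
/*
 * The file written above is a RAW_VERSION1 trace: the thread map from
 * kdbg_write_thread_map(), the kd_buf records from kdbg_read(), then the
 * hand-built TRACE_WRITING_EVENTS end event.  A hedged post-processing
 * example (trace(1) flags vary by release):
 *
 *	trace -R /path/to/dump -o decoded.txt
 */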
4155
4156 /* Helper function for filling in the BSD name for an address space.
4157 * Defined here because the machine bindings know only Mach threads
4158 * and nothing about BSD processes.
4159 *
4160 * FIXME: need to grab a lock during this?
4161 */
4162 void kdbg_get_task_name(char *name_buf, int len, task_t task)
4163 {
4164 proc_t proc;
4165
4166 /* Note: we can't use thread->task (and functions that rely on it) here
4167 * because it hasn't been initialized yet when this function is called.
4168 * We use the explicitly-passed task parameter instead.
4169 */
4170 proc = get_bsdtask_info(task);
4171 if (proc != PROC_NULL)
4172 snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
4173 else
4174 snprintf(name_buf, len, "%p [!bsd]", task);
4175 }
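
/*
 * Example results (illustrative): a task whose BSD proc is pid 1 running
 * launchd formats as "launchd/1"; a task with no BSD proc formats as a
 * raw pointer, e.g. "0xffffff8012345678 [!bsd]".
 */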
4176
4177 static int
4178 kdbg_sysctl_continuous SYSCTL_HANDLER_ARGS
4179 {
4180 #pragma unused(oidp, arg1, arg2)
4181 int value = kdbg_continuous_time;
4182 int ret = sysctl_io_number(req, value, sizeof(value), &value, NULL);
4183
4184 if (ret || !req->newptr) {
4185 return ret;
4186 }
4187
4188 kdbg_continuous_time = value;
4189 return 0;
4190 }
4191
4192 SYSCTL_NODE(_kern, OID_AUTO, kdbg, CTLFLAG_RD | CTLFLAG_LOCKED, 0,
4193 "kdbg");
4194
4195 SYSCTL_PROC(_kern_kdbg, OID_AUTO, experimental_continuous,
4196 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0,
4197 sizeof(int), kdbg_sysctl_continuous, "I",
4198 "Set kdebug to use mach_continuous_time");
4199
4200 SYSCTL_QUAD(_kern_kdbg, OID_AUTO, oldest_time,
4201 CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
4202 &kd_ctrl_page.oldest_time,
4203 "Find the oldest timestamp still in trace");
4204
4205 #if KDEBUG_MOJO_TRACE
4206 static kd_event_t *
4207 binary_search(uint32_t id)
4208 {
4209 int low, high, mid;
4210
4211 low = 0;
4212 high = sizeof(kd_events)/sizeof(kd_event_t) - 1;
4213
4214 while (TRUE)
4215 {
4216 mid = (low + high) / 2;
4217
4218 if (low > high)
4219 return NULL; /* failed */
4220 		else if (low + 1 >= high) {
4221 			/* narrowed to at most two candidates -- check both for a match */
4222 if (kd_events[high].id == id)
4223 return &kd_events[high];
4224 else if (kd_events[low].id == id)
4225 return &kd_events[low];
4226 else
4227 return NULL; /* search failed */
4228 }
4229 else if (id < kd_events[mid].id)
4230 high = mid;
4231 else
4232 low = mid;
4233 }
4234 }
4235
4236 /*
4237 * Look up an event id to get its name string, consulting a per-cpu,
4238 * single-entry cache before resorting to a binary search of the full
4239 * table.
4240 */
4241 #define NCACHE 1 /* name-lookup cache entries per cpu */
4242 static kd_event_t *last_hit[MAX_CPUS];
4243 static kd_event_t *
4244 event_lookup_cache(uint32_t cpu, uint32_t id)
4245 {
4246 if (last_hit[cpu] == NULL || last_hit[cpu]->id != id)
4247 last_hit[cpu] = binary_search(id);
4248 return last_hit[cpu];
4249 }
4250
4251 static uint64_t kd_last_timestamp; /* previous event's time, for deltas */
4252
4253 static void
4254 kdebug_serial_print(
4255 uint32_t cpunum,
4256 uint32_t debugid,
4257 uint64_t timestamp,
4258 uintptr_t arg1,
4259 uintptr_t arg2,
4260 uintptr_t arg3,
4261 uintptr_t arg4,
4262 uintptr_t threadid
4263 )
4264 {
4265 char kprintf_line[192];
4266 char event[40];
4267 uint64_t us = timestamp / NSEC_PER_USEC;
4268 uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100;
4269 	uint64_t delta = timestamp - kd_last_timestamp;
4270 uint64_t delta_us = delta / NSEC_PER_USEC;
4271 uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100;
4272 uint32_t event_id = debugid & KDBG_EVENTID_MASK;
4273 const char *command;
4274 const char *bra;
4275 const char *ket;
4276 kd_event_t *ep;
4277
4278 /* event time and delta from last */
4279 snprintf(kprintf_line, sizeof(kprintf_line),
4280 "%11llu.%1llu %8llu.%1llu ",
4281 us, us_tenth, delta_us, delta_us_tenth);
4282
4283
4284 	/* event (id or name) - start events prefixed with "[", end events suffixed with "]" */
4285 bra = (debugid & DBG_FUNC_START) ? "[" : " ";
4286 ket = (debugid & DBG_FUNC_END) ? "]" : " ";
4287 ep = event_lookup_cache(cpunum, event_id);
4288 if (ep) {
4289 if (strlen(ep->name) < sizeof(event) - 3)
4290 snprintf(event, sizeof(event), "%s%s%s",
4291 bra, ep->name, ket);
4292 else
4293 snprintf(event, sizeof(event), "%s%x(name too long)%s",
4294 bra, event_id, ket);
4295 } else {
4296 snprintf(event, sizeof(event), "%s%x%s",
4297 bra, event_id, ket);
4298 }
4299 snprintf(kprintf_line + strlen(kprintf_line),
4300 sizeof(kprintf_line) - strlen(kprintf_line),
4301 "%-40s ", event);
4302
4303 /* arg1 .. arg4 with special cases for strings */
4304 switch (event_id) {
4305 case VFS_LOOKUP:
4306 case VFS_LOOKUP_DONE:
4307 if (debugid & DBG_FUNC_START) {
4308 /* arg1 hex then arg2..arg4 chars */
4309 snprintf(kprintf_line + strlen(kprintf_line),
4310 sizeof(kprintf_line) - strlen(kprintf_line),
4311 "%-16lx %-8s%-8s%-8s ",
4312 arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
4313 break;
4314 }
4315 /* else fall through for arg1..arg4 chars */
4316 case TRACE_STRING_EXEC:
4317 case TRACE_STRING_NEWTHREAD:
4318 case TRACE_INFO_STRING:
4319 snprintf(kprintf_line + strlen(kprintf_line),
4320 sizeof(kprintf_line) - strlen(kprintf_line),
4321 "%-8s%-8s%-8s%-8s ",
4322 (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4);
4323 break;
4324 default:
4325 snprintf(kprintf_line + strlen(kprintf_line),
4326 sizeof(kprintf_line) - strlen(kprintf_line),
4327 "%-16lx %-16lx %-16lx %-16lx",
4328 arg1, arg2, arg3, arg4);
4329 }
4330
4331 /* threadid, cpu and command name */
4332 if (threadid == (uintptr_t)thread_tid(current_thread()) &&
4333 current_proc() &&
4334 current_proc()->p_comm[0])
4335 command = current_proc()->p_comm;
4336 else
4337 command = "-";
4338 snprintf(kprintf_line + strlen(kprintf_line),
4339 sizeof(kprintf_line) - strlen(kprintf_line),
4340 " %-16lx %-2d %s\n",
4341 threadid, cpunum, command);
4342
4343 kprintf("%s", kprintf_line);
4344 	kd_last_timestamp = timestamp;
4345 }
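
/*
 * Sample line (illustrative values, matching the formats above):
 * absolute microseconds and tenths, delta from the previous event, the
 * event name bracketed for a DBG_FUNC_START event, args 1..4, then
 * threadid, cpu, and command:
 *
 *	  123456789.1     42.0 [SOME_EVENT     ]  10 20 30 40  16a2b  2 launchd
 */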
4346
4347 #endif