1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23
24 #include <machine/spl.h>
25
26 #include <sys/errno.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc_internal.h>
30 #include <sys/vm.h>
31 #include <sys/sysctl.h>
32 #include <sys/kdebug.h>
33 #include <sys/sysproto.h>
34 #include <sys/bsdtask_info.h>
35
36 #define HZ 100
37 #include <mach/clock_types.h>
38 #include <mach/mach_types.h>
39 #include <mach/mach_time.h>
40 #include <machine/machine_routines.h>
41
42 #if defined(__i386__) || defined(__x86_64__)
43 #include <i386/rtclock_protos.h>
44 #include <i386/mp.h>
45 #include <i386/machine_routines.h>
46 #endif
47
48 #include <kern/clock.h>
49
50 #include <kern/thread.h>
51 #include <kern/task.h>
52 #include <kern/debug.h>
53 #include <kern/kalloc.h>
54 #include <kern/cpu_data.h>
55 #include <kern/assert.h>
56 #include <vm/vm_kern.h>
57 #include <sys/lock.h>
58
59 #include <sys/malloc.h>
60 #include <sys/mcache.h>
61 #include <sys/kauth.h>
62
63 #include <sys/vnode.h>
64 #include <sys/vnode_internal.h>
65 #include <sys/fcntl.h>
66 #include <sys/file_internal.h>
67 #include <sys/ubc.h>
68
69 #include <mach/mach_host.h> /* for host_info() */
70 #include <libkern/OSAtomic.h>
71
72 #include <machine/pal_routines.h>
73
74 /* XXX should have prototypes, but Mach does not provide them */
75 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
76 int cpu_number(void); /* XXX <machine/...> include path broken */
77
78 /* XXX should probably be static, but it's debugging code... */
79 int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
80 void kdbg_control_chud(int, void *);
81 int kdbg_control(int *, u_int, user_addr_t, size_t *);
82 int kdbg_getentropy (user_addr_t, size_t *, int);
83 int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
84 int kdbg_getreg(kd_regtype *);
85 int kdbg_setreg(kd_regtype *);
86 int kdbg_setrtcdec(kd_regtype *);
87 int kdbg_setpidex(kd_regtype *);
88 int kdbg_setpid(kd_regtype *);
89 void kdbg_mapinit(void);
90 int kdbg_reinit(boolean_t);
91 int kdbg_bootstrap(boolean_t);
92
93 static int create_buffers(boolean_t);
94 static void delete_buffers(void);
95
96 extern void IOSleep(int);
97
98 /* trace enable status */
99 unsigned int kdebug_enable = 0;
100
101 /* track timestamps for security server's entropy needs */
102 uint64_t * kd_entropy_buffer = 0;
103 unsigned int kd_entropy_bufsize = 0;
104 unsigned int kd_entropy_count = 0;
105 unsigned int kd_entropy_indx = 0;
106 vm_offset_t kd_entropy_buftomem = 0;
107
108 #define MAX_ENTROPY_COUNT (128 * 1024)
109
110
111 #define SLOW_NOLOG 0x01
112 #define SLOW_CHECKS 0x02
113 #define SLOW_ENTROPY 0x04
114 #define SLOW_CHUD 0x08
115
116 unsigned int kd_cpus;
117
118 #define EVENTS_PER_STORAGE_UNIT 2048
119 #define MIN_STORAGE_UNITS_PER_CPU 4
120
121 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
122
123 #define NATIVE_TRACE_FACILITY
124
125 union kds_ptr {
126 struct {
127 uint32_t buffer_index:21;
128 uint16_t offset:11;
129 };
130 uint32_t raw;
131 };
132
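/*
 * A kds_ptr packs a reference to a storage unit into 32 bits: 'buffer_index'
 * selects an entry in kd_bufs[] and 'offset' selects a kd_storage element
 * within that buffer.  KDS_PTR_NULL (0xffffffff) terminates a list and
 * POINTER_FROM_KDS_PTR() converts the packed form back into a pointer.
 */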
133 struct kd_storage {
134 union kds_ptr kds_next;
135 uint32_t kds_bufindx;
136 uint32_t kds_bufcnt;
137 uint32_t kds_readlast;
138 boolean_t kds_lostevents;
139 uint64_t kds_timestamp;
140
141 kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
142 };
143
144 #define MAX_BUFFER_SIZE (1024 * 1024 * 128)
145 #define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
146
147 struct kd_storage_buffers {
148 struct kd_storage *kdsb_addr;
149 uint32_t kdsb_size;
150 };
151
152 #define KDS_PTR_NULL 0xffffffff
153 struct kd_storage_buffers *kd_bufs = NULL;
154 int n_storage_units = 0;
155 int n_storage_buffers = 0;
156 int n_storage_threshold = 0;
157 int kds_waiter = 0;
158 int kde_waiter = 0;
159
160 #pragma pack(0)
161 struct kd_bufinfo {
162 union kds_ptr kd_list_head;
163 union kds_ptr kd_list_tail;
164 boolean_t kd_lostevents;
165 uint32_t _pad;
166 uint64_t kd_prev_timebase;
167 uint32_t num_bufs;
168 } __attribute__(( aligned(CPU_CACHE_SIZE) ));
169
170 struct kd_ctrl_page_t {
171 union kds_ptr kds_free_list;
172 uint32_t enabled :1;
173 uint32_t _pad0 :31;
174 int kds_inuse_count;
175 uint32_t kdebug_flags;
176 uint32_t kdebug_slowcheck;
177 uint32_t _pad1;
178 struct {
179 uint64_t tsc_base;
180 uint64_t ns_base;
181 } cpu_timebase[32]; // should be max number of actual logical cpus
182 } kd_ctrl_page = {.kds_free_list = {.raw = KDS_PTR_NULL}, .enabled = 0, .kds_inuse_count = 0, .kdebug_flags = 0, .kdebug_slowcheck = SLOW_NOLOG};
183 #pragma pack()
184
185 struct kd_bufinfo *kdbip = NULL;
186
187 #define KDCOPYBUF_COUNT 8192
188 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
189 kd_buf *kdcopybuf = NULL;
190
191
192 unsigned int nkdbufs = 8192;
193 unsigned int kdlog_beg=0;
194 unsigned int kdlog_end=0;
195 unsigned int kdlog_value1=0;
196 unsigned int kdlog_value2=0;
197 unsigned int kdlog_value3=0;
198 unsigned int kdlog_value4=0;
199
200 static lck_spin_t * kdw_spin_lock;
201 static lck_spin_t * kds_spin_lock;
202 static lck_mtx_t * kd_trace_mtx_sysctl;
203 static lck_grp_t * kd_trace_mtx_sysctl_grp;
204 static lck_attr_t * kd_trace_mtx_sysctl_attr;
205 static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
206
207 static lck_grp_t *stackshot_subsys_lck_grp;
208 static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
209 static lck_attr_t *stackshot_subsys_lck_attr;
210 static lck_mtx_t stackshot_subsys_mutex;
211
212 void *stackshot_snapbuf = NULL;
213
214 int
215 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);
216
217 extern void
218 kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);
219
220 extern int
221 kdp_stack_snapshot_geterror(void);
222 extern unsigned int
223 kdp_stack_snapshot_bytes_traced(void);
224
225 kd_threadmap *kd_mapptr = 0;
226 unsigned int kd_mapsize = 0;
227 unsigned int kd_mapcount = 0;
228 vm_offset_t kd_maptomem = 0;
229
230 off_t RAW_file_offset = 0;
231 int RAW_file_written = 0;
232
233 #define RAW_FLUSH_SIZE (2 * 1024 * 1024)
234
235
236 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
237
238 #define DBG_FUNC_MASK 0xfffffffc
239
240 #define INTERRUPT 0x01050000
241 #define MACH_vmfault 0x01300008
242 #define BSC_SysCall 0x040c0000
243 #define MACH_SysCall 0x010c0000
244 #define DBG_SCALL_MASK 0xffff0000
245
246
247 /* task to string structure */
248 struct tts
249 {
250         task_t    task;            /* from proc's task */
251         pid_t     pid;             /* from proc's p_pid */
252         char      task_comm[20];   /* from proc's p_comm */
253 };
254
255 typedef struct tts tts_t;
256
257 struct krt
258 {
259 kd_threadmap *map; /* pointer to the map buffer */
260 int count;
261 int maxcount;
262 struct tts *atts;
263 };
264
265 typedef struct krt krt_t;
266
267 /* This is for the CHUD toolkit call */
268 typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
269 uintptr_t arg2, uintptr_t arg3,
270 uintptr_t arg4, uintptr_t arg5);
271
272 volatile kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
273
274 __private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
275
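/*
 * Turn event recording on or off as a whole: kdebug_enable, the SLOW_NOLOG
 * fast-path check and kd_ctrl_page.enabled are updated together under
 * kds_spin_lock with interrupts disabled, so the recording path always sees
 * a consistent view.
 */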
276 static void
277 kdbg_set_tracing_enabled(boolean_t enabled)
278 {
279 int s = ml_set_interrupts_enabled(FALSE);
280 lck_spin_lock(kds_spin_lock);
281
282 if (enabled) {
283 kdebug_enable |= KDEBUG_ENABLE_TRACE;
284 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
285 kd_ctrl_page.enabled = 1;
286 } else {
287 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
288 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
289 kd_ctrl_page.enabled = 0;
290 }
291 lck_spin_unlock(kds_spin_lock);
292 ml_set_interrupts_enabled(s);
293 }
294
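/*
 * Set or clear a matched pair of bits: a slow-path check in
 * kd_ctrl_page.kdebug_slowcheck and the corresponding enable bit in
 * kdebug_enable (e.g. SLOW_ENTROPY / KDEBUG_ENABLE_ENTROPY).
 */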
295 static void
296 kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
297 {
298 int s = ml_set_interrupts_enabled(FALSE);
299 lck_spin_lock(kds_spin_lock);
300
301 if (enabled) {
302 kd_ctrl_page.kdebug_slowcheck |= slowflag;
303 kdebug_enable |= enableflag;
304 } else {
305 kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
306 kdebug_enable &= ~enableflag;
307 }
308 lck_spin_unlock(kds_spin_lock);
309 ml_set_interrupts_enabled(s);
310 }
311
312
313 #ifdef NATIVE_TRACE_FACILITY
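/*
 * disable_wrap()/enable_wrap() bracket a read pass: the reader saves the
 * current slowcheck and flag state and sets KDBG_NOWRAP so writers stop
 * stealing storage units; enable_wrap() restores that state and re-marks
 * the trace as KDBG_WRAPPED if events were lost while reading.
 */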
314 void
315 disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
316 {
317 int s = ml_set_interrupts_enabled(FALSE);
318 lck_spin_lock(kds_spin_lock);
319
320 *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
321 *old_flags = kd_ctrl_page.kdebug_flags;
322
323 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
324 kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
325
326 lck_spin_unlock(kds_spin_lock);
327 ml_set_interrupts_enabled(s);
328 }
329
330 void
331 enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
332 {
333 int s = ml_set_interrupts_enabled(FALSE);
334 lck_spin_lock(kds_spin_lock);
335
336 kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
337
338 if ( !(old_slowcheck & SLOW_NOLOG))
339 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
340
341 if (lostevents == TRUE)
342 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
343
344 lck_spin_unlock(kds_spin_lock);
345 ml_set_interrupts_enabled(s);
346 }
347
348 void trace_set_timebases(__unused uint64_t tsc, __unused uint64_t ns)
349 {
350 }
351 #else
352 /* Begin functions that are defined twice */
353 void trace_set_timebases(uint64_t tsc, uint64_t ns)
354 {
355 int cpu = cpu_number();
356 kd_ctrl_page.cpu_timebase[cpu].tsc_base = tsc;
357 kd_ctrl_page.cpu_timebase[cpu].ns_base = ns;
358 }
359
360 #endif
361
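/*
 * Allocate the per-cpu bufinfo array and the storage that backs tracing:
 * nkdbufs is rounded to whole storage units (at least
 * MIN_STORAGE_UNITS_PER_CPU per cpu), the units are carved out of one or
 * more large buffers and threaded onto kd_ctrl_page.kds_free_list, and
 * n_storage_threshold is set to half the total for the reader wakeup logic.
 */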
362 static int
363 #if defined(__i386__) || defined(__x86_64__)
364 create_buffers(boolean_t early_trace)
365 #else
366 create_buffers(__unused boolean_t early_trace)
367 #endif
368 {
369 int i;
370 int p_buffer_size;
371 int f_buffer_size;
372 int f_buffers;
373 int error = 0;
374
375 /*
376 * get the number of cpus and cache it
377 */
378 #if defined(__i386__) || defined(__x86_64__)
379 if (early_trace == TRUE) {
380 /*
381 * we've started tracing before the
382 * IOKit has even started running... just
383 * use the static max value
384 */
385 kd_cpus = max_ncpus;
386 } else
387 #endif
388 {
389 host_basic_info_data_t hinfo;
390 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
391
392 #define BSD_HOST 1
393 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
394 kd_cpus = hinfo.logical_cpu_max;
395 }
396 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS) {
397 error = ENOSPC;
398 goto out;
399 }
400
401 trace_handler_map_bufinfo((uintptr_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
402
403 #if !defined(NATIVE_TRACE_FACILITY)
404 for(i=0;i<(int)kd_cpus;i++) {
405 get_nanotime_timebases(i,
406 &kd_ctrl_page.cpu_timebase[i].tsc_base,
407 &kd_ctrl_page.cpu_timebase[i].ns_base);
408 }
409 #endif
410
411 if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
412 n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
413 else
414 n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
415
416 nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
417
418 f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
419 n_storage_buffers = f_buffers;
420
421 f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
422 p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
423
424 if (p_buffer_size)
425 n_storage_buffers++;
426
427 kd_bufs = NULL;
428
429 if (kdcopybuf == 0) {
430 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
431 error = ENOSPC;
432 goto out;
433 }
434 }
435 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
436 error = ENOSPC;
437 goto out;
438 }
439 bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
440
441 for (i = 0; i < f_buffers; i++) {
442 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
443 error = ENOSPC;
444 goto out;
445 }
446 bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
447
448 kd_bufs[i].kdsb_size = f_buffer_size;
449 }
450 if (p_buffer_size) {
451 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
452 error = ENOSPC;
453 goto out;
454 }
455 bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
456
457 kd_bufs[i].kdsb_size = p_buffer_size;
458 }
459 n_storage_units = 0;
460
461 for (i = 0; i < n_storage_buffers; i++) {
462 struct kd_storage *kds;
463 int n_elements;
464 int n;
465
466 n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
467 kds = kd_bufs[i].kdsb_addr;
468
469 trace_handler_map_buffer(i, (uintptr_t)kd_bufs[i].kdsb_addr, kd_bufs[i].kdsb_size);
470
471 for (n = 0; n < n_elements; n++) {
472 kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
473 kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
474
475 kd_ctrl_page.kds_free_list.buffer_index = i;
476 kd_ctrl_page.kds_free_list.offset = n;
477 }
478 n_storage_units += n_elements;
479 }
480
481 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
482
483 for (i = 0; i < (int)kd_cpus; i++) {
484 kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
485 kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
486 kdbip[i].kd_lostevents = FALSE;
487 kdbip[i].num_bufs = 0;
488 }
489
490 kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
491
492 kd_ctrl_page.kds_inuse_count = 0;
493 n_storage_threshold = n_storage_units / 2;
494 out:
495 if (error)
496 delete_buffers();
497
498 return(error);
499 }
500
501
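/*
 * Release everything create_buffers() allocated (the storage buffers, the
 * copy buffer and the per-cpu bufinfo array) and clear KDBG_BUFINIT.
 */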
502 static void
503 delete_buffers(void)
504 {
505 int i;
506
507 if (kd_bufs) {
508 for (i = 0; i < n_storage_buffers; i++) {
509 if (kd_bufs[i].kdsb_addr) {
510 kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
511 trace_handler_unmap_buffer(i);
512 }
513 }
514 kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
515
516 kd_bufs = NULL;
517 n_storage_buffers = 0;
518 }
519 if (kdcopybuf) {
520 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
521
522 kdcopybuf = NULL;
523 }
524 kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
525
526 if (kdbip) {
527 trace_handler_unmap_bufinfo();
528
529 kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
530
531 kdbip = NULL;
532 }
533 kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
534 }
535
536
537 #ifdef NATIVE_TRACE_FACILITY
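/*
 * Return a storage unit to the free list once the reader has consumed all
 * of its events.  The unit is unlinked only if it is still at the head of
 * the given cpu's list; a writer may already have stolen it.
 */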
538 void
539 release_storage_unit(int cpu, uint32_t kdsp_raw)
540 {
541 int s = 0;
542 struct kd_storage *kdsp_actual;
543 struct kd_bufinfo *kdbp;
544 union kds_ptr kdsp;
545
546 kdsp.raw = kdsp_raw;
547
548 s = ml_set_interrupts_enabled(FALSE);
549 lck_spin_lock(kds_spin_lock);
550
551 kdbp = &kdbip[cpu];
552
553 if (kdsp.raw == kdbp->kd_list_head.raw) {
554 /*
555 * it's possible for the storage unit pointed to
556 * by kdsp to have already been stolen... so
557 * check to see if it's still the head of the list
558 * now that we're behind the lock that protects
559 * adding and removing from the queue...
560 * since we only ever release and steal units from
561 * that position, if it's no longer the head
562          * we have nothing to do in this context
563 */
564 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
565 kdbp->kd_list_head = kdsp_actual->kds_next;
566
567 kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
568 kd_ctrl_page.kds_free_list = kdsp;
569
570 kd_ctrl_page.kds_inuse_count--;
571 }
572 lck_spin_unlock(kds_spin_lock);
573 ml_set_interrupts_enabled(s);
574 }
575
576
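/*
 * Hand the given cpu a storage unit to record into.  Prefer the free list;
 * if it is empty and wrapping is allowed, steal the unit holding the oldest
 * events from whichever cpu owns it and mark the trace as wrapped.  Returns
 * FALSE only when no unit could be obtained (wrapping disabled, or nothing
 * left to steal).
 */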
577 boolean_t
578 allocate_storage_unit(int cpu)
579 {
580 union kds_ptr kdsp;
581 struct kd_storage *kdsp_actual;
582 struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
583 uint64_t oldest_ts, ts;
584 boolean_t retval = TRUE;
585 int s = 0;
586
587 s = ml_set_interrupts_enabled(FALSE);
588 lck_spin_lock(kds_spin_lock);
589
590 kdbp = &kdbip[cpu];
591
592 /* If someone beat us to the allocate, return success */
593 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
594 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
595
596 if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
597 goto out;
598 }
599
600 if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
601 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
602 kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
603
604 kd_ctrl_page.kds_inuse_count++;
605 } else {
606 if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
607 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
608 kdbp->kd_lostevents = TRUE;
609 retval = FALSE;
610 goto out;
611 }
612 kdbp_vict = NULL;
613 oldest_ts = (uint64_t)-1;
614
615 for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {
616
617 if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
618 /*
619 * no storage unit to steal
620 */
621 continue;
622 }
623
624 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
625
626 if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
627 /*
628 * make sure we don't steal the storage unit
629 * being actively recorded to... need to
630 * move on because we don't want an out-of-order
631 * set of events showing up later
632 */
633 continue;
634 }
635 ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
636
637 if (ts < oldest_ts) {
638 /*
639 * when 'wrapping', we want to steal the
640 * storage unit that has the 'earliest' time
641 * associated with it (first event time)
642 */
643 oldest_ts = ts;
644 kdbp_vict = kdbp_try;
645 }
646 }
647 if (kdbp_vict == NULL) {
648 kdebug_enable = 0;
649 kd_ctrl_page.enabled = 0;
650 retval = FALSE;
651 goto out;
652 }
653 kdsp = kdbp_vict->kd_list_head;
654 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
655
656 kdbp_vict->kd_list_head = kdsp_actual->kds_next;
657
658 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
659 }
660 kdsp_actual->kds_timestamp = mach_absolute_time();
661 kdsp_actual->kds_next.raw = KDS_PTR_NULL;
662 kdsp_actual->kds_bufcnt = 0;
663 kdsp_actual->kds_readlast = 0;
664
665 kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
666 kdbp->kd_lostevents = FALSE;
667 kdsp_actual->kds_bufindx = 0;
668
669 if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
670 kdbp->kd_list_head = kdsp;
671 else
672 POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
673 kdbp->kd_list_tail = kdsp;
674 out:
675 lck_spin_unlock(kds_spin_lock);
676 ml_set_interrupts_enabled(s);
677
678 return (retval);
679 }
680 #endif
681
682 void
683 kernel_debug_internal(
684 uint32_t debugid,
685 uintptr_t arg1,
686 uintptr_t arg2,
687 uintptr_t arg3,
688 uintptr_t arg4,
689 uintptr_t arg5,
690 int entropy_flag);
691
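/*
 * Common recording path for all kdebug entry points: run the CHUD hook and
 * entropy sampling if enabled, apply the pid/range/value filters, then claim
 * a slot in the current cpu's storage unit with a compare-and-swap and fill
 * in the event.  On the way out, wake any reader or entropy waiter whose
 * threshold has been reached.
 */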
692 __attribute__((always_inline)) void
693 kernel_debug_internal(
694 uint32_t debugid,
695 uintptr_t arg1,
696 uintptr_t arg2,
697 uintptr_t arg3,
698 uintptr_t arg4,
699 uintptr_t arg5,
700 int entropy_flag)
701 {
702 struct proc *curproc;
703 uint64_t now;
704 uint32_t bindx;
705 boolean_t s;
706 kd_buf *kd;
707 int cpu;
708 struct kd_bufinfo *kdbp;
709 struct kd_storage *kdsp_actual;
710
711
712 if (kd_ctrl_page.kdebug_slowcheck) {
713
714 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
715 kd_chudhook_fn chudhook;
716 /*
717 * Mask interrupts to minimize the interval across
718 * which the driver providing the hook could be
719 * unloaded.
720 */
721 s = ml_set_interrupts_enabled(FALSE);
722 chudhook = kdebug_chudhook;
723 if (chudhook)
724 chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
725 ml_set_interrupts_enabled(s);
726 }
727 if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {
728
729 now = mach_absolute_time();
730
731 s = ml_set_interrupts_enabled(FALSE);
732 lck_spin_lock(kds_spin_lock);
733
734 if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {
735
736 if (kd_entropy_indx < kd_entropy_count) {
737 kd_entropy_buffer[kd_entropy_indx] = now;
738 kd_entropy_indx++;
739 }
740 if (kd_entropy_indx == kd_entropy_count) {
741 /*
742 * Disable entropy collection
743 */
744 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
745 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
746 }
747 }
748 lck_spin_unlock(kds_spin_lock);
749 ml_set_interrupts_enabled(s);
750 }
751 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & KDEBUG_ENABLE_TRACE))
752 goto out1;
753
754 if ( !ml_at_interrupt_context()) {
755 if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
756 /*
757 * If kdebug flag is not set for current proc, return
758 */
759 curproc = current_proc();
760
761 if ((curproc && !(curproc->p_kdebug)) &&
762 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
763 goto out1;
764 }
765 else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
766 /*
767 * If kdebug flag is set for current proc, return
768 */
769 curproc = current_proc();
770
771 if ((curproc && curproc->p_kdebug) &&
772 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
773 goto out1;
774 }
775 }
776 if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
777 if ((debugid < kdlog_beg)
778 || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
779 goto out1;
780 }
781 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
782 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
783 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
784 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
785 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
786 (debugid >> 24 != DBG_TRACE))
787 goto out1;
788 }
789 }
790 disable_preemption();
791 cpu = cpu_number();
792 kdbp = &kdbip[cpu];
793 retry_q:
794 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
795 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
796 bindx = kdsp_actual->kds_bufindx;
797 } else
798 kdsp_actual = NULL;
799
800 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
801 if (allocate_storage_unit(cpu) == FALSE) {
802 /*
803 * this can only happen if wrapping
804 * has been disabled
805 */
806 goto out;
807 }
808 goto retry_q;
809 }
810 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
811
812 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
813 goto retry_q;
814
815 kd = &kdsp_actual->kds_records[bindx];
816
817 kd->debugid = debugid;
818 kd->arg1 = arg1;
819 kd->arg2 = arg2;
820 kd->arg3 = arg3;
821 kd->arg4 = arg4;
822 kd->arg5 = arg5;
823
824 kdbg_set_timestamp_and_cpu(kd, now, cpu);
825
826 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
827 out:
828 enable_preemption();
829 out1:
830 if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
831 (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
832 uint32_t etype;
833 uint32_t stype;
834
835 etype = debugid & DBG_FUNC_MASK;
836 stype = debugid & DBG_SCALL_MASK;
837
838 if (etype == INTERRUPT || etype == MACH_vmfault ||
839 stype == BSC_SysCall || stype == MACH_SysCall) {
840
841 boolean_t need_kds_wakeup = FALSE;
842 boolean_t need_kde_wakeup = FALSE;
843
844 /*
845 * try to take the lock here to synchronize with the
846 * waiter entering the blocked state... use the try
847 * mode to prevent deadlocks caused by re-entering this
848 * routine due to various trace points triggered in the
849 * lck_spin_sleep_xxxx routines used to actually enter
850 * one of our 2 wait conditions... no problem if we fail,
851 * there will be lots of additional events coming in that
852 * will eventually succeed in grabbing this lock
853 */
854 s = ml_set_interrupts_enabled(FALSE);
855
856 if (lck_spin_try_lock(kdw_spin_lock)) {
857
858 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
859 kds_waiter = 0;
860 need_kds_wakeup = TRUE;
861 }
862 if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
863 kde_waiter = 0;
864 need_kde_wakeup = TRUE;
865 }
866 lck_spin_unlock(kdw_spin_lock);
867 }
868 ml_set_interrupts_enabled(s);
869
870 if (need_kds_wakeup == TRUE)
871 wakeup(&kds_waiter);
872 if (need_kde_wakeup == TRUE)
873 wakeup(&kde_waiter);
874 }
875 }
876 }
877
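/*
 * In-kernel entry points behind the KERNEL_DEBUG* macros in <sys/kdebug.h>.
 * kernel_debug() substitutes the current thread id for arg5, while
 * kernel_debug1() passes all five arguments through unchanged.
 */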
878 void
879 kernel_debug(
880 uint32_t debugid,
881 uintptr_t arg1,
882 uintptr_t arg2,
883 uintptr_t arg3,
884 uintptr_t arg4,
885 __unused uintptr_t arg5)
886 {
887 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
888 }
889
890 void
891 kernel_debug1(
892 uint32_t debugid,
893 uintptr_t arg1,
894 uintptr_t arg2,
895 uintptr_t arg3,
896 uintptr_t arg4,
897 uintptr_t arg5)
898 {
899 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
900 }
901
902 /*
903 * Support syscall SYS_kdebug_trace
904 */
905 int
906 kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
907 {
908 if ( __probable(kdebug_enable == 0) )
909 return(EINVAL);
910
911 kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);
912
913 return(0);
914 }
915
916
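/*
 * One-time allocation of the locks used by the trace facility: the sysctl
 * mutex that serializes control requests and the spin locks protecting the
 * storage-unit lists and the waiter state.
 */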
917 static void
918 kdbg_lock_init(void)
919 {
920 if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
921 return;
922
923 trace_handler_map_ctrl_page((uintptr_t)&kd_ctrl_page, sizeof(kd_ctrl_page), sizeof(struct kd_storage), sizeof(union kds_ptr));
924
925 /*
926 * allocate lock group attribute and group
927 */
928 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
929 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
930
931 /*
932 * allocate the lock attribute
933 */
934 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
935
936
937 /*
938 * allocate and initialize mutex's
939 */
940 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
941 kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
942 kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
943
944 kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
945 }
946
947
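/*
 * kdbg_bootstrap() clears the wrapped state and (re)creates the trace
 * buffers; kdbg_reinit() additionally stops tracing, waits out any in-flight
 * writers, and frees the old buffers and thread map first.
 */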
948 int
949 kdbg_bootstrap(boolean_t early_trace)
950 {
951 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
952
953 return (create_buffers(early_trace));
954 }
955
956 int
957 kdbg_reinit(boolean_t early_trace)
958 {
959 int ret = 0;
960
961 /*
962 * Disable trace collecting
963 * First make sure we're not in
964 * the middle of cutting a trace
965 */
966 kdbg_set_tracing_enabled(FALSE);
967
968 /*
969 * make sure the SLOW_NOLOG is seen
970 * by everyone that might be trying
971 * to cut a trace..
972 */
973 IOSleep(100);
974
975 delete_buffers();
976
977 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
978 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
979 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
980 kd_mapsize = 0;
981 kd_mapptr = (kd_threadmap *) 0;
982 kd_mapcount = 0;
983 }
984 ret = kdbg_bootstrap(early_trace);
985
986 RAW_file_offset = 0;
987 RAW_file_written = 0;
988
989 return(ret);
990 }
991
992 void
993 kdbg_trace_data(struct proc *proc, long *arg_pid)
994 {
995 if (!proc)
996 *arg_pid = 0;
997 else
998 *arg_pid = proc->p_pid;
999 }
1000
1001
1002 void
1003 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1004 {
1005 char *dbg_nameptr;
1006 int dbg_namelen;
1007 long dbg_parms[4];
1008
1009 if (!proc) {
1010 *arg1 = 0;
1011 *arg2 = 0;
1012 *arg3 = 0;
1013 *arg4 = 0;
1014 return;
1015 }
1016 /*
1017          * Collect the process name for tracing
1018 */
1019 dbg_nameptr = proc->p_comm;
1020 dbg_namelen = (int)strlen(proc->p_comm);
1021 dbg_parms[0]=0L;
1022 dbg_parms[1]=0L;
1023 dbg_parms[2]=0L;
1024 dbg_parms[3]=0L;
1025
1026 if(dbg_namelen > (int)sizeof(dbg_parms))
1027 dbg_namelen = (int)sizeof(dbg_parms);
1028
1029 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1030
1031 *arg1=dbg_parms[0];
1032 *arg2=dbg_parms[1];
1033 *arg3=dbg_parms[2];
1034 *arg4=dbg_parms[3];
1035 }
1036
1037 static void
1038 kdbg_resolve_map(thread_t th_act, void *opaque)
1039 {
1040 kd_threadmap *mapptr;
1041 krt_t *t = (krt_t *)opaque;
1042
1043 if (t->count < t->maxcount) {
1044 mapptr = &t->map[t->count];
1045 mapptr->thread = (uintptr_t)thread_tid(th_act);
1046
1047 (void) strlcpy (mapptr->command, t->atts->task_comm,
1048 sizeof(t->atts->task_comm));
1049 /*
1050 * Some kernel threads have no associated pid.
1051 * We still need to mark the entry as valid.
1052 */
1053 if (t->atts->pid)
1054 mapptr->valid = t->atts->pid;
1055 else
1056 mapptr->valid = 1;
1057
1058 t->count++;
1059 }
1060 }
1061
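/*
 * Build the thread map: walk the proc list to size the tables (with a 10%
 * fudge factor for processes created while we allocate), record a pid and
 * command name for each task, then iterate every task's threads to fill in
 * kd_mapptr.
 */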
1062 void
1063 kdbg_mapinit(void)
1064 {
1065 struct proc *p;
1066 struct krt akrt;
1067 int tts_count; /* number of task-to-string structures */
1068 struct tts *tts_mapptr;
1069 unsigned int tts_mapsize = 0;
1070 vm_offset_t tts_maptomem=0;
1071 int i;
1072
1073 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
1074 return;
1075
1076 /*
1077 * need to use PROC_SCANPROCLIST with proc_iterate
1078 */
1079 proc_list_lock();
1080
1081 /*
1082 * Calculate the sizes of map buffers
1083 */
1084 for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
1085 kd_mapcount += get_task_numacts((task_t)p->task);
1086 tts_count++;
1087 }
1088 proc_list_unlock();
1089
1090 /*
1091 * The proc count could change during buffer allocation,
1092 * so introduce a small fudge factor to bump up the
1093 * buffer sizes. This gives new tasks some chance of
1094          * making it into the tables. Bump up by 10%.
1095 */
1096 kd_mapcount += kd_mapcount/10;
1097 tts_count += tts_count/10;
1098
1099 kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
1100
1101 if ((kmem_alloc(kernel_map, & kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
1102 kd_mapptr = (kd_threadmap *) kd_maptomem;
1103 bzero(kd_mapptr, kd_mapsize);
1104 } else
1105 kd_mapptr = (kd_threadmap *) 0;
1106
1107 tts_mapsize = tts_count * sizeof(struct tts);
1108
1109 if ((kmem_alloc(kernel_map, & tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
1110 tts_mapptr = (struct tts *) tts_maptomem;
1111 bzero(tts_mapptr, tts_mapsize);
1112 } else
1113 tts_mapptr = (struct tts *) 0;
1114
1115 /*
1116          * We need to save the proc's command string
1117 * and take a reference for each task associated
1118 * with a valid process
1119 */
1120 if (tts_mapptr) {
1121 /*
1122 * should use proc_iterate
1123 */
1124 proc_list_lock();
1125
1126 for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
1127 if (p->p_lflag & P_LEXIT)
1128 continue;
1129
1130 if (p->task) {
1131 task_reference(p->task);
1132 tts_mapptr[i].task = p->task;
1133 tts_mapptr[i].pid = p->p_pid;
1134 (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
1135 i++;
1136 }
1137 }
1138 tts_count = i;
1139
1140 proc_list_unlock();
1141 }
1142
1143 if (kd_mapptr && tts_mapptr) {
1144 kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
1145
1146 /*
1147 * Initialize thread map data
1148 */
1149 akrt.map = kd_mapptr;
1150 akrt.count = 0;
1151 akrt.maxcount = kd_mapcount;
1152
1153 for (i = 0; i < tts_count; i++) {
1154 akrt.atts = &tts_mapptr[i];
1155 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
1156 task_deallocate((task_t) tts_mapptr[i].task);
1157 }
1158 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
1159 }
1160 }
1161
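/*
 * Tear down tracing completely: disable recording, drop the filter flags,
 * free the trace buffers and the thread map, and release the exclusive
 * owner (global_state_pid).
 */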
1162 static void
1163 kdbg_clear(void)
1164 {
1165 /*
1166 * Clean up the trace buffer
1167 * First make sure we're not in
1168 * the middle of cutting a trace
1169 */
1170 kdbg_set_tracing_enabled(FALSE);
1171
1172 /*
1173 * make sure the SLOW_NOLOG is seen
1174 * by everyone that might be trying
1175 * to cut a trace..
1176 */
1177 IOSleep(100);
1178
1179 global_state_pid = -1;
1180 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1181 kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
1182 kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
1183
1184 delete_buffers();
1185
1186 /* Clean up the thread map buffer */
1187 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1188 if (kd_mapptr) {
1189 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1190 kd_mapptr = (kd_threadmap *) 0;
1191 }
1192 kd_mapsize = 0;
1193 kd_mapcount = 0;
1194
1195 RAW_file_offset = 0;
1196 RAW_file_written = 0;
1197 }
1198
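/*
 * Include a single process in the trace: value1 is the pid; value2 == 1
 * turns on KDBG_PIDCHECK and marks the proc's p_kdebug flag, value2 == 0
 * clears the mark for that pid only.
 */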
1199 int
1200 kdbg_setpid(kd_regtype *kdr)
1201 {
1202 pid_t pid;
1203 int flag, ret=0;
1204 struct proc *p;
1205
1206 pid = (pid_t)kdr->value1;
1207 flag = (int)kdr->value2;
1208
1209 if (pid > 0) {
1210 if ((p = proc_find(pid)) == NULL)
1211 ret = ESRCH;
1212 else {
1213 if (flag == 1) {
1214 /*
1215 * turn on pid check for this and all pids
1216 */
1217 kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
1218 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1219 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1220
1221 p->p_kdebug = 1;
1222 } else {
1223 /*
1224 * turn off pid check for this pid value
1225 * Don't turn off all pid checking though
1226 *
1227 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1228 */
1229 p->p_kdebug = 0;
1230 }
1231 proc_rele(p);
1232 }
1233 }
1234 else
1235 ret = EINVAL;
1236
1237 return(ret);
1238 }
1239
1240 /* This is for pid exclusion in the trace buffer */
1241 int
1242 kdbg_setpidex(kd_regtype *kdr)
1243 {
1244 pid_t pid;
1245 int flag, ret=0;
1246 struct proc *p;
1247
1248 pid = (pid_t)kdr->value1;
1249 flag = (int)kdr->value2;
1250
1251 if (pid > 0) {
1252 if ((p = proc_find(pid)) == NULL)
1253 ret = ESRCH;
1254 else {
1255 if (flag == 1) {
1256 /*
1257 * turn on pid exclusion
1258 */
1259 kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
1260 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1261 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1262
1263 p->p_kdebug = 1;
1264 }
1265 else {
1266 /*
1267 * turn off pid exclusion for this pid value
1268 * Don't turn off all pid exclusion though
1269 *
1270 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1271 */
1272 p->p_kdebug = 0;
1273 }
1274 proc_rele(p);
1275 }
1276 } else
1277 ret = EINVAL;
1278
1279 return(ret);
1280 }
1281
1282
1283 /*
1284 * This is for setting a maximum decrementer value
1285 */
1286 int
1287 kdbg_setrtcdec(kd_regtype *kdr)
1288 {
1289 int ret = 0;
1290 natural_t decval;
1291
1292 decval = (natural_t)kdr->value1;
1293
1294 if (decval && decval < KDBG_MINRTCDEC)
1295 ret = EINVAL;
1296 else
1297 ret = ENOTSUP;
1298
1299 return(ret);
1300 }
1301
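/*
 * Install an event filter from user space: a class or subclass range, an
 * explicit debugid range, or up to four specific debugid values.
 * KDBG_TYPENONE clears the range/value state.
 */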
1302 int
1303 kdbg_setreg(kd_regtype * kdr)
1304 {
1305 int ret=0;
1306 unsigned int val_1, val_2, val;
1307 switch (kdr->type) {
1308
1309 case KDBG_CLASSTYPE :
1310 val_1 = (kdr->value1 & 0xff);
1311 val_2 = (kdr->value2 & 0xff);
1312 kdlog_beg = (val_1<<24);
1313 kdlog_end = (val_2<<24);
1314 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1315 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1316 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1317 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1318 break;
1319 case KDBG_SUBCLSTYPE :
1320 val_1 = (kdr->value1 & 0xff);
1321 val_2 = (kdr->value2 & 0xff);
1322 val = val_2 + 1;
1323 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1324 kdlog_end = ((val_1<<24) | (val << 16));
1325 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1326 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1327 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1328 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1329 break;
1330 case KDBG_RANGETYPE :
1331 kdlog_beg = (kdr->value1);
1332 kdlog_end = (kdr->value2);
1333 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1334 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1335 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1336 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1337 break;
1338 case KDBG_VALCHECK:
1339 kdlog_value1 = (kdr->value1);
1340 kdlog_value2 = (kdr->value2);
1341 kdlog_value3 = (kdr->value3);
1342 kdlog_value4 = (kdr->value4);
1343 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1344 kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
1345 kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
1346 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1347 break;
1348 case KDBG_TYPENONE :
1349 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1350
1351 if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
1352 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1353 else
1354 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1355
1356 kdlog_beg = 0;
1357 kdlog_end = 0;
1358 break;
1359 default :
1360 ret = EINVAL;
1361 break;
1362 }
1363 return(ret);
1364 }
1365
1366 int
1367 kdbg_getreg(__unused kd_regtype * kdr)
1368 {
1369 #if 0
1370 int i,j, ret=0;
1371 unsigned int val_1, val_2, val;
1372
1373 switch (kdr->type) {
1374 case KDBG_CLASSTYPE :
1375 val_1 = (kdr->value1 & 0xff);
1376 val_2 = val_1 + 1;
1377 kdlog_beg = (val_1<<24);
1378 kdlog_end = (val_2<<24);
1379 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1380 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1381 break;
1382 case KDBG_SUBCLSTYPE :
1383 val_1 = (kdr->value1 & 0xff);
1384 val_2 = (kdr->value2 & 0xff);
1385 val = val_2 + 1;
1386 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1387 kdlog_end = ((val_1<<24) | (val << 16));
1388 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1389 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1390 break;
1391 case KDBG_RANGETYPE :
1392 kdlog_beg = (kdr->value1);
1393 kdlog_end = (kdr->value2);
1394 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1395 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1396 break;
1397 case KDBG_TYPENONE :
1398 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1399 kdlog_beg = 0;
1400 kdlog_end = 0;
1401 break;
1402 default :
1403 ret = EINVAL;
1404 break;
1405 }
1406 #endif /* 0 */
1407 return(EINVAL);
1408 }
1409
1410
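/*
 * Copy the thread map out, either to the user buffer or, when a vnode is
 * supplied, appended to the RAW trace file (preceded by a RAW_header and
 * padded to a page boundary).  The map is freed before returning.
 */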
1411 int
1412 kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
1413 {
1414 int avail = *number;
1415 int ret = 0;
1416 uint32_t count = 0;
1417
1418 count = avail/sizeof (kd_threadmap);
1419
1420 if (count && (count <= kd_mapcount))
1421 {
1422 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
1423 {
1424 if (*number < kd_mapsize)
1425 ret = EINVAL;
1426 else
1427 {
1428 if (vp)
1429 {
1430 RAW_header header;
1431 clock_sec_t secs;
1432 clock_usec_t usecs;
1433 char *pad_buf;
1434 int pad_size;
1435
1436 header.version_no = RAW_VERSION1;
1437 header.thread_count = count;
1438
1439 clock_get_calendar_microtime(&secs, &usecs);
1440 header.TOD_secs = secs;
1441 header.TOD_usecs = usecs;
1442
1443 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
1444 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
1445 if (ret)
1446 goto write_error;
1447 RAW_file_offset += sizeof(RAW_header);
1448
1449 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
1450 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
1451 if (ret)
1452 goto write_error;
1453 RAW_file_offset += kd_mapsize;
1454
1455 pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
1456
1457 if (pad_size)
1458 {
1459 pad_buf = (char *)kalloc(pad_size);
1460 memset(pad_buf, 0, pad_size);
1461
1462 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
1463 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
1464 kfree(pad_buf, pad_size);
1465
1466 if (ret)
1467 goto write_error;
1468 RAW_file_offset += pad_size;
1469 }
1470 RAW_file_written += sizeof(RAW_header) + kd_mapsize + pad_size;
1471
1472 } else {
1473 if (copyout(kd_mapptr, buffer, kd_mapsize))
1474 ret = EINVAL;
1475 }
1476 }
1477 }
1478 else
1479 ret = EINVAL;
1480 }
1481 else
1482 ret = EINVAL;
1483
1484 if (ret && vp)
1485 {
1486 count = 0;
1487
1488 vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
1489 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
1490 RAW_file_offset += sizeof(uint32_t);
1491 RAW_file_written += sizeof(uint32_t);
1492 }
1493 write_error:
1494 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
1495 {
1496 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1497 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1498 kd_mapsize = 0;
1499 kd_mapptr = (kd_threadmap *) 0;
1500 kd_mapcount = 0;
1501 }
1502 return(ret);
1503 }
1504
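/*
 * Collect timestamps for the security server: allocate an entropy buffer
 * sized from *number, enable sampling, sleep until the buffer fills or the
 * timeout expires, then copy the samples out and tear the buffer down.
 */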
1505 int
1506 kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
1507 {
1508 int avail = *number;
1509 int ret = 0;
1510 int s;
1511 u_int64_t abstime;
1512 u_int64_t ns;
1513 int wait_result = THREAD_AWAKENED;
1514
1515
1516 if (kd_entropy_buffer)
1517 return(EBUSY);
1518
1519 if (ms_timeout < 0)
1520 return(EINVAL);
1521
1522 kd_entropy_count = avail/sizeof(uint64_t);
1523
1524 if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
1525 /*
1526 * Enforce maximum entropy entries
1527 */
1528 return(EINVAL);
1529 }
1530 kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);
1531
1532 /*
1533 * allocate entropy buffer
1534 */
1535 if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
1536 kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
1537 } else {
1538 kd_entropy_buffer = (uint64_t *) 0;
1539 kd_entropy_count = 0;
1540
1541 return (ENOMEM);
1542 }
1543 kd_entropy_indx = 0;
1544
1545 KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);
1546
1547 /*
1548 * Enable entropy sampling
1549 */
1550 kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);
1551
1552 if (ms_timeout) {
1553 ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
1554 nanoseconds_to_absolutetime(ns, &abstime );
1555 clock_absolutetime_interval_to_deadline( abstime, &abstime );
1556 } else
1557 abstime = 0;
1558
1559 s = ml_set_interrupts_enabled(FALSE);
1560 lck_spin_lock(kdw_spin_lock);
1561
1562 while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {
1563
1564 kde_waiter = 1;
1565
1566 if (abstime) {
1567 /*
1568 * wait for the specified timeout or
1569 * until we've hit our sample limit
1570 */
1571 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
1572 } else {
1573 /*
1574 * wait until we've hit our sample limit
1575 */
1576 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
1577 }
1578 kde_waiter = 0;
1579 }
1580 lck_spin_unlock(kdw_spin_lock);
1581 ml_set_interrupts_enabled(s);
1582
1583 /*
1584 * Disable entropy sampling
1585 */
1586 kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);
1587
1588 KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);
1589
1590 *number = 0;
1591 ret = 0;
1592
1593 if (kd_entropy_indx > 0) {
1594 /*
1595 * copyout the buffer
1596 */
1597 if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
1598 ret = EINVAL;
1599 else
1600 *number = kd_entropy_indx * sizeof(uint64_t);
1601 }
1602 /*
1603 * Always cleanup
1604 */
1605 kd_entropy_count = 0;
1606 kd_entropy_indx = 0;
1607 kd_entropy_buftomem = 0;
1608 kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
1609 kd_entropy_buffer = (uint64_t *) 0;
1610
1611 return(ret);
1612 }
1613
1614
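/*
 * Clamp the requested number of trace entries so the buffers never consume
 * more than half of sane_size.
 */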
1615 static void
1616 kdbg_set_nkdbufs(unsigned int value)
1617 {
1618 /*
1619 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
1620 * 'value' is the desired number of trace entries
1621 */
1622 unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
1623
1624 if (value <= max_entries)
1625 nkdbufs = value;
1626 else
1627 nkdbufs = max_entries;
1628 }
1629
1630
1631 /*
1632 * This function is provided for the CHUD toolkit only.
1633 * int val:
1634 * zero disables kdebug_chudhook function call
1635 * non-zero enables kdebug_chudhook function call
1636 * char *fn:
1637 * address of the enabled kdebug_chudhook function
1638 */
1639
1640 void
1641 kdbg_control_chud(int val, void *fn)
1642 {
1643 kdbg_lock_init();
1644
1645 if (val) {
1646 /* enable chudhook */
1647 kdebug_chudhook = fn;
1648 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
1649 }
1650 else {
1651 /* disable chudhook */
1652 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
1653 kdebug_chudhook = 0;
1654 }
1655 }
1656
1657
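/*
 * sysctl backend for the KERN_KD* control operations: reports buffer state,
 * enforces single-owner access via global_state_pid, and dispatches setup,
 * enable, filter, read and write requests.
 */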
1658 int
1659 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1660 {
1661 int ret = 0;
1662 size_t size = *sizep;
1663 unsigned int value = 0;
1664 kd_regtype kd_Reg;
1665 kbufinfo_t kd_bufinfo;
1666 pid_t curpid;
1667 proc_t p, curproc;
1668
1669 if (name[0] == KERN_KDGETENTROPY ||
1670 name[0] == KERN_KDWRITETR ||
1671 name[0] == KERN_KDWRITEMAP ||
1672 name[0] == KERN_KDEFLAGS ||
1673 name[0] == KERN_KDDFLAGS ||
1674 name[0] == KERN_KDENABLE ||
1675 name[0] == KERN_KDSETBUF) {
1676
1677 if ( namelen < 2 )
1678 return(EINVAL);
1679 value = name[1];
1680 }
1681
1682 kdbg_lock_init();
1683
1684 if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
1685 return(ENOSPC);
1686
1687 lck_mtx_lock(kd_trace_mtx_sysctl);
1688
1689 if (name[0] == KERN_KDGETBUF) {
1690 /*
1691          * Does not alter the global_state_pid.
1692 * This is a passive request.
1693 */
1694 if (size < sizeof(kd_bufinfo.nkdbufs)) {
1695 /*
1696 * There is not enough room to return even
1697 * the first element of the info structure.
1698 */
1699 ret = EINVAL;
1700 goto out;
1701 }
1702 kd_bufinfo.nkdbufs = nkdbufs;
1703 kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
1704
1705 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
1706 kd_bufinfo.nolog = 1;
1707 else
1708 kd_bufinfo.nolog = 0;
1709
1710 kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
1711 #if defined(__LP64__)
1712 kd_bufinfo.flags |= KDBG_LP64;
1713 #endif
1714 kd_bufinfo.bufid = global_state_pid;
1715
1716 if (size >= sizeof(kd_bufinfo)) {
1717 /*
1718 * Provide all the info we have
1719 */
1720 if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
1721 ret = EINVAL;
1722 } else {
1723 /*
1724 * For backwards compatibility, only provide
1725 * as much info as there is room for.
1726 */
1727 if (copyout(&kd_bufinfo, where, size))
1728 ret = EINVAL;
1729 }
1730 goto out;
1731
1732 } else if (name[0] == KERN_KDGETENTROPY) {
1733 if (kd_entropy_buffer)
1734 ret = EBUSY;
1735 else
1736 ret = kdbg_getentropy(where, sizep, value);
1737 goto out;
1738 }
1739
1740 if ((curproc = current_proc()) != NULL)
1741 curpid = curproc->p_pid;
1742 else {
1743 ret = ESRCH;
1744 goto out;
1745 }
1746 if (global_state_pid == -1)
1747 global_state_pid = curpid;
1748 else if (global_state_pid != curpid) {
1749 if ((p = proc_find(global_state_pid)) == NULL) {
1750 /*
1751 * The global pid no longer exists
1752 */
1753 global_state_pid = curpid;
1754 } else {
1755 /*
1756 * The global pid exists, deny this request
1757 */
1758 proc_rele(p);
1759
1760 ret = EBUSY;
1761 goto out;
1762 }
1763 }
1764
1765 switch(name[0]) {
1766 case KERN_KDEFLAGS:
1767 value &= KDBG_USERFLAGS;
1768 kd_ctrl_page.kdebug_flags |= value;
1769 break;
1770 case KERN_KDDFLAGS:
1771 value &= KDBG_USERFLAGS;
1772 kd_ctrl_page.kdebug_flags &= ~value;
1773 break;
1774 case KERN_KDENABLE:
1775 /*
1776 * used to enable or disable
1777 */
1778 if (value) {
1779 /*
1780 * enable only if buffer is initialized
1781 */
1782 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
1783 ret = EINVAL;
1784 break;
1785 }
1786 kdbg_mapinit();
1787
1788 kdbg_set_tracing_enabled(TRUE);
1789 }
1790 else
1791 kdbg_set_tracing_enabled(FALSE);
1792 break;
1793 case KERN_KDSETBUF:
1794 kdbg_set_nkdbufs(value);
1795 break;
1796 case KERN_KDSETUP:
1797 ret = kdbg_reinit(FALSE);
1798 break;
1799 case KERN_KDREMOVE:
1800 kdbg_clear();
1801 break;
1802 case KERN_KDSETREG:
1803 if(size < sizeof(kd_regtype)) {
1804 ret = EINVAL;
1805 break;
1806 }
1807 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1808 ret = EINVAL;
1809 break;
1810 }
1811 ret = kdbg_setreg(&kd_Reg);
1812 break;
1813 case KERN_KDGETREG:
1814 if (size < sizeof(kd_regtype)) {
1815 ret = EINVAL;
1816 break;
1817 }
1818 ret = kdbg_getreg(&kd_Reg);
1819 if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
1820 ret = EINVAL;
1821 }
1822 break;
1823 case KERN_KDREADTR:
1824 ret = kdbg_read(where, sizep, NULL, NULL);
1825 break;
1826 case KERN_KDWRITETR:
1827 case KERN_KDWRITEMAP:
1828 {
1829 struct vfs_context context;
1830 struct fileproc *fp;
1831 size_t number;
1832 vnode_t vp;
1833 int fd;
1834
1835 if (name[0] == KERN_KDWRITETR) {
1836 int s;
1837 int wait_result = THREAD_AWAKENED;
1838 u_int64_t abstime;
1839 u_int64_t ns;
1840
1841 if (*sizep) {
1842 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
1843 nanoseconds_to_absolutetime(ns, &abstime );
1844 clock_absolutetime_interval_to_deadline( abstime, &abstime );
1845 } else
1846 abstime = 0;
1847
1848 s = ml_set_interrupts_enabled(FALSE);
1849 lck_spin_lock(kdw_spin_lock);
1850
1851 while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
1852
1853 kds_waiter = 1;
1854
1855 if (abstime)
1856 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
1857 else
1858 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
1859
1860 kds_waiter = 0;
1861 }
1862 lck_spin_unlock(kdw_spin_lock);
1863 ml_set_interrupts_enabled(s);
1864 }
1865 p = current_proc();
1866 fd = value;
1867
1868 proc_fdlock(p);
1869 if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
1870 proc_fdunlock(p);
1871 break;
1872 }
1873 context.vc_thread = current_thread();
1874 context.vc_ucred = fp->f_fglob->fg_cred;
1875
1876 if (fp->f_fglob->fg_type != DTYPE_VNODE) {
1877 fp_drop(p, fd, fp, 1);
1878 proc_fdunlock(p);
1879
1880 ret = EBADF;
1881 break;
1882 }
1883 vp = (struct vnode *)fp->f_fglob->fg_data;
1884 proc_fdunlock(p);
1885
1886 if ((ret = vnode_getwithref(vp)) == 0) {
1887
1888 if (name[0] == KERN_KDWRITETR) {
1889 number = nkdbufs * sizeof(kd_buf);
1890
1891 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
1892 ret = kdbg_read(0, &number, vp, &context);
1893 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);
1894
1895 *sizep = number;
1896 } else {
1897 number = kd_mapsize;
1898 kdbg_readmap(0, &number, vp, &context);
1899 }
1900 vnode_put(vp);
1901 }
1902 fp_drop(p, fd, fp, 0);
1903
1904 break;
1905 }
1906 case KERN_KDPIDTR:
1907 if (size < sizeof(kd_regtype)) {
1908 ret = EINVAL;
1909 break;
1910 }
1911 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1912 ret = EINVAL;
1913 break;
1914 }
1915 ret = kdbg_setpid(&kd_Reg);
1916 break;
1917 case KERN_KDPIDEX:
1918 if (size < sizeof(kd_regtype)) {
1919 ret = EINVAL;
1920 break;
1921 }
1922 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1923 ret = EINVAL;
1924 break;
1925 }
1926 ret = kdbg_setpidex(&kd_Reg);
1927 break;
1928 case KERN_KDTHRMAP:
1929 ret = kdbg_readmap(where, sizep, NULL, NULL);
1930 break;
1931 case KERN_KDSETRTCDEC:
1932 if (size < sizeof(kd_regtype)) {
1933 ret = EINVAL;
1934 break;
1935 }
1936 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1937 ret = EINVAL;
1938 break;
1939 }
1940 ret = kdbg_setrtcdec(&kd_Reg);
1941 break;
1942
1943 default:
1944 ret = EINVAL;
1945 }
1946 out:
1947 lck_mtx_unlock(kd_trace_mtx_sysctl);
1948
1949 return(ret);
1950 }
1951
1952
1953 /*
1954 * This code can run for the most part concurrently with kernel_debug_internal()...
1955 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
1956 * synchronize with the recording side of this puzzle... otherwise, we are able to
1957 * move through the lists w/o use of any locks
1958 */
1959 int
1960 kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
1961 {
1962 unsigned int count;
1963 unsigned int cpu, min_cpu;
1964 uint64_t mintime, t;
1965 int error = 0;
1966 kd_buf *tempbuf;
1967 uint32_t rcursor;
1968 kd_buf lostevent;
1969 union kds_ptr kdsp;
1970 struct kd_storage *kdsp_actual;
1971 struct kd_bufinfo *kdbp;
1972 struct kd_bufinfo *min_kdbp;
1973 uint32_t tempbuf_count;
1974 uint32_t tempbuf_number;
1975 uint32_t old_kdebug_flags;
1976 uint32_t old_kdebug_slowcheck;
1977 boolean_t lostevents = FALSE;
1978 boolean_t out_of_events = FALSE;
1979
1980 count = *number/sizeof(kd_buf);
1981 *number = 0;
1982
1983 if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
1984 return EINVAL;
1985
1986 memset(&lostevent, 0, sizeof(lostevent));
1987 lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);
1988
1989 /*
1990 * because we hold kd_trace_mtx_sysctl, no other control threads can
1991 * be playing with kdebug_flags... the code that cuts new events could
1992 * be running, but it grabs kds_spin_lock if it needs to acquire a new
1993  * storage chunk which is where it examines kdebug_flags... if it's adding
1994 * to the same chunk we're reading from, no problem...
1995 */
1996
1997 disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
1998
1999 if (count > nkdbufs)
2000 count = nkdbufs;
2001
2002 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2003 tempbuf_count = KDCOPYBUF_COUNT;
2004
2005 while (count) {
2006 tempbuf = kdcopybuf;
2007 tempbuf_number = 0;
2008
2009 while (tempbuf_count) {
2010 mintime = 0xffffffffffffffffULL;
2011 min_kdbp = NULL;
2012 min_cpu = 0;
2013
2014 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {
2015
2016 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
2017 continue;
2018 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2019
2020 rcursor = kdsp_actual->kds_readlast;
2021
2022 if (rcursor == kdsp_actual->kds_bufindx)
2023 continue;
2024
2025 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
2026
2027 if (t < kdsp_actual->kds_timestamp) {
2028 /*
2029 * indicates we've not yet completed filling
2030 * in this event...
2031 * this should only occur when we're looking
2032 * at the buf that the record head is utilizing
2033 * we'll pick these events up on the next
2034 * call to kdbg_read
2035 * we bail at this point so that we don't
2036 * get an out-of-order timestream by continuing
2037 * to read events from the other CPUs' timestream(s)
2038 */
2039 out_of_events = TRUE;
2040 break;
2041 }
2042 if (t < mintime) {
2043 mintime = t;
2044 min_kdbp = kdbp;
2045 min_cpu = cpu;
2046 }
2047 }
2048 if (min_kdbp == NULL || out_of_events == TRUE) {
2049 /*
2050 * all buffers ran empty
2051 */
2052 out_of_events = TRUE;
2053 break;
2054 }
2055 kdsp = min_kdbp->kd_list_head;
2056 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2057
2058 if (kdsp_actual->kds_lostevents == TRUE) {
2059 lostevent.timestamp = kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp;
2060 *tempbuf = lostevent;
2061
2062 kdsp_actual->kds_lostevents = FALSE;
2063 lostevents = TRUE;
2064
2065 goto nextevent;
2066 }
2067 *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
2068
2069 if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
2070 release_storage_unit(min_cpu, kdsp.raw);
2071
2072 /*
2073 * Watch for out of order timestamps
2074 */
2075 if (mintime < min_kdbp->kd_prev_timebase) {
2076 /*
2077 * if so, use the previous timestamp + 1 cycle
2078 */
2079 min_kdbp->kd_prev_timebase++;
2080 kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
2081 } else
2082 min_kdbp->kd_prev_timebase = mintime;
2083 nextevent:
2084 tempbuf_count--;
2085 tempbuf_number++;
2086 tempbuf++;
2087
2088 if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
2089 break;
2090 }
2091 if (tempbuf_number) {
2092
2093 if (vp) {
2094 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
2095 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2096
2097 RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
2098
2099 if (RAW_file_written >= RAW_FLUSH_SIZE) {
2100 cluster_push(vp, 0);
2101
2102 RAW_file_written = 0;
2103 }
2104 } else {
2105 error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
2106 buffer += (tempbuf_number * sizeof(kd_buf));
2107 }
2108 if (error) {
2109 *number = 0;
2110 error = EINVAL;
2111 break;
2112 }
2113 count -= tempbuf_number;
2114 *number += tempbuf_number;
2115 }
2116 if (out_of_events == TRUE)
2117 /*
2118 * all trace buffers are empty
2119 */
2120 break;
2121
2122 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2123 tempbuf_count = KDCOPYBUF_COUNT;
2124 }
2125 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
2126 enable_wrap(old_kdebug_slowcheck, lostevents);
2127 }
2128 return (error);
2129 }
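/*
 * Editor's illustrative sketch (not part of the original file): the buffers
 * drained by kdbg_read() above are normally consumed from user space through
 * the kdebug sysctl interface serviced by kdbg_control().  A minimal reader
 * might look roughly like the following; the MIB layout and the KERN_KDEBUG /
 * KERN_KDREADTR op codes are assumed from <sys/sysctl.h> and <sys/kdebug.h>,
 * and note that, per the *number accounting above, the size argument appears
 * to come back as an event count rather than a byte count.
 */
#if 0   /* userland example only -- never compiled into the kernel */
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <stdlib.h>

static size_t
read_trace_events(kd_buf *events, size_t max_events)
{
        int     mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDREADTR };
        size_t  size = max_events * sizeof(kd_buf);

        /* kernel fills 'events' and rewrites 'size' (events returned, assumed) */
        if (sysctl(mib, 3, events, &size, NULL, 0) < 0)
                return (0);
        return (size);
}
#endif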
2130
2131
2132 unsigned char *getProcName(struct proc *proc);
2133 unsigned char *getProcName(struct proc *proc) {
2134
2135 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
2136
2137 }
2138
2139 #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
2140 #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
2141 #if defined(__i386__) || defined (__x86_64__)
2142 #define TRAP_DEBUGGER __asm__ volatile("int3");
2143 #endif
2144
2145 #define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
2146
2147 /* Initialize the mutex governing access to the stack snapshot subsystem */
2148 __private_extern__ void
2149 stackshot_lock_init( void )
2150 {
2151 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
2152
2153 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
2154
2155 stackshot_subsys_lck_attr = lck_attr_alloc_init();
2156
2157 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
2158 }
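/*
 * Editor's illustrative sketch (not part of the original file): the mutex set
 * up here must be initialized exactly once, before the first stack_snapshot()
 * request; callers then serialize through the macros defined above.  The init
 * call site shown is an assumption about early BSD startup, not something
 * this file defines.
 */
#if 0   /* example only */
static void
example_stackshot_bringup(void)
{
        stackshot_lock_init();          /* one-time mutex setup    */

        STACKSHOT_SUBSYS_LOCK();        /* later: serialize users  */
        /* ... take or inspect a snapshot ... */
        STACKSHOT_SUBSYS_UNLOCK();
}
#endif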
2159
2160 /*
2161 * stack_snapshot: Obtains a coherent set of stack traces for all threads
2162 * on the system, tracing both kernel and user stacks
2163 * where available. Uses machine specific trace routines
2164 * for ppc, ppc64 and x86.
2165 * Inputs: uap->pid - process id of process to be traced, or -1
2166 * for the entire system
2167 * uap->tracebuf - address of the user space destination
2168 * buffer
2169 * uap->tracebuf_size - size of the user space trace buffer
2170  *              uap->flags - various options, including the maximum
2171  *                      number of frames to trace.
2172 * Outputs: EPERM if the caller is not privileged
2173 * EINVAL if the supplied trace buffer isn't sanely sized
2174 * ENOMEM if we don't have enough memory to satisfy the
2175 * request
2176 * ENOENT if the target pid isn't found
2177 * ENOSPC if the supplied buffer is insufficient
2178 * *retval contains the number of bytes traced, if successful
2179 * and -1 otherwise. If the request failed due to
2180 * tracebuffer exhaustion, we copyout as much as possible.
2181 */
2182 int
2183 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
2184 int error = 0;
2185
2186 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
2187 return(error);
2188
2189 return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
2190 uap->flags, uap->dispatch_offset, retval);
2191 }
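/*
 * Editor's illustrative sketch (not part of the original file): user space
 * reaches the wrapper above through a dedicated syscall with no libc wrapper,
 * so callers typically go through syscall(2).  SYS_stack_snapshot is assumed
 * to be defined in <sys/syscall.h> for this release, and passing 0 for flags
 * and dispatch_offset is only a guess at "default options".
 */
#if 0   /* userland example only */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int
take_stackshot(pid_t pid, void *buf, uint32_t bufsize)
{
        /* pid == -1 snapshots every task on the system */
        int bytes = syscall(SYS_stack_snapshot, pid, buf, bufsize,
                            0 /* flags */, 0 /* dispatch_offset */);

        if (bytes < 0)
                perror("stack_snapshot");
        return (bytes);         /* number of bytes traced on success */
}
#endif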
2192
2193 int
2194 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
2195 {
2196 int error = 0;
2197 unsigned bytesTraced = 0;
2198 boolean_t istate;
2199
2200 *retval = -1;
2201 /* Serialize tracing */
2202 STACKSHOT_SUBSYS_LOCK();
2203
2204 if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
2205 error = EINVAL;
2206 goto error_exit;
2207 }
2208
2209 assert(stackshot_snapbuf == NULL);
2210 if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
2211 error = ENOMEM;
2212 goto error_exit;
2213 }
2214
2215 if (panic_active()) {
2216 error = ENOMEM;
2217 goto error_exit;
2218 }
2219
2220 istate = ml_set_interrupts_enabled(FALSE);
2221         /* Preload trace parameters */
2222 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
2223
2224 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
2225 * the trace buffer
2226 */
2227
2228 TRAP_DEBUGGER;
2229
2230 ml_set_interrupts_enabled(istate);
2231
2232 bytesTraced = kdp_stack_snapshot_bytes_traced();
2233
2234 if (bytesTraced > 0) {
2235 if ((error = copyout(stackshot_snapbuf, tracebuf,
2236 ((bytesTraced < tracebuf_size) ?
2237 bytesTraced : tracebuf_size))))
2238 goto error_exit;
2239 *retval = bytesTraced;
2240 }
2241 else {
2242 error = ENOENT;
2243 goto error_exit;
2244 }
2245
2246 error = kdp_stack_snapshot_geterror();
2247 if (error == -1) {
2248 error = ENOSPC;
2249 *retval = -1;
2250 goto error_exit;
2251 }
2252
2253 error_exit:
2254 if (stackshot_snapbuf != NULL)
2255 kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
2256 stackshot_snapbuf = NULL;
2257 STACKSHOT_SUBSYS_UNLOCK();
2258 return error;
2259 }
2260
2261 void
2262 start_kern_tracing(unsigned int new_nkdbufs) {
2263
2264 if (!new_nkdbufs)
2265 return;
2266 kdbg_set_nkdbufs(new_nkdbufs);
2267 kdbg_lock_init();
2268 kdbg_reinit(TRUE);
2269 kdbg_set_tracing_enabled(TRUE);
2270
2271 #if defined(__i386__) || defined(__x86_64__)
2272 uint64_t now = mach_absolute_time();
2273
2274 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
2275 (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
2276 (uint32_t)(now >> 32), (uint32_t)now,
2277 0);
2278 #endif
2279 printf("kernel tracing started\n");
2280 }
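/*
 * Editor's illustrative sketch (not part of the original file): the routine
 * above is intended for early boot, where the buffer count typically comes
 * from a boot argument.  The "trace" boot-arg name and the use of
 * PE_parse_boot_argn() are assumptions about the caller, not something this
 * file defines.
 */
#if 0   /* example of an early-boot caller */
#include <pexpert/pexpert.h>    /* PE_parse_boot_argn() */

static void
example_boot_trace_setup(void)
{
        unsigned int new_nkdbufs = 0;

        /* e.g. booting with trace=100000 would pre-allocate 100000 kd_bufs */
        if (PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs)))
                start_kern_tracing(new_nkdbufs);
}
#endif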
2281
2282 void
2283 kdbg_dump_trace_to_file(const char *filename)
2284 {
2285 vfs_context_t ctx;
2286 vnode_t vp;
2287 int error;
2288 size_t number;
2289
2290
2291 if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
2292 return;
2293
2294 if (global_state_pid != -1) {
2295 if ((proc_find(global_state_pid)) != NULL) {
2296 /*
2297 * The global pid exists, we're running
2298 * due to fs_usage, latency, etc...
2299 * don't cut the panic/shutdown trace file
2300 */
2301 return;
2302 }
2303 }
2304 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
2305
2306 kdebug_enable = 0;
2307 kd_ctrl_page.enabled = 0;
2308
2309 ctx = vfs_context_kernel();
2310
2311 if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
2312 return;
2313
2314 number = kd_mapsize;
2315 kdbg_readmap(0, &number, vp, ctx);
2316
2317 number = nkdbufs*sizeof(kd_buf);
2318 kdbg_read(0, &number, vp, ctx);
2319
2320 vnode_close(vp, FWRITE, ctx);
2321
2322 sync(current_proc(), (void *)NULL, (int *)NULL);
2323 }
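/*
 * Editor's illustrative sketch (not part of the original file): this routine
 * is meant for the panic/shutdown path, preserving whatever the trace buffers
 * hold (thread map first, then the raw kd_buf stream) unless a live tracing
 * tool owns the buffers.  The path below is purely hypothetical; the real
 * caller and file name live outside this file.
 */
#if 0   /* example only */
static void
example_emergency_dump(void)
{
        kdbg_dump_trace_to_file("/var/log/example_kernel_trace");
}
#endif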
2324
2325 /* Helper function for filling in the BSD name for an address space
2326 * Defined here because the machine bindings know only Mach threads
2327 * and nothing about BSD processes.
2328 *
2329 * FIXME: need to grab a lock during this?
2330 */
2331 void kdbg_get_task_name(char* name_buf, int len, task_t task)
2332 {
2333 proc_t proc;
2334
2335 /* Note: we can't use thread->task (and functions that rely on it) here
2336 * because it hasn't been initialized yet when this function is called.
2337 * We use the explicitly-passed task parameter instead.
2338 */
2339 proc = get_bsdtask_info(task);
2340 if (proc != PROC_NULL)
2341 snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
2342 else
2343 snprintf(name_buf, len, "%p [!bsd]", task);
2344 }
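/*
 * Editor's illustrative sketch (not part of the original file): typical use
 * from machine-level trace code, which only has a task_t in hand.  The
 * 64-byte buffer size is arbitrary.
 */
#if 0   /* example only */
static void
example_log_task(task_t task)
{
        char name_buf[64];

        kdbg_get_task_name(name_buf, sizeof(name_buf), task);
        printf("tracing task %s\n", name_buf);  /* e.g. "launchd/1" or "0x... [!bsd]" */
}
#endif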
2345
2346
2347
2348 #if defined(NATIVE_TRACE_FACILITY)
2349 void trace_handler_map_ctrl_page(__unused uintptr_t addr, __unused size_t ctrl_page_size, __unused size_t storage_size, __unused size_t kds_ptr_size)
2350 {
2351 }
2352 void trace_handler_map_bufinfo(__unused uintptr_t addr, __unused size_t size)
2353 {
2354 }
2355 void trace_handler_unmap_bufinfo(void)
2356 {
2357 }
2358 void trace_handler_map_buffer(__unused int index, __unused uintptr_t addr, __unused size_t size)
2359 {
2360 }
2361 void trace_handler_unmap_buffer(__unused int index)
2362 {
2363 }
2364 #endif