/* bsd/kern/kdebug.c (from xnu-1504.15.3) */
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock.h>
#endif
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>

/* XXX these should have prototypes, but Mach does not provide them */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

#ifdef ppc
extern uint32_t maxDec;
#endif

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for the security server's entropy needs */
uint64_t *kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
vm_offset_t kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

struct kd_storage {
	struct kd_storage *kds_next;
	kd_buf	*kds_bufptr;
	kd_buf	*kds_buflast;
	kd_buf	*kds_readlast;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};

#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
	struct kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

struct kd_storage *kds_free_list = NULL;
struct kd_storage_buffers *kd_bufs = NULL;
int n_storage_units = 0;
int n_storage_buffers = 0;

struct kd_bufinfo {
	struct kd_storage *kd_list_head;
	struct kd_storage *kd_list_tail;
	struct kd_storage *kd_active;
	uint64_t kd_prev_timebase;
} __attribute__((aligned(CPU_CACHE_SIZE)));

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	2048
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

unsigned int nkdbufs = 8192;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_spin_t *kds_spin_lock;
static lck_mtx_t *kd_trace_mtx_sysctl;
static lck_grp_t *kd_trace_mtx_sysctl_grp;
static lck_attr_t *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t *stackshot_subsys_lck_attr;
static lck_mtx_t stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t kd_maptomem = 0;

off_t RAW_file_offset = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

/* task-to-string structure */
struct tts
{
	task_t	task;		/* from the proc's task */
	pid_t	pid;		/* from the proc's p_pid */
	char	task_comm[20];	/* from the proc's p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to the CHUD toolkit function */

__private_extern__ void stackshot_lock_init(void) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
	if (kdebug_enable == 0)
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return(0);
}

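/*
 * Usage sketch (user space, not part of this file): a process can cut its own
 * trace points through this syscall. The debugid below is a hypothetical
 * example; any class/subclass/code built with KDBG_CODE() from <sys/kdebug.h>
 * would do.
 *
 *	#include <sys/kdebug.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	uint32_t dbgid = KDBG_CODE(DBG_APPS, 1, 42);	// hypothetical code
 *
 *	syscall(SYS_kdebug_trace, dbgid | DBG_FUNC_START, 1, 2, 3, 4);
 *	// ... work being measured ...
 *	syscall(SYS_kdebug_trace, dbgid | DBG_FUNC_END, 0, 0, 0, 0);
 */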

static int
create_buffers(void)
{
	int i;
	int p_buffer_size;
	int f_buffer_size;
	int f_buffers;
	int error = 0;

	if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	kd_bufs = NULL;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		kd_bufs[i].kdsb_size = p_buffer_size;
	}

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int n_elements;
		int n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next = kds_free_list;
			kds_free_list = &kds[n];

			kds[n].kds_buflast = &kds[n].kds_records[EVENTS_PER_STORAGE_UNIT];
		}
	}
	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	kdebug_flags |= KDBG_BUFINIT;
out:
	if (error)
		delete_buffers();

	return(error);
}
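
/*
 * Worked sizing example (illustrative numbers, not from this file): with
 * kd_cpus == 2 and the default nkdbufs of 8192, the floor is
 * 2 * MIN_STORAGE_UNITS_PER_CPU(4) = 8 storage units, and
 * 8192 / EVENTS_PER_STORAGE_UNIT(2048) = 4 falls below it, so n_storage_units
 * becomes 8 and nkdbufs is rounded up to 8 * 2048 = 16384 events. Since
 * N_STORAGE_UNITS_PER_BUFFER allows far more than 8 units per 128MB buffer,
 * f_buffers == 0 and all 8 units land in the single remainder ("p") buffer,
 * giving n_storage_buffers == 1.
 */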

static void
delete_buffers(void)
{
	int i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr)
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kds_free_list = NULL;

	kdebug_flags &= ~KDBG_BUFINIT;
}

static void
release_storage_unit(struct kd_bufinfo *kdbp, struct kd_storage *kdsp)
{
	int s = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (kdsp == kdbp->kd_list_head) {
		/*
		 * It's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * Since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context.
		 */
		kdbp->kd_list_head = kdsp->kds_next;

		kdsp->kds_next = kds_free_list;
		kds_free_list = kdsp;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

/*
 * Interrupts are disabled when we enter this routine.
 */
static struct kd_storage *
allocate_storage_unit(struct kd_bufinfo *kdbp)
{
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp_vict, *kdbp_try;
	uint64_t oldest_ts, ts;

	lck_spin_lock(kds_spin_lock);

	if ((kdsp = kds_free_list))
		kds_free_list = kdsp->kds_next;
	else {
		if (kdebug_flags & KDBG_NOWRAP) {
			kdebug_slowcheck |= SLOW_NOLOG;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

			if ((kdsp = kdbp_try->kd_list_head) == NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}
			if (kdsp == kdbp_try->kd_active) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... this state
				 * also implies that this is the only unit assigned
				 * to this CPU, so we can immediately move on
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&(kdbp_try->kd_list_head->kds_records[0]));

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
#if 1
		if (kdbp_vict == NULL) {
			kdebug_enable = 0;

			panic("allocate_storage_unit: no storage units available\n");
		}
#endif
		kdsp = kdbp_vict->kd_list_head;

		kdbp_vict->kd_list_head = kdsp->kds_next;

		kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp->kds_next = NULL;
	kdsp->kds_bufptr = &kdsp->kds_records[0];
	kdsp->kds_readlast = kdsp->kds_bufptr;

	if (kdbp->kd_list_head == NULL)
		kdbp->kd_list_head = kdsp;
	else
		kdbp->kd_list_tail->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);

	return (kdsp);
}

static void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag)
{
	struct proc *curproc;
	uint64_t now;
	int s;
	kd_buf *kd;
	int cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}
	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}

		if (kd_entropy_indx == kd_entropy_count) {
			/*
			 * Disable entropy collection
			 */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/*
		 * If the kdebug flag is not set for the current proc, drop the event
		 */
		curproc = current_proc();

		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/*
		 * If the kdebug flag is set for the current proc, drop the event
		 */
		curproc = current_proc();

		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}

record_trace:
	kdbp = &kdbip[cpu];

	if ((kdsp = kdbp->kd_active) == NULL) {
		if ((kdsp = allocate_storage_unit(kdbp)) == NULL) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		kdbp->kd_active = kdsp;
	}
	kd = kdsp->kds_bufptr;

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	kdsp->kds_bufptr++;

	if (kdsp->kds_bufptr >= kdsp->kds_buflast)
		kdbp->kd_active = NULL;
out:
	ml_set_interrupts_enabled(s);
}

void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}
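
/*
 * Kernel-side usage sketch (not part of this file): most call sites go through
 * the KERNEL_DEBUG_CONSTANT()/KERNEL_DEBUG() macros from <sys/kdebug.h>, which
 * compile down to kernel_debug() when tracing is built in, e.g.:
 *
 *	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
 *			      arg1, arg2, arg3, arg4, 0);
 *
 * Note that kernel_debug() ignores the caller's arg5 and substitutes the
 * current thread id, while kernel_debug1() records arg5 as supplied.
 */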

static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.logical_cpu_max;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate the lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize the spin lock and mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}

int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collection; first make sure we're not
	 * in the middle of cutting a trace.
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap();

	return(ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}


void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/*
	 * Collect the process name (p_comm) for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}
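
/*
 * Illustrative example (not from this file): dbg_parms is 4 longs, i.e. 16
 * bytes on ILP32 or 32 bytes on LP64, so that many bytes of p_comm are packed
 * into arg1..arg4. For p_comm "ls", arg1 carries the bytes 'l','s','\0',...
 * and arg2..arg4 remain 0.
 */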

static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread = (uintptr_t)thread_tid(th_act);

		(void) strlcpy(mapptr->command, t->atts->task_comm,
			       sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}

void
kdbg_mapinit(void)
{
	struct proc *p;
	struct krt akrt;
	int tts_count;		/* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	vm_offset_t tts_maptomem = 0;
	int i;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */
	proc_list_lock();

	/*
	 * Calculate the sizes of the map buffers
	 */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

	if ((kmem_alloc(kernel_map, &kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	} else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, &tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	} else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the proc's command string
	 * and take a reference on each task associated
	 * with a valid process.
	 */
	if (tts_mapptr) {
		/*
		 * should use proc_iterate
		 */
		proc_list_lock();

		for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;

		proc_list_unlock();
	}

	if (kd_mapptr && tts_mapptr) {
		kdebug_flags |= KDBG_MAPINIT;

		/*
		 * Initialize thread map data
		 */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}

static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	if (kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_mapptr = (kd_threadmap *) 0;
	}
	kd_mapsize = 0;
	kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}

/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
#ifdef ppc
	else {
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
	}
#else
	else
		ret = ENOTSUP;
#endif /* ppc */

	return(ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;	/* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;		/* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return(ret);
}
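
/*
 * Filter example (illustrative): a KDBG_SUBCLSTYPE request with
 * value1 == DBG_MACH (1) and value2 == DBG_MACH_SCHED (0x40) sets kdlog_beg to
 * 0x01400000 and kdlog_end to 0x01410000, so only debugids whose class and
 * subclass fields fall in that half-open range pass the range check (plus
 * DBG_TRACE events, which kernel_debug_internal() always lets through).
 */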

int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int i, j, ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}

int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;

	count = avail / sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount)) {
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
			if (*number < kd_mapsize)
				ret = EINVAL;
			else {
				if (vp) {
					vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += sizeof(uint32_t);

					vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					RAW_file_offset += kd_mapsize;
				} else {
					if (copyout(kd_mapptr, buffer, kd_mapsize))
						ret = EINVAL;
				}
			}
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	if (ret && vp) {
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
	}
	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
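
/*
 * RAW file layout note (derived from the writes above): when dumping to a
 * vnode, kdbg_readmap() emits a uint32_t thread-map entry count followed by
 * that many kd_threadmap records (or a zero count on failure); kdbg_read()
 * then appends raw kd_buf records at the advancing RAW_file_offset.
 */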

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail / sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/*
	 * Enforce maximum entropy entries here if needed;
	 * allocate the entropy buffer
	 */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	} else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/*
	 * Enable entropy sampling
	 */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout / (1000 / HZ)));

	/*
	 * Disable entropy sampling
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/*
		 * copyout the buffer
		 */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}
	/*
	 * Always clean up
	 */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}
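
/*
 * Type note: the entropy buffer is filled with 64-bit mach_absolute_time()
 * samples (see kernel_debug_internal), yet it is sized and copied out in units
 * of mach_timespec_t; the two types happen to share the same 8-byte size,
 * which is what keeps the accounting consistent.
 */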

static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either RAM or the max mapped
	 * address, whichever is smaller; 'value' is the desired number of trace
	 * entries.
	 */
	unsigned int max_entries = (sane_size / 2) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}
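
/*
 * Clamp example (illustrative, assuming sane_size == 512MB and a 64-byte
 * kd_buf): max_entries = (512MB / 2) / 64 = 4,194,304 trace entries; any
 * larger request is silently clamped to that ceiling.
 */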

/*
 * This function is provided for the CHUD toolkit only.
 * int val:
 *	zero disables the kdebug_chudhook function call
 *	non-zero enables the kdebug_chudhook function call
 * char *fn:
 *	address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}

int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETENTROPY ||
	    name[0] == KERN_KDEFLAGS ||
	    name[0] == KERN_KDDFLAGS ||
	    name[0] == KERN_KDENABLE ||
	    name[0] == KERN_KDSETBUF) {

		if (namelen < 2)
			return(EINVAL);
		value = name[1];
	}

	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return(ENOSPC);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			ret = EINVAL;
			goto out;
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;

		kd_bufinfo.flags = kdebug_flags;
#if defined(__LP64__)
		kd_bufinfo.flags |= KDBG_LP64;
#endif
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
				ret = EINVAL;
		} else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size))
				ret = EINVAL;
		}
		goto out;

	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		goto out;
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		ret = ESRCH;
		goto out;
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);

			ret = EBUSY;
			goto out;
		}
	}

	switch (name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:
		/*
		 * used to enable or disable tracing
		 */
		if (value) {
			/*
			 * enable only if the buffer is initialized
			 */
			if (!(kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else {
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep, NULL, NULL);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep, NULL, NULL);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	default:
		ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
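
/*
 * Control-path sketch (user space, not part of this file): tools such as
 * fs_usage drive this interface through sysctl() with
 * { CTL_KERN, KERN_KDEBUG, <op>[, <value>] }, e.g. to size, set up, and
 * enable tracing:
 *
 *	#include <sys/sysctl.h>
 *	#include <sys/kdebug.h>
 *
 *	int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, 8192 };
 *	size_t needed = 0;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);	// set nkdbufs
 *
 *	mib[2] = KERN_KDSETUP;
 *	sysctl(mib, 3, NULL, &needed, NULL, 0);	// allocate the buffers
 *
 *	mib[2] = KERN_KDENABLE; mib[3] = 1;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);	// start tracing
 */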

/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock, which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks.
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, mincpu;
	uint64_t mintime, t;
	int error = 0, s = 0;
	kd_buf *tempbuf;
	kd_buf *rcursor;
	kd_buf *min_rcursor;
	struct kd_storage *kdsp;
	struct kd_bufinfo *kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;

	count = *number / sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk, which is where it examines kdebug_flags... if it's
	 * adding to the same chunk we're reading from, no problem...
	 */
	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	old_kdebug_slowcheck = kdebug_slowcheck;
	old_kdebug_flags = kdebug_flags;

	kdebug_flags &= ~KDBG_WRAPPED;
	kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = -1;
			min_rcursor = NULL;

			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

				if ((kdsp = kdbp->kd_list_head) == NULL)
					continue;
				rcursor = kdsp->kds_readlast;

				if (rcursor == kdsp->kds_bufptr)
					continue;
				t = kdbg_get_timestamp(rcursor);

				if (t < mintime) {
					mincpu = cpu;
					mintime = t;
					min_rcursor = rcursor;
				}
			}
			if (mincpu == (unsigned int)-1)
				/*
				 * all buffers ran empty
				 */
				break;

			kdbp = &kdbip[mincpu];
			kdsp = kdbp->kd_list_head;

			*tempbuf = *min_rcursor;

			if (mintime != kdbg_get_timestamp(tempbuf)) {
				/*
				 * we stole this storage unit and used it
				 * before we could slurp the selected event out,
				 * so we need to re-evaluate
				 */
				continue;
			}
			/*
			 * Watch for out-of-order timestamps
			 */
			if (mintime < kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, kdbp->kd_prev_timebase, mincpu);
			} else
				kdbp->kd_prev_timebase = mintime;

			if (min_rcursor == kdsp->kds_readlast)
				kdsp->kds_readlast++;

			if (kdsp->kds_readlast == kdsp->kds_buflast)
				release_storage_unit(kdbp, kdsp);

			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;
		}
		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {

		s = ml_set_interrupts_enabled(FALSE);
		lck_spin_lock(kds_spin_lock);

		kdebug_flags &= ~KDBG_NOWRAP;

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG))
			kdebug_slowcheck &= ~SLOW_NOLOG;

		lck_spin_unlock(kds_spin_lock);
		ml_set_interrupts_enabled(s);
	}
	return (error);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return a pointer to the proc name */
}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init(void)
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:	Obtains a coherent set of stack traces for all threads
 *			on the system, tracing both kernel and user stacks
 *			where available. Uses machine-specific trace routines
 *			for ppc, ppc64 and x86.
 * Inputs:		uap->pid - process id of the process to be traced, or -1
 *			for the entire system
 *			uap->tracebuf - address of the user space destination
 *			buffer
 *			uap->tracebuf_size - size of the user space trace buffer
 *			uap->options - various options, including the maximum
 *			number of frames to trace.
 * Outputs:		EPERM if the caller is not privileged
 *			EINVAL if the supplied trace buffer isn't sanely sized
 *			ENOMEM if we don't have enough memory to satisfy the
 *			request
 *			ENOENT if the target pid isn't found
 *			ENOSPC if the supplied buffer is insufficient
 *			*retval contains the number of bytes traced, if successful
 *			and -1 otherwise. If the request failed due to
 *			tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
			       uap->flags, uap->dispatch_offset, retval);
}

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;
	boolean_t istate;

	*retval = -1;
	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}

	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
				     ((bytesTraced < tracebuf_size) ?
				      bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}

void
start_kern_tracing(unsigned int new_nkdbufs) {

	if (!new_nkdbufs)
		return;
	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();
	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();

#if defined(__i386__) || defined(__x86_64__)
	uint64_t now = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
			      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
			      (uint32_t)(now >> 32), (uint32_t)now,
			      0);
#endif
	printf("kernel tracing started\n");
}
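
/*
 * Usage note (an assumption based on common xnu practice, not stated in this
 * file): start_kern_tracing() is intended to be called early in boot with a
 * buffer count derived from a boot argument (e.g. boot-args "trace=0x100000"),
 * so that events cut before user space comes up can later be harvested via
 * kdbg_read() or kdbg_dump_trace_to_file().
 */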

void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t ctx;
	vnode_t vp;
	int error;
	size_t number;

	if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, so we're running
			 * on behalf of fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 */
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapsize;
	kdbg_readmap(0, &number, vp, ctx);

	number = nkdbufs * sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}