/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock.h>
#endif
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>

#include <mach/mach_host.h>     /* for host_info() */
#include <libkern/OSAtomic.h>

/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);   /* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

#ifdef ppc
extern uint32_t maxDec;
#endif

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *   kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
vm_offset_t  kd_entropy_buftomem = 0;


#define SLOW_NOLOG   0x01
#define SLOW_CHECKS  0x02
#define SLOW_ENTROPY 0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;

#define EVENTS_PER_STORAGE_UNIT   2048
#define MIN_STORAGE_UNITS_PER_CPU 4

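/*
 * Storage layout (summary added for clarity): events are recorded into
 * fixed-size storage units of EVENTS_PER_STORAGE_UNIT kd_buf entries.
 * Each CPU owns a linked list of units (struct kd_bufinfo below) and
 * fills its active unit without taking a lock; the global free list and
 * all list manipulation are protected by kds_spin_lock.  When the free
 * list runs dry, the unit holding the oldest events is stolen and
 * recycled (see allocate_storage_unit()).
 */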
struct kd_storage {
    struct kd_storage *kds_next;
    kd_buf *kds_bufptr;
    kd_buf *kds_buflast;
    kd_buf *kds_readlast;

    kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
};

#define MAX_BUFFER_SIZE            (1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))


struct kd_storage_buffers {
    struct kd_storage *kdsb_addr;
    uint32_t kdsb_size;
};


struct kd_storage *kds_free_list = NULL;
struct kd_storage_buffers *kd_bufs = NULL;
int n_storage_units = 0;
int n_storage_buffers = 0;

struct kd_bufinfo {
    struct kd_storage *kd_list_head;
    struct kd_storage *kd_list_tail;
    struct kd_storage *kd_active;
    uint64_t kd_prev_timebase;
} __attribute__(( aligned(CPU_CACHE_SIZE) ));

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT 2048
#define KDCOPYBUF_SIZE  (KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;


unsigned int nkdbufs = 8192;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_spin_t *kds_spin_lock;
static lck_mtx_t *kd_trace_mtx_sysctl;
static lck_grp_t *kd_trace_mtx_sysctl_grp;
static lck_attr_t *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t *stackshot_subsys_lck_attr;
static lck_mtx_t stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t kd_maptomem = 0;

off_t RAW_file_offset = 0;

pid_t global_state_pid = -1;    /* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

/* task to string structure */
struct tts
{
    task_t task;           /* from procs task */
    pid_t  pid;            /* from procs p_pid */
    char   task_comm[20];  /* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
    kd_threadmap *map;     /* pointer to the map buffer */
    int count;
    int maxcount;
    struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
                                uintptr_t arg2, uintptr_t arg3,
                                uintptr_t arg4, uintptr_t arg5);

kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
    if ( (kdebug_enable == 0) )
        return(EINVAL);

    kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

    return(0);
}

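/*
 * Illustrative note (added, not from the original source): user processes
 * reach kdebug_trace() above via syscall(SYS_kdebug_trace, code, arg1,
 * arg2, arg3, arg4).  The code and four arguments are recorded verbatim;
 * arg5 is forced to 0 here, and kernel_debug() then substitutes the
 * calling thread's id.  Tracing must already be enabled, else EINVAL.
 */
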
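/*
 * Worked example (added): with the default nkdbufs of 8192 and an assumed
 * kd_cpus of 2, the floor of kd_cpus * EVENTS_PER_STORAGE_UNIT *
 * MIN_STORAGE_UNITS_PER_CPU = 2 * 2048 * 4 = 16384 events wins, so
 * create_buffers() rounds nkdbufs up to 16384 and carves it into
 * n_storage_units = 8 units, allocated in slabs of at most
 * MAX_BUFFER_SIZE bytes apiece.
 */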
static int
create_buffers(void)
{
    int i;
    int p_buffer_size;
    int f_buffer_size;
    int f_buffers;
    int error = 0;

    if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
        n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
    else
        n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

    nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

    f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
    n_storage_buffers = f_buffers;

    f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
    p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

    if (p_buffer_size)
        n_storage_buffers++;

    kd_bufs = NULL;

    if (kdcopybuf == 0) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
    }
    if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
        error = ENOSPC;
        goto out;
    }
    bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

    for (i = 0; i < f_buffers; i++) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
        kd_bufs[i].kdsb_size = f_buffer_size;
    }
    if (p_buffer_size) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
            error = ENOSPC;
            goto out;
        }
        kd_bufs[i].kdsb_size = p_buffer_size;
    }

    for (i = 0; i < n_storage_buffers; i++) {
        struct kd_storage *kds;
        int n_elements;
        int n;

        n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
        kds = kd_bufs[i].kdsb_addr;

        for (n = 0; n < n_elements; n++) {
            kds[n].kds_next = kds_free_list;
            kds_free_list = &kds[n];

            kds[n].kds_buflast = &kds[n].kds_records[EVENTS_PER_STORAGE_UNIT];
        }
    }
    bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

    kdebug_flags |= KDBG_BUFINIT;
out:
    if (error)
        delete_buffers();

    return(error);
}

static void
delete_buffers(void)
{
    int i;

    if (kd_bufs) {
        for (i = 0; i < n_storage_buffers; i++) {
            if (kd_bufs[i].kdsb_addr)
                kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
        }
        kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

        kd_bufs = NULL;
        n_storage_buffers = 0;
    }
    if (kdcopybuf) {
        kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

        kdcopybuf = NULL;
    }
    kds_free_list = NULL;

    kdebug_flags &= ~KDBG_BUFINIT;
}


static void
release_storage_unit(struct kd_bufinfo *kdbp, struct kd_storage *kdsp)
{
    int s = 0;

    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    if (kdsp == kdbp->kd_list_head) {
        /*
         * it's possible for the storage unit pointed to
         * by kdsp to have already been stolen... so
         * check to see if it's still the head of the list
         * now that we're behind the lock that protects
         * adding and removing from the queue...
         * since we only ever release and steal units from
         * that position, if it's no longer the head
         * we have nothing to do in this context
         */
        kdbp->kd_list_head = kdsp->kds_next;

        kdsp->kds_next = kds_free_list;
        kds_free_list = kdsp;
    }
    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);
}

/*
 * Interrupts are disabled when we enter this routine.
 */
static struct kd_storage *
allocate_storage_unit(struct kd_bufinfo *kdbp)
{
    struct kd_storage *kdsp;
    struct kd_bufinfo *kdbp_vict, *kdbp_try;
    uint64_t oldest_ts, ts;

    lck_spin_lock(kds_spin_lock);

    if ((kdsp = kds_free_list))
        kds_free_list = kdsp->kds_next;
    else {
        if (kdebug_flags & KDBG_NOWRAP) {
            kdebug_slowcheck |= SLOW_NOLOG;
            goto out;
        }
        kdbp_vict = NULL;
        oldest_ts = (uint64_t)-1;

        for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

            if ((kdsp = kdbp_try->kd_list_head) == NULL) {
                /*
                 * no storage unit to steal
                 */
                continue;
            }
            if (kdsp == kdbp_try->kd_active) {
                /*
                 * make sure we don't steal the storage unit
                 * being actively recorded to... this state
                 * also implies that this is the only unit assigned
                 * to this CPU, so we can immediately move on
                 */
                continue;
            }
            ts = kdbg_get_timestamp(&(kdbp_try->kd_list_head->kds_records[0]));

            if (ts < oldest_ts) {
                /*
                 * when 'wrapping', we want to steal the
                 * storage unit that has the 'earliest' time
                 * associated with it (first event time)
                 */
                oldest_ts = ts;
                kdbp_vict = kdbp_try;
            }
        }
#if 1
        if (kdbp_vict == NULL) {
            kdebug_enable = 0;

            panic("allocate_storage_unit: no storage units available\n");
        }
#endif
        kdsp = kdbp_vict->kd_list_head;

        kdbp_vict->kd_list_head = kdsp->kds_next;

        kdebug_flags |= KDBG_WRAPPED;
    }
    kdsp->kds_next = NULL;
    kdsp->kds_bufptr = &kdsp->kds_records[0];
    kdsp->kds_readlast = kdsp->kds_bufptr;

    if (kdbp->kd_list_head == NULL)
        kdbp->kd_list_head = kdsp;
    else
        kdbp->kd_list_tail->kds_next = kdsp;
    kdbp->kd_list_tail = kdsp;
out:
    lck_spin_unlock(kds_spin_lock);

    return (kdsp);
}

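/*
 * Note (added): a successful steal above sets KDBG_WRAPPED, which tells
 * the reader (kdbg_read() clears it) that the oldest events were
 * overwritten.  With KDBG_NOWRAP set, tracing instead stops: SLOW_NOLOG
 * is raised and allocate_storage_unit() returns NULL rather than
 * recycling a unit.
 */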

static void
kernel_debug_internal(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    uintptr_t arg5,
    int       entropy_flag)
{
    struct proc *curproc;
    uint64_t now;
    int s;
    kd_buf *kd;
    int cpu;
    struct kd_bufinfo *kdbp;
    struct kd_storage *kdsp;

    s = ml_set_interrupts_enabled(FALSE);

    now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
    cpu = cpu_number();

    if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
        if (kdebug_chudhook)
            kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

        if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
            goto out;
    }
    if (kdebug_slowcheck == 0)
        goto record_trace;

    if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
        if (kd_entropy_indx < kd_entropy_count) {
            kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
            kd_entropy_indx++;
        }
        if (kd_entropy_indx == kd_entropy_count) {
            /*
             * Disable entropy collection
             */
            kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
            kdebug_slowcheck &= ~SLOW_ENTROPY;
        }
    }
    if ( (kdebug_slowcheck & SLOW_NOLOG) )
        goto out;

    if (kdebug_flags & KDBG_PIDCHECK) {
        /*
         * If kdebug flag is not set for current proc, return
         */
        curproc = current_proc();

        if ((curproc && !(curproc->p_kdebug)) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
            goto out;
    }
    else if (kdebug_flags & KDBG_PIDEXCLUDE) {
        /*
         * If kdebug flag is set for current proc, return
         */
        curproc = current_proc();

        if ((curproc && curproc->p_kdebug) &&
            ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
            goto out;
    }
    if (kdebug_flags & KDBG_RANGECHECK) {
        if ((debugid < kdlog_beg)
            || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
            goto out;
    }
    else if (kdebug_flags & KDBG_VALCHECK) {
        if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
            (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
            (debugid >> 24 != DBG_TRACE))
            goto out;
    }

record_trace:
    kdbp = &kdbip[cpu];

    if ((kdsp = kdbp->kd_active) == NULL) {
        if ((kdsp = allocate_storage_unit(kdbp)) == NULL) {
            /*
             * this can only happen if wrapping
             * has been disabled
             */
            goto out;
        }
        kdbp->kd_active = kdsp;
    }
    kd = kdsp->kds_bufptr;

    kd->debugid = debugid;
    kd->arg1 = arg1;
    kd->arg2 = arg2;
    kd->arg3 = arg3;
    kd->arg4 = arg4;
    kd->arg5 = arg5;

    kdbg_set_timestamp_and_cpu(kd, now, cpu);

    kdsp->kds_bufptr++;

    if (kdsp->kds_bufptr >= kdsp->kds_buflast)
        kdbp->kd_active = NULL;
out:
    ml_set_interrupts_enabled(s);
}

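/*
 * Note on filtering (added): a debugid packs class, subclass and code as
 * ((class << 24) | (subclass << 16) | (code << 2) | func_qualifier); the
 * KDBG_RANGECHECK / KDBG_VALCHECK tests above compare against that value,
 * with DBG_FUNC_MASK stripping the two low qualifier bits.  Events whose
 * class is DBG_TRACE are exempted from most of the checks so that the
 * trace infrastructure's own events keep getting recorded.
 */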

void
kernel_debug(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    __unused uintptr_t arg5)
{
    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
    uint32_t  debugid,
    uintptr_t arg1,
    uintptr_t arg2,
    uintptr_t arg3,
    uintptr_t arg4,
    uintptr_t arg5)
{
    kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}

static void
kdbg_lock_init(void)
{
    host_basic_info_data_t hinfo;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    if (kdebug_flags & KDBG_LOCKINIT)
        return;

    /* get the number of cpus and cache it */
#define BSD_HOST 1
    host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
    kd_cpus = hinfo.logical_cpu_max;

    if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip,
                   sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
        return;

    /*
     * allocate lock group attribute and group
     */
    kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
    kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

    /*
     * allocate the lock attribute
     */
    kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

    /*
     * allocate and initialize spin lock and mutex
     */
    kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
    kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

    kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap(void)
{
    kdebug_flags &= ~KDBG_WRAPPED;

    return (create_buffers());
}

int
kdbg_reinit(void)
{
    int ret = 0;

    /*
     * Disable trace collecting
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck |= SLOW_NOLOG;

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     * to cut a trace..
     */
    IOSleep(100);

    delete_buffers();

    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }
    ret = kdbg_bootstrap();

    return(ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
    if (!proc)
        *arg_pid = 0;
    else
        *arg_pid = proc->p_pid;
}


void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
    char *dbg_nameptr;
    int dbg_namelen;
    long dbg_parms[4];

    if (!proc) {
        *arg1 = 0;
        *arg2 = 0;
        *arg3 = 0;
        *arg4 = 0;
        return;
    }
    /*
     * Collect the pathname for tracing
     */
    dbg_nameptr = proc->p_comm;
    dbg_namelen = (int)strlen(proc->p_comm);
    dbg_parms[0] = 0L;
    dbg_parms[1] = 0L;
    dbg_parms[2] = 0L;
    dbg_parms[3] = 0L;

    if (dbg_namelen > (int)sizeof(dbg_parms))
        dbg_namelen = (int)sizeof(dbg_parms);

    strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

    *arg1 = dbg_parms[0];
    *arg2 = dbg_parms[1];
    *arg3 = dbg_parms[2];
    *arg4 = dbg_parms[3];
}

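/*
 * Example (added): the first sizeof(long) bytes of p_comm land in *arg1,
 * the next sizeof(long) bytes in *arg2, and so on; that is 16 bytes of
 * name on a 32-bit kernel, 32 on LP64.  Shorter names are zero-padded,
 * since dbg_parms[] is cleared before the strncpy().
 */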

static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
    kd_threadmap *mapptr;
    krt_t *t = (krt_t *)opaque;

    if (t->count < t->maxcount) {
        mapptr = &t->map[t->count];
        mapptr->thread = (uintptr_t)thread_tid(th_act);

        (void) strlcpy (mapptr->command, t->atts->task_comm,
                        sizeof(t->atts->task_comm));
        /*
         * Some kernel threads have no associated pid.
         * We still need to mark the entry as valid.
         */
        if (t->atts->pid)
            mapptr->valid = t->atts->pid;
        else
            mapptr->valid = 1;

        t->count++;
    }
}

void
kdbg_mapinit(void)
{
    struct proc *p;
    struct krt akrt;
    int tts_count;    /* number of task-to-string structures */
    struct tts *tts_mapptr;
    unsigned int tts_mapsize = 0;
    vm_offset_t tts_maptomem = 0;
    int i;

    if (kdebug_flags & KDBG_MAPINIT)
        return;

    /*
     * need to use PROC_SCANPROCLIST with proc_iterate
     */
    proc_list_lock();

    /*
     * Calculate the sizes of map buffers
     */
    for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
        kd_mapcount += get_task_numacts((task_t)p->task);
        tts_count++;
    }
    proc_list_unlock();

    /*
     * The proc count could change during buffer allocation,
     * so introduce a small fudge factor to bump up the
     * buffer sizes. This gives new tasks some chance of
     * making it into the tables. Bump up by 10%.
     */
    kd_mapcount += kd_mapcount/10;
    tts_count += tts_count/10;

    kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

    if ((kmem_alloc(kernel_map, &kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
        kd_mapptr = (kd_threadmap *) kd_maptomem;
        bzero(kd_mapptr, kd_mapsize);
    } else
        kd_mapptr = (kd_threadmap *) 0;

    tts_mapsize = tts_count * sizeof(struct tts);

    if ((kmem_alloc(kernel_map, &tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
        tts_mapptr = (struct tts *) tts_maptomem;
        bzero(tts_mapptr, tts_mapsize);
    } else
        tts_mapptr = (struct tts *) 0;

    /*
     * We need to save the procs command string
     * and take a reference for each task associated
     * with a valid process
     */
    if (tts_mapptr) {
        /*
         * should use proc_iterate
         */
        proc_list_lock();

        for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
            if (p->p_lflag & P_LEXIT)
                continue;

            if (p->task) {
                task_reference(p->task);
                tts_mapptr[i].task = p->task;
                tts_mapptr[i].pid = p->p_pid;
                (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
                i++;
            }
        }
        tts_count = i;

        proc_list_unlock();
    }

    if (kd_mapptr && tts_mapptr) {
        kdebug_flags |= KDBG_MAPINIT;

        /*
         * Initialize thread map data
         */
        akrt.map = kd_mapptr;
        akrt.count = 0;
        akrt.maxcount = kd_mapcount;

        for (i = 0; i < tts_count; i++) {
            akrt.atts = &tts_mapptr[i];
            task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
            task_deallocate((task_t) tts_mapptr[i].task);
        }
        kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
    }
}

static void
kdbg_clear(void)
{
    /*
     * Clean up the trace buffer
     * First make sure we're not in
     * the middle of cutting a trace
     */
    kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck = SLOW_NOLOG;

    /*
     * make sure the SLOW_NOLOG is seen
     * by everyone that might be trying
     * to cut a trace..
     */
    IOSleep(100);

    if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
        kdebug_slowcheck |= SLOW_ENTROPY;

    global_state_pid = -1;
    kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
    kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
    kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

    delete_buffers();

    /* Clean up the thread map buffer */
    kdebug_flags &= ~KDBG_MAPINIT;
    if (kd_mapptr) {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kd_mapptr = (kd_threadmap *) 0;
    }
    kd_mapsize = 0;
    kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0) {
        if ((p = proc_find(pid)) == NULL)
            ret = ESRCH;
        else {
            if (flag == 1) {
                /*
                 * turn on pid check for this and all pids
                 */
                kdebug_flags |= KDBG_PIDCHECK;
                kdebug_flags &= ~KDBG_PIDEXCLUDE;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_kdebug = 1;
            } else {
                /*
                 * turn off pid check for this pid value
                 * Don't turn off all pid checking though
                 *
                 * kdebug_flags &= ~KDBG_PIDCHECK;
                 */
                p->p_kdebug = 0;
            }
            proc_rele(p);
        }
    }
    else
        ret = EINVAL;

    return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
    pid_t pid;
    int flag, ret = 0;
    struct proc *p;

    pid = (pid_t)kdr->value1;
    flag = (int)kdr->value2;

    if (pid > 0) {
        if ((p = proc_find(pid)) == NULL)
            ret = ESRCH;
        else {
            if (flag == 1) {
                /*
                 * turn on pid exclusion
                 */
                kdebug_flags |= KDBG_PIDEXCLUDE;
                kdebug_flags &= ~KDBG_PIDCHECK;
                kdebug_slowcheck |= SLOW_CHECKS;

                p->p_kdebug = 1;
            }
            else {
                /*
                 * turn off pid exclusion for this pid value
                 * Don't turn off all pid exclusion though
                 *
                 * kdebug_flags &= ~KDBG_PIDEXCLUDE;
                 */
                p->p_kdebug = 0;
            }
            proc_rele(p);
        }
    } else
        ret = EINVAL;

    return(ret);
}

/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
    int ret = 0;
    natural_t decval;

    decval = (natural_t)kdr->value1;

    if (decval && decval < KDBG_MINRTCDEC)
        ret = EINVAL;
#ifdef ppc
    else {
        maxDec = decval ? decval : 0x7FFFFFFF;  /* Set or reset the max decrementer */
    }
#else
    else
        ret = ENOTSUP;
#endif /* ppc */

    return(ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
    int ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {
    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_VALCHECK:
        kdlog_value1 = (kdr->value1);
        kdlog_value2 = (kdr->value2);
        kdlog_value3 = (kdr->value3);
        kdlog_value4 = (kdr->value4);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags &= ~KDBG_RANGECHECK;     /* Turn off range check */
        kdebug_flags |= KDBG_VALCHECK;        /* Turn on specific value check */
        kdebug_slowcheck |= SLOW_CHECKS;
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

        if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
            kdebug_slowcheck |= SLOW_CHECKS;
        else
            kdebug_slowcheck &= ~SLOW_CHECKS;

        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
    return(ret);
}

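/*
 * Worked example (added): a KDBG_SUBCLSTYPE request with value1 == 3 and
 * value2 == 5 yields kdlog_beg = 0x03050000 and kdlog_end = 0x03060000,
 * so the KDBG_RANGECHECK test in kernel_debug_internal() admits exactly
 * the debugids of class 3, subclass 5.
 */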

int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
    int i, j, ret = 0;
    unsigned int val_1, val_2, val;

    switch (kdr->type) {
    case KDBG_CLASSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = val_1 + 1;
        kdlog_beg = (val_1<<24);
        kdlog_end = (val_2<<24);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
        break;
    case KDBG_SUBCLSTYPE :
        val_1 = (kdr->value1 & 0xff);
        val_2 = (kdr->value2 & 0xff);
        val = val_2 + 1;
        kdlog_beg = ((val_1<<24) | (val_2 << 16));
        kdlog_end = ((val_1<<24) | (val << 16));
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
        break;
    case KDBG_RANGETYPE :
        kdlog_beg = (kdr->value1);
        kdlog_end = (kdr->value2);
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
        break;
    case KDBG_TYPENONE :
        kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
        kdlog_beg = 0;
        kdlog_end = 0;
        break;
    default :
        ret = EINVAL;
        break;
    }
#endif /* 0 */
    return(EINVAL);
}


int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
    int avail = *number;
    int ret = 0;
    uint32_t count = 0;

    count = avail/sizeof (kd_threadmap);

    if (count && (count <= kd_mapcount)) {
        if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
            if (*number < kd_mapsize)
                ret = EINVAL;
            else {
                if (vp) {
                    vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
                            UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
                    RAW_file_offset += sizeof(uint32_t);

                    vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
                            UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
                    RAW_file_offset += kd_mapsize;

                } else {
                    if (copyout(kd_mapptr, buffer, kd_mapsize))
                        ret = EINVAL;
                }
            }
        }
        else
            ret = EINVAL;
    }
    else
        ret = EINVAL;

    if (ret && vp) {
        count = 0;

        vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
                UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
        RAW_file_offset += sizeof(uint32_t);
    }
    if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
        kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
        kdebug_flags &= ~KDBG_MAPINIT;
        kd_mapsize = 0;
        kd_mapptr = (kd_threadmap *) 0;
        kd_mapcount = 0;
    }

    return(ret);
}

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
    int avail = *number;
    int ret = 0;

    if (kd_entropy_buffer)
        return(EBUSY);

    kd_entropy_count = avail/sizeof(mach_timespec_t);
    kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
    kd_entropy_indx = 0;

    /*
     * Enforce maximum entropy entries here if needed
     * allocate entropy buffer
     */
    if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
                   (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
        kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
    } else {
        kd_entropy_buffer = (uint64_t *) 0;
        kd_entropy_count = 0;
        kd_entropy_indx = 0;
        return (EINVAL);
    }

    if (ms_timeout < 10)
        ms_timeout = 10;

    /*
     * Enable entropy sampling
     */
    kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck |= SLOW_ENTROPY;

    ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));

    /*
     * Disable entropy sampling
     */
    kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
    kdebug_slowcheck &= ~SLOW_ENTROPY;

    *number = 0;
    ret = 0;

    if (kd_entropy_indx > 0) {
        /*
         * copyout the buffer
         */
        if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
            ret = EINVAL;
        else
            *number = kd_entropy_indx;
    }
    /*
     * Always cleanup
     */
    kd_entropy_count = 0;
    kd_entropy_indx = 0;
    kd_entropy_buftomem = 0;
    kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
    kd_entropy_buffer = (uint64_t *) 0;

    return(ret);
}

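/*
 * Note (added): while KDEBUG_ENABLE_ENTROPY is set, kernel_debug_internal()
 * deposits a raw mach_absolute_time() value into kd_entropy_buffer for each
 * event cut with entropy_flag != 0, until kd_entropy_count entries have
 * been gathered; the tsleep() above merely bounds how long the caller
 * (reached via the KERN_KDGETENTROPY op in kdbg_control()) waits for that
 * to happen.
 */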

static void
kdbg_set_nkdbufs(unsigned int value)
{
    /*
     * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
     * 'value' is the desired number of trace entries
     */
    unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

    if (value <= max_entries)
        nkdbufs = value;
    else
        nkdbufs = max_entries;
}

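/*
 * Example (added): if sane_size reports 1GB of memory, the cap is 512MB
 * worth of kd_buf entries; a larger request is silently clamped to that
 * maximum rather than failed.
 */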

/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
    if (val) {
        /* enable chudhook */
        kdebug_chudhook = fn;
        kdebug_enable |= KDEBUG_ENABLE_CHUD;
    }
    else {
        /* disable chudhook */
        kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
        kdebug_chudhook = 0;
    }
}


int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
    int ret = 0;
    size_t size = *sizep;
    unsigned int value = 0;
    kd_regtype kd_Reg;
    kbufinfo_t kd_bufinfo;
    pid_t curpid;
    struct proc *p, *curproc;

    if (name[0] == KERN_KDGETENTROPY ||
        name[0] == KERN_KDEFLAGS ||
        name[0] == KERN_KDDFLAGS ||
        name[0] == KERN_KDENABLE ||
        name[0] == KERN_KDSETBUF) {

        if ( namelen < 2 )
            return(EINVAL);
        value = name[1];
    }

    kdbg_lock_init();

    if ( !(kdebug_flags & KDBG_LOCKINIT))
        return(ENOSPC);

    lck_mtx_lock(kd_trace_mtx_sysctl);

    if (name[0] == KERN_KDGETBUF) {
        /*
         * Does not alter the global_state_pid
         * This is a passive request.
         */
        if (size < sizeof(kd_bufinfo.nkdbufs)) {
            /*
             * There is not enough room to return even
             * the first element of the info structure.
             */
            ret = EINVAL;
            goto out;
        }
        kd_bufinfo.nkdbufs = nkdbufs;
        kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

        if ( (kdebug_slowcheck & SLOW_NOLOG) )
            kd_bufinfo.nolog = 1;
        else
            kd_bufinfo.nolog = 0;

        kd_bufinfo.flags = kdebug_flags;
#if defined(__LP64__)
        kd_bufinfo.flags |= KDBG_LP64;
#endif
        kd_bufinfo.bufid = global_state_pid;

        if (size >= sizeof(kd_bufinfo)) {
            /*
             * Provide all the info we have
             */
            if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
                ret = EINVAL;
        } else {
            /*
             * For backwards compatibility, only provide
             * as much info as there is room for.
             */
            if (copyout(&kd_bufinfo, where, size))
                ret = EINVAL;
        }
        goto out;

    } else if (name[0] == KERN_KDGETENTROPY) {
        if (kd_entropy_buffer)
            ret = EBUSY;
        else
            ret = kdbg_getentropy(where, sizep, value);
        goto out;
    }

    if ((curproc = current_proc()) != NULL)
        curpid = curproc->p_pid;
    else {
        ret = ESRCH;
        goto out;
    }
    if (global_state_pid == -1)
        global_state_pid = curpid;
    else if (global_state_pid != curpid) {
        if ((p = proc_find(global_state_pid)) == NULL) {
            /*
             * The global pid no longer exists
             */
            global_state_pid = curpid;
        } else {
            /*
             * The global pid exists, deny this request
             */
            proc_rele(p);

            ret = EBUSY;
            goto out;
        }
    }

    switch(name[0]) {
    case KERN_KDEFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags |= value;
        break;
    case KERN_KDDFLAGS:
        value &= KDBG_USERFLAGS;
        kdebug_flags &= ~value;
        break;
    case KERN_KDENABLE:
        /*
         * used to enable or disable
         */
        if (value) {
            /*
             * enable only if buffer is initialized
             */
            if (!(kdebug_flags & KDBG_BUFINIT)) {
                ret = EINVAL;
                break;
            }
            kdbg_mapinit();

            kdebug_enable |= KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck &= ~SLOW_NOLOG;
        }
        else {
            kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
            kdebug_slowcheck |= SLOW_NOLOG;
        }
        break;
    case KERN_KDSETBUF:
        kdbg_set_nkdbufs(value);
        break;
    case KERN_KDSETUP:
        ret = kdbg_reinit();
        break;
    case KERN_KDREMOVE:
        kdbg_clear();
        break;
    case KERN_KDSETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setreg(&kd_Reg);
        break;
    case KERN_KDGETREG:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_getreg(&kd_Reg);
        if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
            ret = EINVAL;
        }
        break;
    case KERN_KDREADTR:
        ret = kdbg_read(where, sizep, NULL, NULL);
        break;
    case KERN_KDPIDTR:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpid(&kd_Reg);
        break;
    case KERN_KDPIDEX:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setpidex(&kd_Reg);
        break;
    case KERN_KDTHRMAP:
        ret = kdbg_readmap(where, sizep, NULL, NULL);
        break;
    case KERN_KDSETRTCDEC:
        if (size < sizeof(kd_regtype)) {
            ret = EINVAL;
            break;
        }
        if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
            ret = EINVAL;
            break;
        }
        ret = kdbg_setrtcdec(&kd_Reg);
        break;

    default:
        ret = EINVAL;
    }
out:
    lck_mtx_unlock(kd_trace_mtx_sysctl);

    return(ret);
}

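/*
 * Illustrative sketch (added, assumes the standard CTL_KERN/KERN_KDEBUG
 * sysctl front end): a trace controller typically drives this function as
 *
 *    int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, 1 };
 *    size_t needed = 0;
 *    sysctl(mib, 4, NULL, &needed, NULL, 0);
 *
 * in the order KERN_KDSETBUF, KERN_KDSETUP, KERN_KDENABLE, then repeated
 * KERN_KDREADTR to drain, and finally KERN_KDREMOVE.  The first such
 * caller becomes global_state_pid; others get EBUSY until it exits or
 * issues KERN_KDREMOVE (kdbg_clear() resets global_state_pid to -1).
 */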

/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
    unsigned int count;
    unsigned int cpu, mincpu;
    uint64_t mintime, t;
    int error = 0, s = 0;
    kd_buf *tempbuf;
    kd_buf *rcursor;
    kd_buf *min_rcursor;
    struct kd_storage *kdsp;
    struct kd_bufinfo *kdbp;
    uint32_t tempbuf_count;
    uint32_t tempbuf_number;
    uint32_t old_kdebug_flags;
    uint32_t old_kdebug_slowcheck;

    count = *number/sizeof(kd_buf);
    *number = 0;

    if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
        return EINVAL;

    /*
     * because we hold kd_trace_mtx_sysctl, no other control threads can
     * be playing with kdebug_flags... the code that cuts new events could
     * be running, but it grabs kds_spin_lock if it needs to acquire a new
     * storage chunk, which is where it examines kdebug_flags... if it's adding
     * to the same chunk we're reading from, no problem...
     */
    s = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(kds_spin_lock);

    old_kdebug_slowcheck = kdebug_slowcheck;
    old_kdebug_flags = kdebug_flags;

    kdebug_flags &= ~KDBG_WRAPPED;
    kdebug_flags |= KDBG_NOWRAP;

    lck_spin_unlock(kds_spin_lock);
    ml_set_interrupts_enabled(s);

    if (count > nkdbufs)
        count = nkdbufs;

    if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
        tempbuf_count = KDCOPYBUF_COUNT;

    while (count) {
        tempbuf = kdcopybuf;
        tempbuf_number = 0;

        while (tempbuf_count) {
            mintime = 0xffffffffffffffffULL;    /* all actual timestamps are below */
            mincpu = -1;
            min_rcursor = NULL;

            for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

                if ((kdsp = kdbp->kd_list_head) == NULL)
                    continue;
                rcursor = kdsp->kds_readlast;

                if (rcursor == kdsp->kds_bufptr)
                    continue;
                t = kdbg_get_timestamp(rcursor);

                if (t < mintime) {
                    mincpu = cpu;
                    mintime = t;
                    min_rcursor = rcursor;
                }
            }
            if (mincpu == (unsigned int)-1)
                /*
                 * all buffers ran empty
                 */
                break;

            kdbp = &kdbip[mincpu];
            kdsp = kdbp->kd_list_head;

            *tempbuf = *min_rcursor;

            if (mintime != kdbg_get_timestamp(tempbuf)) {
                /*
                 * we stole this storage unit and used it
                 * before we could slurp the selected event out
                 * so we need to re-evaluate
                 */
                continue;
            }
            /*
             * Watch for out of order timestamps
             */
            if (mintime < kdbp->kd_prev_timebase) {
                /*
                 * if so, use the previous timestamp + 1 cycle
                 */
                kdbp->kd_prev_timebase++;
                kdbg_set_timestamp_and_cpu(tempbuf, kdbp->kd_prev_timebase, mincpu);
            } else
                kdbp->kd_prev_timebase = mintime;

            if (min_rcursor == kdsp->kds_readlast)
                kdsp->kds_readlast++;

            if (kdsp->kds_readlast == kdsp->kds_buflast)
                release_storage_unit(kdbp, kdsp);

            tempbuf_count--;
            tempbuf_number++;
            tempbuf++;
        }
        if (tempbuf_number) {

            if (vp) {
                error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
                                UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

                RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
            } else {
                error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
                buffer += (tempbuf_number * sizeof(kd_buf));
            }
            if (error) {
                *number = 0;
                error = EINVAL;
                break;
            }
            count -= tempbuf_number;
            *number += tempbuf_number;
        }
        if (tempbuf_count)
            /*
             * all trace buffers are empty
             */
            break;

        if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
            tempbuf_count = KDCOPYBUF_COUNT;
    }
    if ( !(old_kdebug_flags & KDBG_NOWRAP)) {

        s = ml_set_interrupts_enabled(FALSE);
        lck_spin_lock(kds_spin_lock);

        kdebug_flags &= ~KDBG_NOWRAP;

        if ( !(old_kdebug_slowcheck & SLOW_NOLOG))
            kdebug_slowcheck &= ~SLOW_NOLOG;

        lck_spin_unlock(kds_spin_lock);
        ml_set_interrupts_enabled(s);
    }
    return (error);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

    return (unsigned char *) &proc->p_comm;    /* Return pointer to the proc name */

}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE 2*1024*1024

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
    stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

    stackshot_subsys_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *                   on the system, tracing both kernel and user stacks
 *                   where available. Uses machine specific trace routines
 *                   for ppc, ppc64 and x86.
 * Inputs:           uap->pid - process id of process to be traced, or -1
 *                   for the entire system
 *                   uap->tracebuf - address of the user space destination
 *                   buffer
 *                   uap->tracebuf_size - size of the user space trace buffer
 *                   uap->options - various options, including the maximum
 *                   number of frames to trace.
 * Outputs:          EPERM if the caller is not privileged
 *                   EINVAL if the supplied trace buffer isn't sanely sized
 *                   ENOMEM if we don't have enough memory to satisfy the
 *                   request
 *                   ENOENT if the target pid isn't found
 *                   ENOSPC if the supplied buffer is insufficient
 *                   *retval contains the number of bytes traced, if successful
 *                   and -1 otherwise. If the request failed due to
 *                   tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
    int error = 0;

    if ((error = suser(kauth_cred_get(), &p->p_acflag)))
        return(error);

    return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
                           uap->flags, uap->dispatch_offset, retval);
}

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
    int error = 0;
    unsigned bytesTraced = 0;

    *retval = -1;
    /* Serialize tracing */
    STACKSHOT_SUBSYS_LOCK();

    if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
        error = EINVAL;
        goto error_exit;
    }

    MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

    if (stackshot_snapbuf == NULL) {
        error = ENOMEM;
        goto error_exit;
    }
    /* Preload trace parameters */
    kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

    /*
     * Trap to the debugger to obtain a coherent stack snapshot; this populates
     * the trace buffer
     */
    if (panic_active()) {
        error = ENOMEM;
        goto error_exit;
    }

    TRAP_DEBUGGER;

    bytesTraced = kdp_stack_snapshot_bytes_traced();

    if (bytesTraced > 0) {
        if ((error = copyout(stackshot_snapbuf, tracebuf,
                             ((bytesTraced < tracebuf_size) ?
                              bytesTraced : tracebuf_size))))
            goto error_exit;
        *retval = bytesTraced;
    }
    else {
        error = ENOENT;
        goto error_exit;
    }

    error = kdp_stack_snapshot_geterror();
    if (error == -1) {
        error = ENOSPC;
        *retval = -1;
        goto error_exit;
    }

error_exit:
    if (stackshot_snapbuf != NULL)
        FREE(stackshot_snapbuf, M_TEMP);
    stackshot_snapbuf = NULL;
    STACKSHOT_SUBSYS_UNLOCK();
    return error;
}


void
start_kern_tracing(unsigned int new_nkdbufs) {

    if (!new_nkdbufs)
        return;
    kdbg_set_nkdbufs(new_nkdbufs);
    kdbg_lock_init();
    kdbg_reinit();
    kdebug_enable |= KDEBUG_ENABLE_TRACE;
    kdebug_slowcheck &= ~SLOW_NOLOG;
    kdbg_mapinit();

#if defined(__i386__) || defined(__x86_64__)
    uint64_t now = mach_absolute_time();

    KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
                          (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
                          (uint32_t)(now >> 32), (uint32_t)now,
                          0);
#endif
    printf("kernel tracing started\n");
}

void
kdbg_dump_trace_to_file(const char *filename)
{
    vfs_context_t ctx;
    vnode_t vp;
    int error;
    size_t number;


    if (kdebug_enable & (KDEBUG_ENABLE_CHUD | KDEBUG_ENABLE_ENTROPY))
        return;

    if (global_state_pid != -1) {
        if ((proc_find(global_state_pid)) != NULL) {
            /*
             * The global pid exists, we're running
             * due to fs_usage, latency, etc...
             * don't cut the panic/shutdown trace file
             */
            return;
        }
    }
    KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

    kdebug_enable = 0;

    ctx = vfs_context_kernel();

    if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
        return;

    number = kd_mapsize;
    kdbg_readmap(0, &number, vp, ctx);

    number = nkdbufs*sizeof(kd_buf);
    kdbg_read(0, &number, vp, ctx);

    vnode_close(vp, FWRITE, ctx);

    sync(current_proc(), (void *)NULL, (int *)NULL);
}
1836}