/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <sys/bsdtask_info.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/rtclock_protos.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#endif

#include <kern/clock.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/kauth.h>

#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/ubc.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>

#include <machine/pal_routines.h>

/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(boolean_t);
int kdbg_bootstrap(boolean_t);

static int create_buffers(boolean_t);
static void delete_buffers(void);

extern void IOSleep(int);

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
vm_offset_t kd_entropy_buftomem = 0;

#define MAX_ENTROPY_COUNT	(128 * 1024)


#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04
#define SLOW_CHUD	0x08

unsigned int kd_cpus;

#define EVENTS_PER_STORAGE_UNIT		2048
#define MIN_STORAGE_UNITS_PER_CPU	4

#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])

#define NATIVE_TRACE_FACILITY

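/*
 * A kds_ptr packs a reference to a storage unit into 32 bits: an
 * index into the kd_bufs array plus the offset of the unit within
 * that buffer's array of storage units.  KDS_PTR_NULL (all ones in
 * 'raw') terminates a list.
 */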
union kds_ptr {
	struct {
		uint32_t buffer_index:21;
		uint16_t offset:11;
	};
	uint32_t raw;
};

struct kd_storage {
	union	kds_ptr kds_next;
	uint32_t kds_bufindx;
	uint32_t kds_bufcnt;
	uint32_t kds_readlast;
	boolean_t kds_lostevents;
	uint64_t kds_timestamp;

	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
};

#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))

struct kd_storage_buffers {
	struct	kd_storage	*kdsb_addr;
	uint32_t		kdsb_size;
};

#define KDS_PTR_NULL 0xffffffff
struct kd_storage_buffers *kd_bufs = NULL;
int	n_storage_units = 0;
int	n_storage_buffers = 0;
int	n_storage_threshold = 0;
int	kds_waiter = 0;
int	kde_waiter = 0;

#pragma pack(0)
struct kd_bufinfo {
	union	kds_ptr kd_list_head;
	union	kds_ptr kd_list_tail;
	boolean_t kd_lostevents;
	uint32_t _pad;
	uint64_t kd_prev_timebase;
	uint32_t num_bufs;
} __attribute__(( aligned(CPU_CACHE_SIZE) ));

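/*
 * Global control state for the trace facility, kept together so it
 * can be handed to the trace handler as a single unit (see
 * trace_handler_map_ctrl_page() in kdbg_lock_init()).
 */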
struct kd_ctrl_page_t {
	union	kds_ptr kds_free_list;
	uint32_t enabled	:1;
	uint32_t _pad0		:31;
	int	kds_inuse_count;
	uint32_t kdebug_flags;
	uint32_t kdebug_slowcheck;
	uint32_t _pad1;
	struct {
		uint64_t tsc_base;
		uint64_t ns_base;
	} cpu_timebase[32];	// should be max number of actual logical cpus
} kd_ctrl_page = {.kds_free_list = {.raw = KDS_PTR_NULL}, .enabled = 0, .kds_inuse_count = 0, .kdebug_flags = 0, .kdebug_slowcheck = SLOW_NOLOG};
#pragma pack()

struct kd_bufinfo *kdbip = NULL;

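/*
 * kdcopybuf is a staging area for kdbg_read(): events are collated
 * from the per-cpu lists into this buffer before being copied out to
 * user space or written to a file.
 */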
#define KDCOPYBUF_COUNT	8192
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;


unsigned int nkdbufs = 8192;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_spin_t *kdw_spin_lock;
static lck_spin_t *kds_spin_lock;
static lck_mtx_t  *kd_trace_mtx_sysctl;
static lck_grp_t  *kd_trace_mtx_sysctl_grp;
static lck_attr_t *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t	*stackshot_subsys_lck_grp;
static lck_grp_attr_t	*stackshot_subsys_lck_grp_attr;
static lck_attr_t	*stackshot_subsys_lck_attr;
static lck_mtx_t	stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
vm_offset_t kd_maptomem = 0;

off_t	RAW_file_offset = 0;
int	RAW_file_written = 0;

#define RAW_FLUSH_SIZE	(2 * 1024 * 1024)


pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */

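/*
 * A debugid encodes class (bits 31-24), subclass (bits 23-16),
 * code (bits 15-2) and the DBG_FUNC_START/END qualifier (bits 1-0);
 * DBG_FUNC_MASK strips the qualifier.  The constants below name the
 * event classes that trigger waiter wakeups in kernel_debug_internal().
 */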
#define DBG_FUNC_MASK	0xfffffffc

#define INTERRUPT	0x01050000
#define MACH_vmfault	0x01300008
#define BSC_SysCall	0x040c0000
#define MACH_SysCall	0x010c0000
#define DBG_SCALL_MASK	0xffff0000


/* task to string structure */
struct tts
{
	task_t	task;		/* from procs task */
	pid_t	pid;		/* from procs p_pid */
	char	task_comm[20];	/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
				uintptr_t arg2, uintptr_t arg3,
				uintptr_t arg4, uintptr_t arg5);

volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));

static void
kdbg_set_tracing_enabled(boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kdebug_enable |= KDEBUG_ENABLE_TRACE;
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
		kd_ctrl_page.enabled = 1;
	} else {
		kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
		kd_ctrl_page.enabled = 0;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

static void
kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	if (enabled) {
		kd_ctrl_page.kdebug_slowcheck |= slowflag;
		kdebug_enable |= enableflag;
	} else {
		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
		kdebug_enable &= ~enableflag;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}


#ifdef NATIVE_TRACE_FACILITY
void
disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
	*old_flags = kd_ctrl_page.kdebug_flags;

	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

void
enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
{
	int s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;

	if ( !(old_slowcheck & SLOW_NOLOG))
		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;

	if (lostevents == TRUE)
		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;

	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}

void trace_set_timebases(__unused uint64_t tsc, __unused uint64_t ns)
{
}
#else
/* Begin functions that are defined twice */
void trace_set_timebases(uint64_t tsc, uint64_t ns)
{
	int cpu = cpu_number();
	kd_ctrl_page.cpu_timebase[cpu].tsc_base = tsc;
	kd_ctrl_page.cpu_timebase[cpu].ns_base = ns;
}

#endif

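/*
 * Trace memory is carved into storage units of EVENTS_PER_STORAGE_UNIT
 * records each, grouped into allocations of at most MAX_BUFFER_SIZE.
 * create_buffers() sizes everything from nkdbufs (with a floor of
 * MIN_STORAGE_UNITS_PER_CPU units per cpu) and threads every unit onto
 * the global free list in kd_ctrl_page.
 */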
static int
#if defined(__i386__) || defined(__x86_64__)
create_buffers(boolean_t early_trace)
#else
create_buffers(__unused boolean_t early_trace)
#endif
{
	int	i;
	int	p_buffer_size;
	int	f_buffer_size;
	int	f_buffers;
	int	error = 0;

	/*
	 * get the number of cpus and cache it
	 */
#if defined(__i386__) || defined(__x86_64__)
	if (early_trace == TRUE) {
		/*
		 * we've started tracing before the
		 * IOKit has even started running... just
		 * use the static max value
		 */
		kd_cpus = max_ncpus;
	} else
#endif
	{
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

#define BSD_HOST 1
		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
		kd_cpus = hinfo.logical_cpu_max;
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}

	trace_handler_map_bufinfo((uintptr_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

#if !defined(NATIVE_TRACE_FACILITY)
	for (i = 0; i < (int)kd_cpus; i++) {
		get_nanotime_timebases(i,
				&kd_ctrl_page.cpu_timebase[i].tsc_base,
				&kd_ctrl_page.cpu_timebase[i].ns_base);
	}
#endif

	if (nkdbufs < (kd_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		n_storage_units = kd_cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;

	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;

	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
	n_storage_buffers = f_buffers;

	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);

	if (p_buffer_size)
		n_storage_buffers++;

	kd_bufs = NULL;

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
	}
	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
		error = ENOSPC;
		goto out;
	}
	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));

	for (i = 0; i < f_buffers; i++) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);

		kd_bufs[i].kdsb_size = f_buffer_size;
	}
	if (p_buffer_size) {
		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
			error = ENOSPC;
			goto out;
		}
		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);

		kd_bufs[i].kdsb_size = p_buffer_size;
	}
	n_storage_units = 0;

	for (i = 0; i < n_storage_buffers; i++) {
		struct kd_storage *kds;
		int	n_elements;
		int	n;

		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
		kds = kd_bufs[i].kdsb_addr;

		trace_handler_map_buffer(i, (uintptr_t)kd_bufs[i].kdsb_addr, kd_bufs[i].kdsb_size);

		for (n = 0; n < n_elements; n++) {
			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;

			kd_ctrl_page.kds_free_list.buffer_index = i;
			kd_ctrl_page.kds_free_list.offset = n;
		}
		n_storage_units += n_elements;
	}

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	for (i = 0; i < (int)kd_cpus; i++) {
		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
		kdbip[i].kd_lostevents = FALSE;
		kdbip[i].num_bufs = 0;
	}

	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;

	kd_ctrl_page.kds_inuse_count = 0;
	n_storage_threshold = n_storage_units / 2;
out:
	if (error)
		delete_buffers();

	return(error);
}


static void
delete_buffers(void)
{
	int	i;

	if (kd_bufs) {
		for (i = 0; i < n_storage_buffers; i++) {
			if (kd_bufs[i].kdsb_addr) {
				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
				trace_handler_unmap_buffer(i);
			}
		}
		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));

		kd_bufs = NULL;
		n_storage_buffers = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);

		kdcopybuf = NULL;
	}
	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;

	if (kdbip) {
		trace_handler_unmap_bufinfo();

		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

		kdbip = NULL;
	}
	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
}


#ifdef NATIVE_TRACE_FACILITY
void
release_storage_unit(int cpu, uint32_t kdsp_raw)
{
	int s = 0;
	struct	kd_storage *kdsp_actual;
	struct	kd_bufinfo *kdbp;
	union	kds_ptr kdsp;

	kdsp.raw = kdsp_raw;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	if (kdsp.raw == kdbp->kd_list_head.raw) {
		/*
		 * it's possible for the storage unit pointed to
		 * by kdsp to have already been stolen... so
		 * check to see if it's still the head of the list
		 * now that we're behind the lock that protects
		 * adding and removing from the queue...
		 * since we only ever release and steal units from
		 * that position, if it's no longer the head
		 * we have nothing to do in this context
		 */
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kdbp->kd_list_head = kdsp_actual->kds_next;

		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
		kd_ctrl_page.kds_free_list = kdsp;

		kd_ctrl_page.kds_inuse_count--;
	}
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);
}


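/*
 * allocate_storage_unit() appends a fresh storage unit to a cpu's
 * list.  When the free list is empty and wrapping is allowed, it
 * steals the unit holding the oldest events (judged by the first
 * record's timestamp) from whichever cpu owns it, skipping any unit
 * that is still being filled so events can't later appear out of
 * order.
 */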
boolean_t
allocate_storage_unit(int cpu)
{
	union	kds_ptr kdsp;
	struct	kd_storage *kdsp_actual;
	struct	kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
	uint64_t	oldest_ts, ts;
	boolean_t	retval = TRUE;
	int s = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kds_spin_lock);

	kdbp = &kdbip[cpu];

	/* If someone beat us to the allocate, return success */
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);

		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
			goto out;
	}

	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;

		kd_ctrl_page.kds_inuse_count++;
	} else {
		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
			kdbp->kd_lostevents = TRUE;
			retval = FALSE;
			goto out;
		}
		kdbp_vict = NULL;
		oldest_ts = (uint64_t)-1;

		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_cpus]; kdbp_try++) {

			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
				/*
				 * no storage unit to steal
				 */
				continue;
			}

			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);

			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
				/*
				 * make sure we don't steal the storage unit
				 * being actively recorded to... need to
				 * move on because we don't want an out-of-order
				 * set of events showing up later
				 */
				continue;
			}
			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);

			if (ts < oldest_ts) {
				/*
				 * when 'wrapping', we want to steal the
				 * storage unit that has the 'earliest' time
				 * associated with it (first event time)
				 */
				oldest_ts = ts;
				kdbp_vict = kdbp_try;
			}
		}
		if (kdbp_vict == NULL) {
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			retval = FALSE;
			goto out;
		}
		kdsp = kdbp_vict->kd_list_head;
		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

		kdbp_vict->kd_list_head = kdsp_actual->kds_next;

		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
	}
	kdsp_actual->kds_timestamp = mach_absolute_time();
	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
	kdsp_actual->kds_bufcnt = 0;
	kdsp_actual->kds_readlast = 0;

	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
	kdbp->kd_lostevents = FALSE;
	kdsp_actual->kds_bufindx = 0;

	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
		kdbp->kd_list_head = kdsp;
	else
		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
	kdbp->kd_list_tail = kdsp;
out:
	lck_spin_unlock(kds_spin_lock);
	ml_set_interrupts_enabled(s);

	return (retval);
}
#endif

void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag);

__attribute__((always_inline)) void
kernel_debug_internal(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5,
	int		entropy_flag)
{
	struct proc	*curproc;
	uint64_t	now;
	uint32_t	bindx;
	boolean_t	s;
	kd_buf		*kd;
	int		cpu;
	struct kd_bufinfo *kdbp;
	struct kd_storage *kdsp_actual;


	if (kd_ctrl_page.kdebug_slowcheck) {

		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
			kd_chudhook_fn chudhook;
			/*
			 * Mask interrupts to minimize the interval across
			 * which the driver providing the hook could be
			 * unloaded.
			 */
			s = ml_set_interrupts_enabled(FALSE);
			chudhook = kdebug_chudhook;
			if (chudhook)
				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
			ml_set_interrupts_enabled(s);
		}
		if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {

			now = mach_absolute_time();

			s = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(kds_spin_lock);

			if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {

				if (kd_entropy_indx < kd_entropy_count) {
					kd_entropy_buffer[kd_entropy_indx] = now;
					kd_entropy_indx++;
				}
				if (kd_entropy_indx == kd_entropy_count) {
					/*
					 * Disable entropy collection
					 */
					kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
					kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
				}
			}
			lck_spin_unlock(kds_spin_lock);
			ml_set_interrupts_enabled(s);
		}
		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & KDEBUG_ENABLE_TRACE))
			goto out1;

		if ( !ml_at_interrupt_context()) {
			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
				/*
				 * If kdebug flag is not set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && !(curproc->p_kdebug)) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
					goto out1;
			}
			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
				/*
				 * If kdebug flag is set for current proc, return
				 */
				curproc = current_proc();

				if ((curproc && curproc->p_kdebug) &&
				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
					goto out1;
			}
		}
		if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
			if ((debugid < kdlog_beg)
			    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
				goto out1;
		}
		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
			    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
			    (debugid >> 24 != DBG_TRACE))
				goto out1;
		}
	}
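	/*
	 * Fast path: with preemption disabled, reserve a record slot in
	 * the tail storage unit of this cpu's list by atomically bumping
	 * kds_bufindx.  If the unit is full (or a racing writer claims
	 * the slot first), allocate a new unit and retry.
	 */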
	disable_preemption();
	cpu = cpu_number();
	kdbp = &kdbip[cpu];
retry_q:
	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
		bindx = kdsp_actual->kds_bufindx;
	} else
		kdsp_actual = NULL;

	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
		if (allocate_storage_unit(cpu) == FALSE) {
			/*
			 * this can only happen if wrapping
			 * has been disabled
			 */
			goto out;
		}
		goto retry_q;
	}
	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;

	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
		goto retry_q;

	kd = &kdsp_actual->kds_records[bindx];

	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	kdbg_set_timestamp_and_cpu(kd, now, cpu);

	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
out:
	enable_preemption();
out1:
	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
	    (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
		uint32_t	etype;
		uint32_t	stype;

		etype = debugid & DBG_FUNC_MASK;
		stype = debugid & DBG_SCALL_MASK;

		if (etype == INTERRUPT || etype == MACH_vmfault ||
		    stype == BSC_SysCall || stype == MACH_SysCall) {

			boolean_t need_kds_wakeup = FALSE;
			boolean_t need_kde_wakeup = FALSE;

			/*
			 * try to take the lock here to synchronize with the
			 * waiter entering the blocked state... use the try
			 * mode to prevent deadlocks caused by re-entering this
			 * routine due to various trace points triggered in the
			 * lck_spin_sleep_xxxx routines used to actually enter
			 * one of our 2 wait conditions... no problem if we fail,
			 * there will be lots of additional events coming in that
			 * will eventually succeed in grabbing this lock
			 */
			s = ml_set_interrupts_enabled(FALSE);

			if (lck_spin_try_lock(kdw_spin_lock)) {

				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
					kds_waiter = 0;
					need_kds_wakeup = TRUE;
				}
				if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
					kde_waiter = 0;
					need_kde_wakeup = TRUE;
				}
				lck_spin_unlock(kdw_spin_lock);
			}
			ml_set_interrupts_enabled(s);

			if (need_kds_wakeup == TRUE)
				wakeup(&kds_waiter);
			if (need_kde_wakeup == TRUE)
				wakeup(&kde_waiter);
		}
	}
}

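/*
 * kernel_debug() stamps the event with the calling thread's id in
 * arg5; kernel_debug1() lets the caller supply arg5 itself.
 */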
void
kernel_debug(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	__unused uintptr_t arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
}

void
kernel_debug1(
	uint32_t	debugid,
	uintptr_t	arg1,
	uintptr_t	arg2,
	uintptr_t	arg3,
	uintptr_t	arg4,
	uintptr_t	arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
}

/*
 * Support syscall SYS_kdebug_trace
 */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
{
	if ( __probable(kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);

	return(0);
}


static void
kdbg_lock_init(void)
{
	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
		return;

	trace_handler_map_ctrl_page((uintptr_t)&kd_ctrl_page, sizeof(kd_ctrl_page), sizeof(struct kd_storage), sizeof(union kds_ptr));

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();


	/*
	 * allocate and initialize mutexes
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
	kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap(boolean_t early_trace)
{
	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers(early_trace));
}

int
kdbg_reinit(boolean_t early_trace)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap(early_trace);

	RAW_file_offset = 0;
	RAW_file_written = 0;

	return(ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;
}


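/*
 * Pack the leading bytes of the process name into the four long
 * trace arguments; the copy is capped at sizeof(dbg_parms)
 * (16 bytes on ILP32, 32 bytes on LP64).
 */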
void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/*
	 * Collect the pathname for tracing
	 */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = (int)strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = (int)sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}

static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread = (uintptr_t)thread_tid(th_act);

		(void) strlcpy (mapptr->command, t->atts->task_comm,
				sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}

void
kdbg_mapinit(void)
{
	struct proc	*p;
	struct krt	akrt;
	int		tts_count;    /* number of task-to-string structures */
	struct tts	*tts_mapptr;
	unsigned int	tts_mapsize = 0;
	vm_offset_t	tts_maptomem = 0;
	int		i;

	if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
		return;

	/*
	 * need to use PROC_SCANPROCLIST with proc_iterate
	 */
	proc_list_lock();

	/*
	 * Calculate the sizes of map buffers
	 */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p; p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);

	if ((kmem_alloc(kernel_map, &kd_maptomem, (vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	} else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);

	if ((kmem_alloc(kernel_map, &tts_maptomem, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	} else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		/*
		 * should use proc_iterate
		 */
		proc_list_lock();

		for (p = allproc.lh_first, i = 0; p && i < tts_count; p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;

		proc_list_unlock();
	}

	if (kd_mapptr && tts_mapptr) {
		kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;

		/*
		 * Initialize thread map data
		 */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}

static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdbg_set_tracing_enabled(FALSE);

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	global_state_pid = -1;
	kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
	if (kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_mapptr = (kd_threadmap *) 0;
	}
	kd_mapsize = 0;
	kd_mapcount = 0;

	RAW_file_offset = 0;
	RAW_file_written = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid check for this and all pids
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			} else {
				/*
				 * turn off pid check for this pid value
				 * Don't turn off all pid checking though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/*
				 * turn on pid exclusion
				 */
				kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);

				p->p_kdebug = 1;
			}
			else {
				/*
				 * turn off pid exclusion for this pid value
				 * Don't turn off all pid exclusion though
				 *
				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
				 */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	} else
		ret = EINVAL;

	return(ret);
}


/*
 * This is for setting a maximum decrementer value
 */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
	else
		ret = ENOTSUP;

	return(ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;
	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check */
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
		kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check */
		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
		else
			kdbg_set_flags(SLOW_CHECKS, 0, FALSE);

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}

int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int i, j, ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}


int
kdbg_readmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;

	count = avail/sizeof (kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret = EINVAL;
			else
			{
				if (vp)
				{
					RAW_header	header;
					clock_sec_t	secs;
					clock_usec_t	usecs;
					char	*pad_buf;
					int	pad_size;

					header.version_no = RAW_VERSION1;
					header.thread_count = count;

					clock_get_calendar_microtime(&secs, &usecs);
					header.TOD_secs = secs;
					header.TOD_usecs = usecs;

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += sizeof(RAW_header);

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, kd_mapsize, RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += kd_mapsize;

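					/*
					 * zero-fill out to a page boundary,
					 * presumably so any event data appended
					 * to the RAW file later starts
					 * page-aligned
					 */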
					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);

					if (pad_size)
					{
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);

						if (ret)
							goto write_error;
						RAW_file_offset += pad_size;
					}
					RAW_file_written += sizeof(RAW_header) + kd_mapsize + pad_size;

				} else {
					if (copyout(kd_mapptr, buffer, kd_mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp)
	{
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
		RAW_file_written += sizeof(uint32_t);
	}
write_error:
	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}

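/*
 * Back end for KERN_KDGETENTROPY: while entropy sampling is enabled,
 * kernel_debug_internal() records the mach_absolute_time() of each
 * eligible trace point into kd_entropy_buffer.  We sleep until the
 * buffer fills or the caller's ms_timeout expires, then copy out
 * whatever was collected.
 */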
int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;
	int s;
	u_int64_t abstime;
	u_int64_t ns;
	int wait_result = THREAD_AWAKENED;


	if (kd_entropy_buffer)
		return(EBUSY);

	if (ms_timeout < 0)
		return(EINVAL);

	kd_entropy_count = avail/sizeof(uint64_t);

	if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
		/*
		 * Enforce maximum entropy entries
		 */
		return(EINVAL);
	}
	kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);

	/*
	 * allocate entropy buffer
	 */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	} else {
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;

		return (ENOMEM);
	}
	kd_entropy_indx = 0;

	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);

	/*
	 * Enable entropy sampling
	 */
	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);

	if (ms_timeout) {
		ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
		nanoseconds_to_absolutetime(ns, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	} else
		abstime = 0;

	s = ml_set_interrupts_enabled(FALSE);
	lck_spin_lock(kdw_spin_lock);

	while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {

		kde_waiter = 1;

		if (abstime) {
			/*
			 * wait for the specified timeout or
			 * until we've hit our sample limit
			 */
			wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
		} else {
			/*
			 * wait until we've hit our sample limit
			 */
			wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
		}
		kde_waiter = 0;
	}
	lck_spin_unlock(kdw_spin_lock);
	ml_set_interrupts_enabled(s);

	/*
	 * Disable entropy sampling
	 */
	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);

	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/*
		 * copyout the buffer
		 */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx * sizeof(uint64_t);
	}
	/*
	 * Always cleanup
	 */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}


static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}


/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */

void
kdbg_control_chud(int val, void *fn)
{
	kdbg_lock_init();

	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
	}
	else {
		/* disable chudhook */
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
		kdebug_chudhook = 0;
	}
}


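/*
 * sysctl back end for the KERN_KD* trace controls.  Ownership of the
 * facility is tracked via global_state_pid: the first controlling
 * process claims it, and later callers are refused with EBUSY while
 * that process is still alive; KERN_KDREMOVE (kdbg_clear()) resets it.
 */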
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	proc_t p, curproc;

	if (name[0] == KERN_KDGETENTROPY ||
	    name[0] == KERN_KDWRITETR ||
	    name[0] == KERN_KDWRITEMAP ||
	    name[0] == KERN_KDEFLAGS ||
	    name[0] == KERN_KDDFLAGS ||
	    name[0] == KERN_KDENABLE ||
	    name[0] == KERN_KDSETBUF) {

		if (namelen < 2)
			return(EINVAL);
		value = name[1];
	}

	kdbg_lock_init();

	if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
		return(ENOSPC);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			ret = EINVAL;
			goto out;
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;

		kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
#if defined(__LP64__)
		kd_bufinfo.flags |= KDBG_LP64;
#endif
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
				ret = EINVAL;
		} else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size))
				ret = EINVAL;
		}
		goto out;

	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		goto out;
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		ret = ESRCH;
		goto out;
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);

			ret = EBUSY;
			goto out;
		}
	}

	switch (name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kd_ctrl_page.kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kd_ctrl_page.kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:
		/*
		 * used to enable or disable
		 */
		if (value) {
			/*
			 * enable only if buffer is initialized
			 */
			if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdbg_set_tracing_enabled(TRUE);
		}
		else
			kdbg_set_tracing_enabled(FALSE);
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit(FALSE);
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep, NULL, NULL);
		break;
	case KERN_KDWRITETR:
	case KERN_KDWRITEMAP:
	{
		struct	vfs_context context;
		struct	fileproc *fp;
		size_t	number;
		vnode_t	vp;
		int	fd;

		if (name[0] == KERN_KDWRITETR) {
			int s;
			int wait_result = THREAD_AWAKENED;
			u_int64_t abstime;
			u_int64_t ns;

			if (*sizep) {
				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
				nanoseconds_to_absolutetime(ns, &abstime);
				clock_absolutetime_interval_to_deadline(abstime, &abstime);
			} else
				abstime = 0;

			s = ml_set_interrupts_enabled(FALSE);
			lck_spin_lock(kdw_spin_lock);

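			/*
			 * For KERN_KDWRITETR, *sizep carries a timeout in
			 * milliseconds: sleep until at least half of the
			 * storage units are in use (n_storage_threshold)
			 * or the deadline passes, so we don't write out a
			 * mostly-empty buffer.
			 */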
1851 while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
1852
1853 kds_waiter = 1;
1854
1855 if (abstime)
1856 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
1857 else
1858 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
1859
1860 kds_waiter = 0;
1861 }
1862 lck_spin_unlock(kdw_spin_lock);
1863 ml_set_interrupts_enabled(s);
1864 }
			p = current_proc();
			fd = value;

			proc_fdlock(p);
			if ((ret = fp_lookup(p, fd, &fp, 1))) {
				proc_fdunlock(p);
				break;
			}
			context.vc_thread = current_thread();
			context.vc_ucred = fp->f_fglob->fg_cred;

			if (fp->f_fglob->fg_type != DTYPE_VNODE) {
				fp_drop(p, fd, fp, 1);
				proc_fdunlock(p);

				ret = EBADF;
				break;
			}
			vp = (struct vnode *)fp->f_fglob->fg_data;
			proc_fdunlock(p);

			if ((ret = vnode_getwithref(vp)) == 0) {

				if (name[0] == KERN_KDWRITETR) {
					number = nkdbufs * sizeof(kd_buf);

					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
					ret = kdbg_read(0, &number, vp, &context);
					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);

					*sizep = number;
				} else {
					number = kd_mapsize;
					kdbg_readmap(0, &number, vp, &context);
				}
				vnode_put(vp);
			}
			fp_drop(p, fd, fp, 0);

			break;
		}
		case KERN_KDPIDTR:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			ret = kdbg_setpid(&kd_Reg);
			break;
		case KERN_KDPIDEX:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			ret = kdbg_setpidex(&kd_Reg);
			break;
		case KERN_KDTHRMAP:
			ret = kdbg_readmap(where, sizep, NULL, NULL);
			break;
		case KERN_KDSETRTCDEC:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			ret = kdbg_setrtcdec(&kd_Reg);
			break;

		default:
			ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
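
/*
 * Illustrative sketch (not part of the original source): user space drives
 * the handler above through sysctl(2) with a { CTL_KERN, KERN_KDEBUG, op,
 * value } MIB, the pattern used by tools such as fs_usage. The helper name
 * and the buffer count below are invented for illustration; error handling
 * is elided.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

static int
kdebug_op(int op, int value, void *buf, size_t *len)
{
	int mib[4] = { CTL_KERN, KERN_KDEBUG, op, value };

	return sysctl(mib, 4, buf, len, NULL, 0);
}

static void
trace_session(void)
{
	size_t len = 0;

	kdebug_op(KERN_KDSETBUF, 8192, NULL, &len);	/* request 8192 kd_bufs */
	kdebug_op(KERN_KDSETUP, 0, NULL, &len);		/* allocate the buffers */
	kdebug_op(KERN_KDENABLE, 1, NULL, &len);	/* start tracing */
	/* ... run the workload, then stop and read via KERN_KDREADTR ... */
	kdebug_op(KERN_KDENABLE, 0, NULL, &len);
}
#endif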

/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock, which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists without taking any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, min_cpu;
	uint64_t mintime, t;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t rcursor;
	kd_buf lostevent;
	union kds_ptr kdsp;
	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	struct kd_bufinfo *min_kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;
	boolean_t lostevents = FALSE;
	boolean_t out_of_events = FALSE;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	memset(&lostevent, 0, sizeof(lostevent));
	lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk, which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */

	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

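	/*
	 * Each pass below drains at most KDCOPYBUF_COUNT events: the inner
	 * loop repeatedly looks at every CPU's oldest unread event and emits
	 * the one with the smallest timestamp; in effect, an N-way merge of
	 * the per-CPU streams into a single time-ordered stream.
	 */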
	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;
			min_kdbp = NULL;
			min_cpu = 0;

			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_cpus; cpu++, kdbp++) {

				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
					continue;
				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

				rcursor = kdsp_actual->kds_readlast;

				if (rcursor == kdsp_actual->kds_bufindx)
					continue;

				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

				if (t < kdsp_actual->kds_timestamp) {
					/*
					 * indicates we've not yet completed filling
					 * in this event...
					 * this should only occur when we're looking
					 * at the buf that the record head is utilizing...
					 * we'll pick these events up on the next
					 * call to kdbg_read...
					 * we bail at this point so that we don't
					 * get an out-of-order timestream by continuing
					 * to read events from the other CPUs' timestream(s)
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < mintime) {
					mintime = t;
					min_kdbp = kdbp;
					min_cpu = cpu;
				}
			}
			if (min_kdbp == NULL || out_of_events == TRUE) {
				/*
				 * all buffers ran empty
				 */
				out_of_events = TRUE;
				break;
			}
			kdsp = min_kdbp->kd_list_head;
			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

			if (kdsp_actual->kds_lostevents == TRUE) {
				lostevent.timestamp = kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp;
				*tempbuf = lostevent;

				kdsp_actual->kds_lostevents = FALSE;
				lostevents = TRUE;

				goto nextevent;
			}
			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
				release_storage_unit(min_cpu, kdsp.raw);

			/*
			 * Watch for out-of-order timestamps
			 */
			if (mintime < min_kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				min_kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
			} else
				min_kdbp->kd_prev_timebase = mintime;
nextevent:
			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;

			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
				break;
		}
		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

				if (RAW_file_written >= RAW_FLUSH_SIZE) {
					cluster_push(vp, 0);

					RAW_file_written = 0;
				}
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (out_of_events == TRUE)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if (!(old_kdebug_flags & KDBG_NOWRAP)) {
		enable_wrap(old_kdebug_slowcheck, lostevents);
	}
	return (error);
}


unsigned char *getProcName(struct proc *proc);
unsigned char *
getProcName(struct proc *proc)
{
	return (unsigned char *)&proc->p_comm;	/* Return pointer to the proc name */
}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#if defined(__i386__) || defined(__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
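
/*
 * TRAP_DEBUGGER enters the kernel debugger via a breakpoint trap; the
 * debugger path consumes the parameters staged by kdp_snapshot_preflight()
 * and fills the snapshot buffer while normal execution is suspended.
 */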

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init(void)
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *                   on the system, tracing both kernel and user stacks
 *                   where available. Uses machine-specific trace routines
 *                   for ppc, ppc64 and x86.
 * Inputs:           uap->pid - process id of process to be traced, or -1
 *                   for the entire system
 *                   uap->tracebuf - address of the user space destination
 *                   buffer
 *                   uap->tracebuf_size - size of the user space trace buffer
 *                   uap->flags - various options, including the maximum
 *                   number of frames to trace.
 * Outputs:          EPERM if the caller is not privileged
 *                   EINVAL if the supplied trace buffer isn't sanely sized
 *                   ENOMEM if we don't have enough memory to satisfy the
 *                   request
 *                   ENOENT if the target pid isn't found
 *                   ENOSPC if the supplied buffer is insufficient
 *                   *retval contains the number of bytes traced, if successful
 *                   and -1 otherwise. If the request failed due to
 *                   tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval)
{
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
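
/*
 * Illustrative sketch (not part of the original source): calling the
 * snapshot interface from user space. The syscall name (SYS_stack_snapshot)
 * and the buffer size are assumptions; per the contract documented above, a
 * non-negative return is the number of bytes traced.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static int
take_snapshot(void)
{
	static char tracebuf[1024 * 1024];

	/* pid -1 == trace the entire system; flags and dispatch_offset 0 */
	return syscall(SYS_stack_snapshot, -1, tracebuf, sizeof(tracebuf), 0, 0);
}
#endif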

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;
	boolean_t istate;

	*retval = -1;

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}

	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this
	 * populates the trace buffer
	 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
		                     ((bytesTraced < tracebuf_size) ?
		                       bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	} else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}

void
start_kern_tracing(unsigned int new_nkdbufs)
{
	if (!new_nkdbufs)
		return;
	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit(TRUE);
	kdbg_set_tracing_enabled(TRUE);

#if defined(__i386__) || defined(__x86_64__)
	uint64_t now = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
			      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
			      (uint32_t)(now >> 32), (uint32_t)now,
			      0);
#endif
	printf("kernel tracing started\n");
}
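
/*
 * Illustrative sketch (not part of the original source): a plausible
 * boot-time caller. The "trace" boot-arg name is an assumption here; the
 * PE_parse_boot_argn() interface is not.
 */
#if 0
#include <pexpert/pexpert.h>

static void
early_trace_init(void)
{
	unsigned int boot_nkdbufs = 0;

	if (PE_parse_boot_argn("trace", &boot_nkdbufs, sizeof(boot_nkdbufs)))
		start_kern_tracing(boot_nkdbufs);
}
#endif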

void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t ctx;
	vnode_t vp;
	int error;
	size_t number;


	if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 */
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;
	kd_ctrl_page.enabled = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapsize;
	kdbg_readmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}
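
/*
 * Illustrative sketch (not part of the original source): a file cut by
 * kdbg_dump_trace_to_file() is the thread-name map written by kdbg_readmap()
 * followed by raw kd_buf records from kdbg_read(). A minimal reader for the
 * event portion, assuming the map has already been skipped and the reader
 * shares the kernel's kd_buf layout (on some configurations the top bits of
 * the timestamp encode the CPU and must be masked off):
 */
#if 0
#include <stdio.h>
#include <sys/kdebug.h>

static void
dump_events(FILE *f)
{
	kd_buf ev;

	while (fread(&ev, sizeof(ev), 1, f) == 1)
		printf("debugid %#x timestamp %llu\n",
		       ev.debugid, (unsigned long long)ev.timestamp);
}
#endif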

/* Helper function for filling in the BSD name for an address space.
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
	proc_t proc;

	/* Note: we can't use thread->task (and functions that rely on it) here
	 * because it hasn't been initialized yet when this function is called.
	 * We use the explicitly-passed task parameter instead.
	 */
	proc = get_bsdtask_info(task);
	if (proc != PROC_NULL)
		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
	else
		snprintf(name_buf, len, "%p [!bsd]", task);
}
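
/*
 * Illustrative usage (not part of the original source): a machine binding
 * holding only a task can label it like so; the 64-byte buffer is an
 * arbitrary choice.
 */
#if 0
static void
label_task(task_t task)
{
	char name_buf[64];

	kdbg_get_task_name(name_buf, sizeof(name_buf), task);
	/* name_buf now holds "p_comm/pid", or "<address> [!bsd]" if no proc */
}
#endif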

#if defined(NATIVE_TRACE_FACILITY)
void trace_handler_map_ctrl_page(__unused uintptr_t addr, __unused size_t ctrl_page_size, __unused size_t storage_size, __unused size_t kds_ptr_size)
{
}
void trace_handler_map_bufinfo(__unused uintptr_t addr, __unused size_t size)
{
}
void trace_handler_unmap_bufinfo(void)
{
}
void trace_handler_map_buffer(__unused int index, __unused uintptr_t addr, __unused size_t size)
{
}
void trace_handler_unmap_buffer(__unused int index)
{
}
#endif