/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @Apple_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/kauth.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>

/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

#ifdef ppc
extern uint32_t maxDec;
#endif

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;

struct kd_bufinfo {
	kd_buf *kd_stop;
	kd_buf *kd_bufptr;
	kd_buf *kd_buffer;
	kd_buf *kd_buflast;
	kd_buf *kd_readlast;
	int kd_wrapped;		/* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
	uint64_t kd_prev_timebase;
	int kd_pad[24];		/* pad out to 128 bytes so that no cache line is shared between CPUs */
};

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	1024
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

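/*
 * Buffering scheme, as implemented below: each CPU gets its own ring of
 * kd_buf entries (kdbip[cpu]), written lock-free with interrupts disabled,
 * while kdcopybuf is a single staging area that kdbg_read() uses to batch
 * up to KDCOPYBUF_COUNT merged events per copyout() to user space.
 */
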
unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_mtx_t *kd_trace_mtx_sysctl;
static lck_grp_t *kd_trace_mtx_sysctl_grp;
static lck_attr_t *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t *stackshot_subsys_lck_attr;
static lck_mtx_t stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

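/*
 * For reference (layout from <sys/kdebug.h>): a debugid packs the event
 * class into bits 31-24, the subclass into bits 23-16, the code into bits
 * 15-2, and the DBG_FUNC_START/DBG_FUNC_END qualifier into the low 2 bits;
 * DBG_FUNC_MASK strips only that qualifier.
 */
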
/* task to string structure */
struct tts
{
	task_t task;		/* from procs task */
	pid_t pid;		/* from procs p_pid */
	char task_comm[20];	/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init(void) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
{
	if (kdebug_enable == 0)
		return (EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return (0);
}

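/*
 * Illustrative user-space sketch (not part of this file): an event is cut
 * from user space via the SYS_kdebug_trace syscall, e.g.
 *
 *	syscall(SYS_kdebug_trace,
 *		MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE,
 *		arg1, arg2, arg3, arg4);
 *
 * which lands in kdebug_trace() above and, when tracing is enabled,
 * records the event with arg5 set to the calling thread by kernel_debug().
 */
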
static int
create_buffers(void)
{
	unsigned int cpu, i;
	int nentries;

	nentries = nkdbufs / kd_cpus;
	nkdbufs = nentries * kd_cpus;

	kd_bufsize = nentries * sizeof(kd_buf);

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
			return (ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
			break;
	}
	if (cpu < kd_cpus) {
		for (i = 0; i < cpu; i++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
		kd_bufsize = 0;

		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;

		return (ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
		kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
		kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
	}
	kdebug_flags |= KDBG_BUFINIT;

	return (0);
}


static void
delete_buffers(void)
{
	unsigned int cpu;

	if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
		for (cpu = 0; cpu < kd_cpus; cpu++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
		kd_bufsize = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;
	}
	kdebug_flags &= ~KDBG_BUFINIT;
}


static void
kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
		      unsigned int arg4, unsigned int arg5, int entropy_flag)
{
	int s;
	kd_buf *kd;
	struct proc *curproc;
	unsigned long long now;
	int cpu;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}

	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}
		if (kd_entropy_indx == kd_entropy_count) {
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}
	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();
		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}

	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}

record_trace:
	kd = kdbip[cpu].kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	/*
	 * Watch for out of order timestamps
	 */
	if (now < kdbip[cpu].kd_prev_timebase) {
		/*
		 * if so, just store the previous timestamp + a cycle
		 */
		now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	}
	else {
		kdbip[cpu].kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);

	kdbip[cpu].kd_bufptr++;

	if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;

	if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdbip[cpu].kd_wrapped = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}
out:
	ml_set_interrupts_enabled(s);
}

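/*
 * Note on the fast path above: kernel_debug_internal() takes no locks.
 * Disabling interrupts keeps the thread on one CPU, each CPU owns a
 * cache-line-padded kd_bufinfo slot, and the owning CPU number is folded
 * into the high bits of the timestamp (KDBG_CPU_SHIFT) so a reader can
 * attribute events after the per-cpu streams are merged.
 */
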
void
kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	     unsigned int arg4, __unused unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
}

void
kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	      unsigned int arg4, unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}

static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.logical_cpu_max;

	if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize the mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}

int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}

int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *)0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap();

	return (ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;

	return;
}


void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc) {
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}
	/* Collect the pathname for tracing */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}

static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount) {
		mapptr = &t->map[t->count];
		mapptr->thread = (unsigned int)th_act;
		(void) strlcpy(mapptr->command, t->atts->task_comm,
			       sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}

void
kdbg_mapinit(void)
{
	struct proc *p;
	struct krt akrt;
	int tts_count;		/* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	unsigned int tts_maptomem = 0;
	int i;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* need to use PROC_SCANPROCLIST with proc_iterate */
	proc_list_lock();

	/* Calculate the sizes of map buffers */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
	     p = p->p_list.le_next) {
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}
	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if ((kmem_alloc(kernel_map, &kd_maptomem,
			(vm_size_t)kd_mapsize) == KERN_SUCCESS)) {
		kd_mapptr = (kd_threadmap *)kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	}
	else
		kd_mapptr = (kd_threadmap *)0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if ((kmem_alloc(kernel_map, &tts_maptomem,
			(vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
		tts_mapptr = (struct tts *)tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	}
	else
		tts_mapptr = (struct tts *)0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		/* should use proc_iterate */
		proc_list_lock();

		for (p = allproc.lh_first, i = 0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;

		proc_list_unlock();
	}

	if (kd_mapptr && tts_mapptr) {
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++) {
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t)tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}

static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *)0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/* turn on pid check for this and all pids */
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else {
				/* turn off pid check for this pid value */
				/* Don't turn off all pid checking though */
				/* kdebug_flags &= ~KDBG_PIDCHECK; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;
	return (ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0) {
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else {
			if (flag == 1) {
				/* turn on pid exclusion */
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else {
				/* turn off pid exclusion for this pid value */
				/* Don't turn off all pid exclusion though */
				/* kdebug_flags &= ~KDBG_PIDEXCLUDE; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;
	return (ret);
}

/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
#ifdef ppc
	else {
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
	}
#else
	else
		ret = ENOTSUP;
#endif /* ppc */

	return (ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;	/* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;		/* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return (ret);
}

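/*
 * Example (values from <sys/kdebug.h>, shown for illustration): a
 * KDBG_SUBCLSTYPE request with value1 = DBG_MACH (0x01) and value2 =
 * DBG_MACH_SCHED (0x40) sets kdlog_beg = 0x01400000 and kdlog_end =
 * 0x01410000, so only scheduler events (and DBG_TRACE codes) survive
 * the range check in kernel_debug_internal().
 */
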
int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int i, j, ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return (EINVAL);
}


int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int ret = 0;
	unsigned int count = 0;

	count = avail / sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount)) {
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
			if (*number < kd_mapsize)
				ret = EINVAL;
			else {
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret = EINVAL;
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *)0;
		kd_mapcount = 0;
	}
	return (ret);
}

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return (EBUSY);

	kd_entropy_count = avail / sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
		kd_entropy_buffer = (uint64_t *)kd_entropy_buftomem;
	}
	else {
		kd_entropy_buffer = (uint64_t *)0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout / (1000 / HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0) {
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}
	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *)0;
	return (ret);
}

static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size / 4) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}
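
/*
 * Worked example (assuming the 32-byte kd_buf of this era): with
 * sane_size = 512MB, max_entries = (512MB / 4) / 32 = 4194304, so a
 * request for more than ~4M entries is silently clamped to that ceiling.
 */
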
/*
 * This function is provided for the CHUD toolkit only.
 * int val:
 *	zero disables kdebug_chudhook function call
 *	non-zero enables kdebug_chudhook function call
 * void *fn:
 *	address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}

int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	if (name[0] == KERN_KDGETENTROPY ||
	    name[0] == KERN_KDEFLAGS ||
	    name[0] == KERN_KDDFLAGS ||
	    name[0] == KERN_KDENABLE ||
	    name[0] == KERN_KDSETBUF) {

		if (namelen < 2)
			return (EINVAL);
		value = name[1];
	}

	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return (ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return (EINVAL);
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return (EINVAL);
			}
		}
		else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size)) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return (EINVAL);
			}
		}
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (0);
	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ret);
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ESRCH);
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return (EBUSY);
		}
	}

	switch (name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:	/* used to enable or disable */
		if (value) {
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT)) {
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else {
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	default:
		ret = EINVAL;
	}
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return (ret);
}
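
/*
 * Illustrative control sequence (a sketch of how a tracing tool typically
 * drives this entry point through the CTL_KERN/KERN_KDEBUG sysctl MIB;
 * exact usage varies by tool and release):
 *
 *	int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, 8192 };
 *	sysctl(mib, 4, NULL, NULL, NULL, 0);	size the buffers
 *	mib[2] = KERN_KDSETUP;
 *	sysctl(mib, 3, NULL, NULL, NULL, 0);	allocate via kdbg_reinit()
 *	mib[2] = KERN_KDENABLE; mib[3] = 1;
 *	sysctl(mib, 4, NULL, NULL, NULL, 0);	start tracing
 *	mib[2] = KERN_KDREADTR;
 *	sysctl(mib, 3, events, &len, NULL, 0);	drain via kdbg_read()
 */
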
/*
 * This code can run concurrently with kernel_debug_internal()
 * without the need of any locks, because all reads of kd_bufptr[i],
 * which get modified by kernel_debug_internal(), are safe.
 */
int
kdbg_read(user_addr_t buffer, size_t *number)
{
	unsigned int count;
	unsigned int cpu;
	int mincpu;
	uint64_t mintime, t, last_wrap_time;
	int last_wrap_cpu;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	unsigned int old_kdebug_flags, new_kdebug_flags;
	unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
	boolean_t first_event = TRUE;

	count = *number / sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return (EINVAL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it only reads kdebug_flags, it doesn't write it..
	 * use an OSCompareAndSwap to make sure the other processors see the
	 * change of state immediately, not to protect against 2 threads racing to update it
	 */
	old_kdebug_slowcheck = kdebug_slowcheck;
	do {
		old_kdebug_flags = kdebug_flags;
		new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
		new_kdebug_flags |= KDBG_NOWRAP;
	} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

	last_wrap_time = 0;
	last_wrap_cpu = -1;

	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kd_buf *cur_bufptr;

		if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
			cur_bufptr = kdbip[cpu].kd_buffer;

		if (kdbip[cpu].kd_wrapped) {
			kdbip[cpu].kd_wrapped = 0;
			kdbip[cpu].kd_readlast = cur_bufptr;
			kdbip[cpu].kd_stop = cur_bufptr;

			if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
				last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
				last_wrap_cpu = cpu;
			}
		} else {
			if (kdbip[cpu].kd_readlast == cur_bufptr)
				kdbip[cpu].kd_stop = 0;
			else
				kdbip[cpu].kd_stop = cur_bufptr;
		}
	}
	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	if (last_wrap_cpu == -1)
		first_event = FALSE;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = -1;

			for (cpu = 0; cpu < kd_cpus; cpu++) {
				if (kdbip[cpu].kd_stop == 0)	/* empty buffer */
					continue;
				t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;

				if (t < mintime) {
					mintime = t;
					mincpu = cpu;
				}
			}
			if (mincpu < 0)
				/*
				 * all buffers ran empty early
				 */
				break;

			if (first_event == TRUE) {
				/*
				 * make sure we leave room for the
				 * LAST_WRAPPER event we inject
				 * by throwing away the first event
				 * it's better to lose that one
				 * than the last one
				 */
				first_event = FALSE;

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;

				continue;
			}
			if (last_wrap_cpu == mincpu) {
				tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
				tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
				tempbuf->arg2 = kd_cpus;
				tempbuf->arg3 = 0;
				tempbuf->arg4 = 0;
				tempbuf->arg5 = (int)current_thread();

				tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);

				tempbuf++;

				last_wrap_cpu = -1;

			} else {
				*(tempbuf++) = kdbip[mincpu].kd_readlast[0];

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;
			}
			tempbuf_count--;
			tempbuf_number++;
		}
		if (tempbuf_number) {
			if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
			buffer += (tempbuf_number * sizeof(kd_buf));
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		do {
			old_kdebug_flags = kdebug_flags;
			new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
		} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
			do {
				old_kdebug_slowcheck = kdebug_slowcheck;
				new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
			} while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
		}
	}
	return (error);
}
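
/*
 * Reader-side note: kdbg_read() above performs an N-way merge of the
 * per-cpu rings, repeatedly emitting the event with the smallest
 * timestamp and staging up to KDCOPYBUF_COUNT events in kdcopybuf per
 * copyout().  When a ring has wrapped, a synthetic MISCDBG_CODE
 * (DBG_BUFFER) event is injected so user tools can tell that older
 * events were lost.
 */
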
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#ifdef __i386__
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE 2*1024*1024

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *		     on the system, tracing both kernel and user stacks
 *		     where available. Uses machine specific trace routines
 *		     for ppc, ppc64 and x86.
 * Inputs:	     uap->pid - process id of process to be traced, or -1
 *		     for the entire system
 *		     uap->tracebuf - address of the user space destination
 *		     buffer
 *		     uap->tracebuf_size - size of the user space trace buffer
 *		     uap->options - various options, including the maximum
 *		     number of frames to trace.
 * Outputs:	     EPERM if the caller is not privileged
 *		     EINVAL if the supplied trace buffer isn't sanely sized
 *		     ENOMEM if we don't have enough memory to satisfy the
 *		     request
 *		     ENOENT if the target pid isn't found
 *		     ENOSPC if the supplied buffer is insufficient
 *		     *retval contains the number of bytes traced, if successful
 *		     and -1 otherwise. If the request failed due to
 *		     tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return (error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
			       uap->options, retval);
}

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;

	*retval = -1;
	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

	if (stackshot_snapbuf == NULL) {
		error = ENOMEM;
		goto error_exit;
	}
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	TRAP_DEBUGGER;

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
				     ((bytesTraced < tracebuf_size) ?
				      bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		FREE(stackshot_snapbuf, M_TEMP);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
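
/*
 * Design note: the snapshot itself is taken inside the debugger trap
 * (TRAP_DEBUGGER), where the machine is quiesced, so the per-thread traces
 * are mutually coherent; copyout() cannot run in that context, which is
 * why the data is staged in the kernel buffer stackshot_snapbuf and copied
 * out only after normal execution resumes.
 */
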
void
start_kern_tracing(unsigned int new_nkdbufs) {

	if (!new_nkdbufs)
		return;
	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();
	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();
	printf("kernel tracing started\n");
}
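
/*
 * start_kern_tracing() mirrors the KERN_KDSETBUF/KERN_KDSETUP/KERN_KDENABLE
 * sequence for use during early boot, before user space exists; in xnu
 * sources of this vintage it is reached from BSD init when a "trace"
 * boot-arg requests tracing from bootstrap (noted here as context drawn
 * from contemporaneous sources, not established by this file).
 */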