/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/kauth.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>

/* XXX these should have prototypes, but Mach does not provide them */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

#ifdef ppc
extern uint32_t maxDec;
#endif

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;

#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;

struct kd_bufinfo {
	kd_buf	*kd_stop;
	kd_buf	*kd_bufptr;
	kd_buf	*kd_buffer;
	kd_buf	*kd_buflast;
	kd_buf	*kd_readlast;
	int	kd_wrapped;	/* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
	uint64_t kd_prev_timebase;
	int	kd_pad[24];	/* pad out to 128 bytes so that no cache line is shared between CPUs */
};

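/*
 * Editor's illustration (an addition, not in the original source): the
 * kd_pad sizing assumes the whole struct rounds to a 128-byte multiple so
 * that no two CPUs' bookkeeping ever shares a cache line. On this era's
 * 32-bit kernel that is 5 pointers + an int + a uint64_t (32 bytes) plus
 * the 96-byte pad. A negative-array-size assertion makes that explicit:
 */
typedef char _kd_bufinfo_pad_check[(sizeof(struct kd_bufinfo) % 128 == 0) ? 1 : -1];
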
struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	1024
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;

unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_mtx_t	*kd_trace_mtx_sysctl;
static lck_grp_t	*kd_trace_mtx_sysctl_grp;
static lck_attr_t	*kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t	*kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t	*stackshot_subsys_lck_grp;
static lck_grp_attr_t	*stackshot_subsys_lck_grp_attr;
static lck_attr_t	*stackshot_subsys_lck_attr;
static lck_mtx_t	stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

/* task-to-string structure */
struct tts
{
	task_t	task;		/* from proc's task */
	pid_t	pid;		/* from proc's p_pid */
	char	task_comm[20];	/* from proc's p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap	*map;	/* pointer to the map buffer */
	int		count;
	int		maxcount;
	struct tts	*atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init(void) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
{
	if (kdebug_enable == 0)
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);

	return(0);
}

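/*
 * Editor's sketch of the user-space side (not part of the original file):
 * the syscall passes a debugid plus four arguments; arg5 is always replaced
 * with the calling thread by kernel_debug(). Assuming an arbitrary example
 * code built with the KDBG_CODE() macro from <sys/kdebug.h>:
 *
 *	#include <sys/kdebug.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define MY_CODE	KDBG_CODE(DBG_MISC, 0, 1)	// hypothetical code
 *
 *	syscall(SYS_kdebug_trace, MY_CODE | DBG_FUNC_START, 1, 2, 3, 4);
 *	syscall(SYS_kdebug_trace, MY_CODE | DBG_FUNC_END,   0, 0, 0, 0);
 */
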
static int
create_buffers(void)
{
	unsigned int cpu, i;
	int nentries;

	nentries = nkdbufs / kd_cpus;
	nkdbufs = nentries * kd_cpus;

	kd_bufsize = nentries * sizeof(kd_buf);

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
			return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
			break;
	}
	if (cpu < kd_cpus) {
		for (i = 0; i < cpu; i++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
		kd_bufsize = 0;

		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;

		return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
		kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
		kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
	}
	kdebug_flags |= KDBG_BUFINIT;

	return(0);
}

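/*
 * Worked example (editor's addition): the divide/multiply above rounds the
 * request down to a whole number of per-CPU entries -- with nkdbufs = 8192
 * and kd_cpus = 3, nentries = 2730 and nkdbufs is trimmed to 8190, so every
 * CPU gets an identically sized ring.
 */
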
static void
delete_buffers(void)
{
	unsigned int cpu;

	if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
		for (cpu = 0; cpu < kd_cpus; cpu++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
		kd_bufsize = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;
	}
	kdebug_flags &= ~KDBG_BUFINIT;
}

static void
kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
		      unsigned int arg4, unsigned int arg5, int entropy_flag)
{
	int s;
	kd_buf *kd;
	struct proc *curproc;
	unsigned long long now;
	int cpu;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}

	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY)) {
		if (kd_entropy_indx < kd_entropy_count) {
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}

		if (kd_entropy_indx == kd_entropy_count) {
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}

	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK) {
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();

		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE) {
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();

		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}

	if (kdebug_flags & KDBG_RANGECHECK) {
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK) {
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}

record_trace:
	kd = kdbip[cpu].kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	/*
	 * Watch for out of order timestamps
	 */
	if (now < kdbip[cpu].kd_prev_timebase) {
		/*
		 * if so, just store the previous timestamp + a cycle
		 */
		now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	}
	else {
		kdbip[cpu].kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);

	kdbip[cpu].kd_bufptr++;

	if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;

	if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdbip[cpu].kd_wrapped = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}
out:
	ml_set_interrupts_enabled(s);
}

void
kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	     unsigned int arg4, __unused unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
}

void
kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	      unsigned int arg4, unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}

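/*
 * Editor's note (not in the original): in-kernel call sites normally reach
 * kernel_debug() through the KERNEL_DEBUG_CONSTANT()/KERNEL_DEBUG() macros
 * in <sys/kdebug.h>, which test kdebug_enable before paying for the call,
 * along the lines of
 *
 *	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE,
 *			      arg1, arg2, arg3, arg4, 0);
 */
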
static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.physical_cpu_max;

	if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize the mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}

int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}

int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	ret = kdbg_bootstrap();

	return(ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;

	return;
}

void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc)
	{
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}

	/* Collect the command name for tracing */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}

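/*
 * Editor's illustration (not in the original): the four longs above carry
 * the first 16 bytes of p_comm, so a trace consumer can reassemble the name
 * with
 *
 *	char name[17];
 *	memcpy(name,      &arg1, 4);
 *	memcpy(name +  4, &arg2, 4);
 *	memcpy(name +  8, &arg3, 4);
 *	memcpy(name + 12, &arg4, 4);
 *	name[16] = '\0';
 *
 * assuming the 4-byte longs of this era's 32-bit kernel.
 */
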
static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount)
	{
		mapptr = &t->map[t->count];
		mapptr->thread = (unsigned int)th_act;
		(void) strlcpy(mapptr->command, t->atts->task_comm,
			       sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}

void
kdbg_mapinit(void)
{
	struct proc *p;
	struct krt akrt;
	int tts_count;		/* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	unsigned int tts_maptomem = 0;
	int i;

	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* need to use PROC_SCANPROCLIST with proc_iterate */
	proc_list_lock();

	/* Calculate the sizes of map buffers */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
	     p = p->p_list.le_next)
	{
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}

	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if ((kmem_alloc(kernel_map, &kd_maptomem,
			(vm_size_t)kd_mapsize) == KERN_SUCCESS))
	{
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	}
	else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if ((kmem_alloc(kernel_map, &tts_maptomem,
			(vm_size_t)tts_mapsize) == KERN_SUCCESS))
	{
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	}
	else
		tts_mapptr = (struct tts *) 0;

	/*
	 * We need to save the procs command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		/* should use proc_iterate */
		proc_list_lock();

		for (p = allproc.lh_first, i = 0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;

		proc_list_unlock();
	}

	if (kd_mapptr && tts_mapptr)
	{
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++)
		{
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}

static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid check for this and all pids */
			{
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else		/* turn off pid check for this pid value */
			{
				/* Don't turn off all pid checking though */
				/* kdebug_flags &= ~KDBG_PIDCHECK; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid exclusion */
			{
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else		/* turn off pid exclusion for this pid value */
			{
				/* Don't turn off all pid exclusion though */
				/* kdebug_flags &= ~KDBG_PIDEXCLUDE; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
#ifdef ppc
	else {
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
	}
#else
	else
		ret = ENOTSUP;
#endif /* ppc */

	return(ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;	/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;	/* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;		/* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
	return(ret);
}

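/*
 * Editor's note (not in the original): a debugid packs
 * class(8) | subclass(8) | code(14) | function-qualifier(2), which is why
 * KDBG_CLASSTYPE compares against (val << 24), KDBG_SUBCLSTYPE against
 * (val << 16), and why DBG_FUNC_MASK (0xfffffffc) strips the
 * START/END/NONE qualifier bits before the KDBG_VALCHECK comparisons.
 */
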
int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int i, j, ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default :
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}

int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int ret = 0;
	unsigned int count = 0;

	count = avail / sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret = EINVAL;
			else
			{
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret = EINVAL;
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	return(ret);
}

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail / sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
	{
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	}
	else
	{
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout / (1000 / HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0)
	{
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}

	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}

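/*
 * Editor's note (not in the original): the buffer handed out here is filled
 * over in kernel_debug_internal() -- while KDEBUG_ENABLE_ENTROPY is set,
 * each trace hook stores a mach_absolute_time() sample until
 * kd_entropy_count entries have been captured; the tsleep() above simply
 * waits out ms_timeout while those samples accumulate.
 */
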

static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 25% of either ram or max mapped
	 * address, whichever is smaller.
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size / 4) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}

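/*
 * Worked example (editor's addition): with sane_size = 512MB and a 32-byte
 * kd_buf (five 4-byte args, a 4-byte debugid and an 8-byte timestamp on a
 * 32-bit kernel), max_entries = (512MB / 4) / 32 = 4194304 trace entries.
 */
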

/*
 * This function is provided for the CHUD toolkit only.
 * int val:
 *	zero disables kdebug_chudhook function call
 *	non-zero enables kdebug_chudhook function call
 * char *fn:
 *	address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}

int
kdbg_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = name[1];
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;

	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return(ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EINVAL);
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size)) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return(0);
	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ret);
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ESRCH);
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EBUSY);
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:	/* used to enable or disable */
		if (value)
		{
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT))
			{
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else
		{
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	default:
		ret = EINVAL;
	}
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}

/*
 * This code can run concurrently with kernel_debug_internal()
 * without the need of any locks, because all reads of kd_bufptr[i],
 * which get modified by kernel_debug_internal(), are safe.
 */
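/*
 * Editor's note (not in the original): the copy loop below is effectively a
 * k-way merge across the per-CPU rings -- each pass scans every CPU's
 * kd_readlast cursor, emits the event with the smallest masked timestamp,
 * and advances only that cursor, so events are returned in global time
 * order without locking against the writers.
 */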
int
kdbg_read(user_addr_t buffer, size_t *number)
{
	unsigned int count;
	unsigned int cpu;
	int mincpu;
	uint64_t mintime, t, last_wrap_time;
	int last_wrap_cpu;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	unsigned int old_kdebug_flags, new_kdebug_flags;
	unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
	boolean_t first_event = TRUE;

	count = *number / sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it only reads kdebug_flags, it doesn't write it..
	 * use an OSCompareAndSwap to make sure the other processors see the
	 * change of state immediately, not to protect against 2 threads racing to update it
	 */
	old_kdebug_slowcheck = kdebug_slowcheck;
	do {
		old_kdebug_flags = kdebug_flags;
		new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
		new_kdebug_flags |= KDBG_NOWRAP;
	} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

	last_wrap_time = 0;
	last_wrap_cpu = -1;

	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kd_buf *cur_bufptr;

		if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
			cur_bufptr = kdbip[cpu].kd_buffer;

		if (kdbip[cpu].kd_wrapped) {
			kdbip[cpu].kd_wrapped = 0;
			kdbip[cpu].kd_readlast = cur_bufptr;
			kdbip[cpu].kd_stop = cur_bufptr;

			if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
				last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
				last_wrap_cpu = cpu;
			}
		} else {
			if (kdbip[cpu].kd_readlast == cur_bufptr)
				kdbip[cpu].kd_stop = 0;
			else
				kdbip[cpu].kd_stop = cur_bufptr;
		}
	}
	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	if (last_wrap_cpu == -1)
		first_event = FALSE;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = -1;

			for (cpu = 0; cpu < kd_cpus; cpu++) {
				if (kdbip[cpu].kd_stop == 0)	/* empty buffer */
					continue;
				t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;

				if (t < mintime) {
					mintime = t;
					mincpu = cpu;
				}
			}
			if (mincpu < 0)
				/*
				 * all buffers ran empty early
				 */
				break;

			if (first_event == TRUE) {
				/*
				 * make sure we leave room for the
				 * LAST_WRAPPER event we inject
				 * by throwing away the first event
				 * it's better to lose that one
				 * than the last one
				 */
				first_event = FALSE;

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;

				continue;
			}
			if (last_wrap_cpu == mincpu) {
				tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
				tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
				tempbuf->arg2 = kd_cpus;
				tempbuf->arg3 = 0;
				tempbuf->arg4 = 0;
				tempbuf->arg5 = (int)current_thread();

				tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);

				tempbuf++;

				last_wrap_cpu = -1;

			} else {
				*(tempbuf++) = kdbip[mincpu].kd_readlast[0];

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;
			}
			tempbuf_count--;
			tempbuf_number++;
		}
		if (tempbuf_number) {
			if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
			buffer += (tempbuf_number * sizeof(kd_buf));
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		do {
			old_kdebug_flags = kdebug_flags;
			new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
		} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
			do {
				old_kdebug_slowcheck = kdebug_slowcheck;
				new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
			} while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
		}
	}
	return (error);
}

unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc)
{
	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */
}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#ifdef __i386__
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE (2 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init(void)
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:	Obtains a coherent set of stack traces for all threads
 *			on the system, tracing both kernel and user stacks
 *			where available. Uses machine specific trace routines
 *			for ppc, ppc64 and x86.
 * Inputs:		uap->pid - process id of process to be traced, or -1
 *			for the entire system
 *			uap->tracebuf - address of the user space destination
 *			buffer
 *			uap->tracebuf_size - size of the user space trace buffer
 *			uap->options - various options, including the maximum
 *			number of frames to trace.
 * Outputs:		EPERM if the caller is not privileged
 *			EINVAL if the supplied trace buffer isn't sanely sized
 *			ENOMEM if we don't have enough memory to satisfy the
 *			request
 *			ENOENT if the target pid isn't found
 *			ENOSPC if the supplied buffer is insufficient
 *			*retval contains the number of bytes traced, if
 *			successful, and -1 otherwise. If the request failed
 *			due to tracebuffer exhaustion, we copyout as much as
 *			possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval)
{
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
			       uap->options, retval);
}

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;

	*retval = -1;

	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

	if (stackshot_snapbuf == NULL) {
		error = ENOMEM;
		goto error_exit;
	}
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this
	 * populates the trace buffer
	 */
	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	TRAP_DEBUGGER;

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
				     ((bytesTraced < tracebuf_size) ?
				      bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		FREE(stackshot_snapbuf, M_TEMP);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}

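/*
 * Editor's sketch of the user-space side (not part of the original file),
 * assuming the SYS_stack_snapshot number from <sys/syscall.h>:
 *
 *	char buf[2 * 1024 * 1024];
 *	int bytes = syscall(SYS_stack_snapshot, -1, buf, sizeof(buf), 16);
 *
 * pid -1 requests the whole system; a -1 return with errno set to ENOSPC
 * means the supplied buffer was too small for the bytes traced.
 */
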
void
start_kern_tracing(unsigned int new_nkdbufs)
{
	if (!new_nkdbufs)
		return;

	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();

	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();

	printf("kernel tracing started\n");
}