/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/spl.h>

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#define HZ 100
#include <mach/clock_types.h>
#include <mach/mach_types.h>
#include <mach/mach_time.h>
#include <machine/machine_routines.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/debug.h>
#include <vm/vm_kern.h>
#include <sys/lock.h>

#include <sys/malloc.h>
#include <sys/kauth.h>

#include <mach/mach_host.h>	/* for host_info() */
#include <libkern/OSAtomic.h>

/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
int cpu_number(void);	/* XXX <machine/...> include path broken */

/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *);
void kdbg_control_chud(int, void *);
int kdbg_control(int *, u_int, user_addr_t, size_t *);
int kdbg_getentropy(user_addr_t, size_t *, int);
int kdbg_readmap(user_addr_t, size_t *);
int kdbg_getreg(kd_regtype *);
int kdbg_setreg(kd_regtype *);
int kdbg_setrtcdec(kd_regtype *);
int kdbg_setpidex(kd_regtype *);
int kdbg_setpid(kd_regtype *);
void kdbg_mapinit(void);
int kdbg_reinit(void);
int kdbg_bootstrap(void);

static int create_buffers(void);
static void delete_buffers(void);

extern void IOSleep(int);

#ifdef ppc
extern uint32_t maxDec;
#endif

/* trace enable status */
unsigned int kdebug_enable = 0;

/* track timestamps for security server's entropy needs */
uint64_t *kd_entropy_buffer = 0;
unsigned int kd_entropy_bufsize = 0;
unsigned int kd_entropy_count = 0;
unsigned int kd_entropy_indx = 0;
unsigned int kd_entropy_buftomem = 0;


#define SLOW_NOLOG	0x01
#define SLOW_CHECKS	0x02
#define SLOW_ENTROPY	0x04

unsigned int kdebug_slowcheck = SLOW_NOLOG;

unsigned int kd_cpus;

struct kd_bufinfo {
	kd_buf *kd_stop;
	kd_buf *kd_bufptr;
	kd_buf *kd_buffer;
	kd_buf *kd_buflast;
	kd_buf *kd_readlast;
	int kd_wrapped;		/* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
	uint64_t kd_prev_timebase;
	int kd_pad[24];		/* pad out to 128 bytes so that no cache line is shared between CPUs */
};

struct kd_bufinfo *kdbip = NULL;

#define KDCOPYBUF_COUNT	1024
#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
kd_buf *kdcopybuf = NULL;


unsigned int nkdbufs = 8192;
unsigned int kd_bufsize = 0;
unsigned int kdebug_flags = 0;
unsigned int kdlog_beg = 0;
unsigned int kdlog_end = 0;
unsigned int kdlog_value1 = 0;
unsigned int kdlog_value2 = 0;
unsigned int kdlog_value3 = 0;
unsigned int kdlog_value4 = 0;

static lck_mtx_t *kd_trace_mtx_sysctl;
static lck_grp_t *kd_trace_mtx_sysctl_grp;
static lck_attr_t *kd_trace_mtx_sysctl_attr;
static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;

static lck_grp_t *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t *stackshot_subsys_lck_attr;
static lck_mtx_t stackshot_subsys_mutex;

void *stackshot_snapbuf = NULL;

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);

extern void
kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);

extern int
kdp_stack_snapshot_geterror(void);
extern unsigned int
kdp_stack_snapshot_bytes_traced(void);

kd_threadmap *kd_mapptr = 0;
unsigned int kd_mapsize = 0;
unsigned int kd_mapcount = 0;
unsigned int kd_maptomem = 0;

pid_t global_state_pid = -1;	/* Used to control exclusive use of kd_buffer */

#define DBG_FUNC_MASK 0xfffffffc

/* task to string structure */
struct tts
{
	task_t task;		/* from procs task */
	pid_t pid;		/* from procs p_pid */
	char task_comm[20];	/* from procs p_comm */
};

typedef struct tts tts_t;

struct krt
{
	kd_threadmap *map;	/* pointer to the map buffer */
	int count;
	int maxcount;
	struct tts *atts;
};

typedef struct krt krt_t;

/* This is for the CHUD toolkit call */
typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
				unsigned int arg2, unsigned int arg3,
				unsigned int arg4, unsigned int arg5);

kd_chudhook_fn kdebug_chudhook = 0;	/* pointer to CHUD toolkit function */

__private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));

/* Support syscall SYS_kdebug_trace */
int
kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
{
	if ( (kdebug_enable == 0) )
		return(EINVAL);

	kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
	return(0);
}
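
/*
 * Usage sketch (illustrative only, not part of this file): once tracing has
 * been enabled, a user process can emit its own entries through this
 * syscall, e.g.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// debugid is a value the caller composes from class/subclass/code
 *	syscall(SYS_kdebug_trace, debugid, a1, a2, a3, a4);
 *
 * Only code and arg1..arg4 come from user space; arg5 of the recorded entry
 * is filled in by kernel_debug() below with the current thread.
 */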

static int
create_buffers(void)
{
	unsigned int cpu, i;
	int nentries;

	nentries = nkdbufs / kd_cpus;
	nkdbufs = nentries * kd_cpus;

	kd_bufsize = nentries * sizeof(kd_buf);

	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);

	if (kdcopybuf == 0) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
			return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
			break;
	}
	if (cpu < kd_cpus) {
		for (i = 0; i < cpu; i++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
		kd_bufsize = 0;

		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;

		return(ENOMEM);
	}
	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
		kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
		kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
	}
	kdebug_flags |= KDBG_BUFINIT;

	return(0);
}
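
/*
 * Worked example (illustrative only): with the default nkdbufs of 8192 and
 * kd_cpus == 2, each per-CPU ring gets nentries = 4096 and nkdbufs stays at
 * nentries * kd_cpus = 8192; with kd_cpus == 3 each ring would get 2730
 * entries and nkdbufs would be rounded down to 8190. Rounding nkdbufs this
 * way keeps the bookkeeping in kdbg_read() consistent with what was
 * actually allocated.
 */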


static void
delete_buffers(void)
{
	unsigned int cpu;

	if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
		for (cpu = 0; cpu < kd_cpus; cpu++)
			kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
		kd_bufsize = 0;
	}
	if (kdcopybuf) {
		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
		kdcopybuf = NULL;
	}
	kdebug_flags &= ~KDBG_BUFINIT;
}


static void
kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
		      unsigned int arg4, unsigned int arg5, int entropy_flag)
{
	int s;
	kd_buf *kd;
	struct proc *curproc;
	unsigned long long now;
	int cpu;

	s = ml_set_interrupts_enabled(FALSE);

	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	cpu = cpu_number();

	if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
		if (kdebug_chudhook)
			kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);

		if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
			goto out;
	}

	if (kdebug_slowcheck == 0)
		goto record_trace;

	if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY))
	{
		if (kd_entropy_indx < kd_entropy_count)
		{
			kd_entropy_buffer[kd_entropy_indx] = mach_absolute_time();
			kd_entropy_indx++;
		}

		if (kd_entropy_indx == kd_entropy_count)
		{
			/* Disable entropy collection */
			kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
			kdebug_slowcheck &= ~SLOW_ENTROPY;
		}
	}

	if ( (kdebug_slowcheck & SLOW_NOLOG) )
		goto out;

	if (kdebug_flags & KDBG_PIDCHECK)
	{
		/* If kdebug flag is not set for current proc, return */
		curproc = current_proc();
		if ((curproc && !(curproc->p_kdebug)) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_PIDEXCLUDE)
	{
		/* If kdebug flag is set for current proc, return */
		curproc = current_proc();
		if ((curproc && curproc->p_kdebug) &&
		    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
			goto out;
	}

	if (kdebug_flags & KDBG_RANGECHECK)
	{
		if ((debugid < kdlog_beg)
		    || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
			goto out;
	}
	else if (kdebug_flags & KDBG_VALCHECK)
	{
		if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
		    (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
		    (debugid >> 24 != DBG_TRACE))
			goto out;
	}

record_trace:
	kd = kdbip[cpu].kd_bufptr;
	kd->debugid = debugid;
	kd->arg1 = arg1;
	kd->arg2 = arg2;
	kd->arg3 = arg3;
	kd->arg4 = arg4;
	kd->arg5 = arg5;

	/*
	 * Watch for out of order timestamps
	 */
	if (now < kdbip[cpu].kd_prev_timebase)
	{
		/*
		 * if so, just store the previous timestamp + a cycle
		 */
		now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
	}
	else
	{
		kdbip[cpu].kd_prev_timebase = now;
	}
	kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);

	kdbip[cpu].kd_bufptr++;

	if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
		kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;

	if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
		if (kdebug_flags & KDBG_NOWRAP)
			kdebug_slowcheck |= SLOW_NOLOG;
		kdbip[cpu].kd_wrapped = 1;
		kdebug_flags |= KDBG_WRAPPED;
	}

out:
	ml_set_interrupts_enabled(s);
}
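
/*
 * Note on the packed timestamp (a sketch based on the masks used above): the
 * low bits of a kd_buf's timestamp hold the mach_absolute_time() value masked
 * with KDBG_TIMESTAMP_MASK, and the CPU that cut the event is packed into the
 * high bits. A consumer holding a kd_buf pointer kdp can unpack an entry
 * roughly as follows:
 *
 *	uint64_t time = kdp->timestamp & KDBG_TIMESTAMP_MASK;
 *	unsigned int cpu = (unsigned int)(kdp->timestamp >> KDBG_CPU_SHIFT);
 */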

void
kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	     unsigned int arg4, __unused unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
}

void
kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
	      unsigned int arg4, unsigned int arg5)
{
	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
}

static void
kdbg_lock_init(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (kdebug_flags & KDBG_LOCKINIT)
		return;

	/* get the number of cpus and cache it */
#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	kd_cpus = hinfo.physical_cpu_max;

	if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
		       sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
		return;

	/*
	 * allocate lock group attribute and group
	 */
	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();


	/*
	 * allocate and initialize the trace mutex
	 */
	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);

	kdebug_flags |= KDBG_LOCKINIT;
}


int
kdbg_bootstrap(void)
{
	kdebug_flags &= ~KDBG_WRAPPED;

	return (create_buffers());
}

int
kdbg_reinit(void)
{
	int ret = 0;

	/*
	 * Disable trace collecting.
	 * First make sure we're not in
	 * the middle of cutting a trace.
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck |= SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace.
	 */
	IOSleep(100);

	delete_buffers();

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	ret = kdbg_bootstrap();

	return(ret);
}

void
kdbg_trace_data(struct proc *proc, long *arg_pid)
{
	if (!proc)
		*arg_pid = 0;
	else
		*arg_pid = proc->p_pid;

	return;
}


void
kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
{
	char *dbg_nameptr;
	int dbg_namelen;
	long dbg_parms[4];

	if (!proc)
	{
		*arg1 = 0;
		*arg2 = 0;
		*arg3 = 0;
		*arg4 = 0;
		return;
	}

	/* Collect the command name for tracing */
	dbg_nameptr = proc->p_comm;
	dbg_namelen = strlen(proc->p_comm);
	dbg_parms[0] = 0L;
	dbg_parms[1] = 0L;
	dbg_parms[2] = 0L;
	dbg_parms[3] = 0L;

	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);

	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);

	*arg1 = dbg_parms[0];
	*arg2 = dbg_parms[1];
	*arg3 = dbg_parms[2];
	*arg4 = dbg_parms[3];
}

static void
kdbg_resolve_map(thread_t th_act, void *opaque)
{
	kd_threadmap *mapptr;
	krt_t *t = (krt_t *)opaque;

	if (t->count < t->maxcount)
	{
		mapptr = &t->map[t->count];
		mapptr->thread = (unsigned int)th_act;
		(void) strlcpy(mapptr->command, t->atts->task_comm,
			       sizeof(t->atts->task_comm));
		/*
		 * Some kernel threads have no associated pid.
		 * We still need to mark the entry as valid.
		 */
		if (t->atts->pid)
			mapptr->valid = t->atts->pid;
		else
			mapptr->valid = 1;

		t->count++;
	}
}

void
kdbg_mapinit(void)
{
	struct proc *p;
	struct krt akrt;
	int tts_count;		/* number of task-to-string structures */
	struct tts *tts_mapptr;
	unsigned int tts_mapsize = 0;
	unsigned int tts_maptomem = 0;
	int i;


	if (kdebug_flags & KDBG_MAPINIT)
		return;

	/* need to use PROC_SCANPROCLIST with proc_iterate */
	proc_list_lock();

	/* Calculate the sizes of map buffers */
	for (p = allproc.lh_first, kd_mapcount = 0, tts_count = 0; p;
	     p = p->p_list.le_next)
	{
		kd_mapcount += get_task_numacts((task_t)p->task);
		tts_count++;
	}

	proc_list_unlock();

	/*
	 * The proc count could change during buffer allocation,
	 * so introduce a small fudge factor to bump up the
	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables. Bump up by 10%.
	 */
	kd_mapcount += kd_mapcount / 10;
	tts_count += tts_count / 10;

	kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
	if ((kmem_alloc(kernel_map, &kd_maptomem,
			(vm_size_t)kd_mapsize) == KERN_SUCCESS))
	{
		kd_mapptr = (kd_threadmap *) kd_maptomem;
		bzero(kd_mapptr, kd_mapsize);
	}
	else
		kd_mapptr = (kd_threadmap *) 0;

	tts_mapsize = tts_count * sizeof(struct tts);
	if ((kmem_alloc(kernel_map, &tts_maptomem,
			(vm_size_t)tts_mapsize) == KERN_SUCCESS))
	{
		tts_mapptr = (struct tts *) tts_maptomem;
		bzero(tts_mapptr, tts_mapsize);
	}
	else
		tts_mapptr = (struct tts *) 0;


	/*
	 * We need to save the proc's command string
	 * and take a reference for each task associated
	 * with a valid process
	 */
	if (tts_mapptr) {
		/* should use proc_iterate */
		proc_list_lock();

		for (p = allproc.lh_first, i = 0; p && i < tts_count;
		     p = p->p_list.le_next) {
			if (p->p_lflag & P_LEXIT)
				continue;

			if (p->task) {
				task_reference(p->task);
				tts_mapptr[i].task = p->task;
				tts_mapptr[i].pid = p->p_pid;
				(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
				i++;
			}
		}
		tts_count = i;

		proc_list_unlock();
	}


	if (kd_mapptr && tts_mapptr)
	{
		kdebug_flags |= KDBG_MAPINIT;
		/* Initialize thread map data */
		akrt.map = kd_mapptr;
		akrt.count = 0;
		akrt.maxcount = kd_mapcount;

		for (i = 0; i < tts_count; i++)
		{
			akrt.atts = &tts_mapptr[i];
			task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
			task_deallocate((task_t) tts_mapptr[i].task);
		}
		kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
	}
}

static void
kdbg_clear(void)
{
	/*
	 * Clean up the trace buffer
	 * First make sure we're not in
	 * the middle of cutting a trace
	 */
	kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck = SLOW_NOLOG;

	/*
	 * make sure the SLOW_NOLOG is seen
	 * by everyone that might be trying
	 * to cut a trace..
	 */
	IOSleep(100);

	if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
		kdebug_slowcheck |= SLOW_ENTROPY;

	global_state_pid = -1;
	kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
	kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
	kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);

	delete_buffers();

	/* Clean up the thread map buffer */
	kdebug_flags &= ~KDBG_MAPINIT;
	kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
	kd_mapptr = (kd_threadmap *) 0;
	kd_mapsize = 0;
	kd_mapcount = 0;
}

int
kdbg_setpid(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid check for this and all pids */
			{
				kdebug_flags |= KDBG_PIDCHECK;
				kdebug_flags &= ~KDBG_PIDEXCLUDE;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else		/* turn off pid check for this pid value */
			{
				/* Don't turn off all pid checking though */
				/* kdebug_flags &= ~KDBG_PIDCHECK; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for pid exclusion in the trace buffer */
int
kdbg_setpidex(kd_regtype *kdr)
{
	pid_t pid;
	int flag, ret = 0;
	struct proc *p;

	pid = (pid_t)kdr->value1;
	flag = (int)kdr->value2;

	if (pid > 0)
	{
		if ((p = proc_find(pid)) == NULL)
			ret = ESRCH;
		else
		{
			if (flag == 1)	/* turn on pid exclusion */
			{
				kdebug_flags |= KDBG_PIDEXCLUDE;
				kdebug_flags &= ~KDBG_PIDCHECK;
				kdebug_slowcheck |= SLOW_CHECKS;

				p->p_kdebug = 1;
			}
			else		/* turn off pid exclusion for this pid value */
			{
				/* Don't turn off all pid exclusion though */
				/* kdebug_flags &= ~KDBG_PIDEXCLUDE; */
				p->p_kdebug = 0;
			}
			proc_rele(p);
		}
	}
	else
		ret = EINVAL;

	return(ret);
}

/* This is for setting a maximum decrementer value */
int
kdbg_setrtcdec(kd_regtype *kdr)
{
	int ret = 0;
	natural_t decval;

	decval = (natural_t)kdr->value1;

	if (decval && decval < KDBG_MINRTCDEC)
		ret = EINVAL;
#ifdef ppc
	else {
		maxDec = decval ? decval : 0x7FFFFFFF;	/* Set or reset the max decrementer */
	}
#else
	else
		ret = ENOTSUP;
#endif /* ppc */

	return(ret);
}

int
kdbg_setreg(kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {

	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_VALCHECK;		/* Turn off specific value check */
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_VALCHECK:
		kdlog_value1 = (kdr->value1);
		kdlog_value2 = (kdr->value2);
		kdlog_value3 = (kdr->value3);
		kdlog_value4 = (kdr->value4);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags &= ~KDBG_RANGECHECK;	/* Turn off range check */
		kdebug_flags |= KDBG_VALCHECK;		/* Turn on specific value check */
		kdebug_slowcheck |= SLOW_CHECKS;
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;

		if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
			kdebug_slowcheck |= SLOW_CHECKS;
		else
			kdebug_slowcheck &= ~SLOW_CHECKS;

		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return(ret);
}
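
/*
 * Worked example (illustrative only): a KDBG_SUBCLSTYPE request with
 * value1 = DBG_MACH (1) and value2 = DBG_MACH_SCHED (0x40) yields
 * kdlog_beg = 0x01400000 and kdlog_end = 0x01410000, so the range check in
 * kernel_debug_internal() passes exactly the debugids whose class/subclass
 * bytes are 0x01/0x40 (plus DBG_TRACE events, which are always kept).
 */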

int
kdbg_getreg(__unused kd_regtype * kdr)
{
#if 0
	int i, j, ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1 << 24);
		kdlog_end = (val_2 << 24);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE:
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1 << 24) | (val_2 << 16));
		kdlog_end = ((val_1 << 24) | (val << 16));
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE:
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE:
		kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
#endif /* 0 */
	return(EINVAL);
}


int
kdbg_readmap(user_addr_t buffer, size_t *number)
{
	int avail = *number;
	int ret = 0;
	unsigned int count = 0;

	count = avail / sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < kd_mapsize)
				ret = EINVAL;
			else
			{
				if (copyout(kd_mapptr, buffer, kd_mapsize))
					ret = EINVAL;
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}

	return(ret);
}
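
/*
 * Consumer-side sketch (illustrative only, not part of this file): a tool
 * that issued KERN_KDTHRMAP with a buffer of 'size' bytes can walk the
 * returned entries roughly like this, since the map is a flat array of
 * kd_threadmap records:
 *
 *	kd_threadmap *map = (kd_threadmap *)buf;
 *	int n = size / sizeof(kd_threadmap);
 *	for (int i = 0; i < n; i++)
 *		printf("%x %d %s\n", map[i].thread, map[i].valid, map[i].command);
 *
 * Note that the map is freed here after the read attempt, so it must be
 * regenerated (KERN_KDENABLE ends up calling kdbg_mapinit()) before rereading.
 */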

int
kdbg_getentropy(user_addr_t buffer, size_t *number, int ms_timeout)
{
	int avail = *number;
	int ret = 0;

	if (kd_entropy_buffer)
		return(EBUSY);

	kd_entropy_count = avail / sizeof(mach_timespec_t);
	kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
	kd_entropy_indx = 0;

	/* Enforce maximum entropy entries here if needed */

	/* allocate entropy buffer */
	if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
		       (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
	{
		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
	}
	else
	{
		kd_entropy_buffer = (uint64_t *) 0;
		kd_entropy_count = 0;
		kd_entropy_indx = 0;
		return (EINVAL);
	}

	if (ms_timeout < 10)
		ms_timeout = 10;

	/* Enable entropy sampling */
	kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck |= SLOW_ENTROPY;

	ret = tsleep(kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout / (1000 / HZ)));

	/* Disable entropy sampling */
	kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
	kdebug_slowcheck &= ~SLOW_ENTROPY;

	*number = 0;
	ret = 0;

	if (kd_entropy_indx > 0)
	{
		/* copyout the buffer */
		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
			ret = EINVAL;
		else
			*number = kd_entropy_indx;
	}

	/* Always cleanup */
	kd_entropy_count = 0;
	kd_entropy_indx = 0;
	kd_entropy_buftomem = 0;
	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
	kd_entropy_buffer = (uint64_t *) 0;

	return(ret);
}


static void
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 25% of either RAM or the maximum
	 * mapped address, whichever is smaller; 'value' is the desired number
	 * of trace entries.
	 */
	unsigned int max_entries = (sane_size / 4) / sizeof(kd_buf);

	if (value <= max_entries)
		nkdbufs = value;
	else
		nkdbufs = max_entries;
}
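
/*
 * Worked example (a sketch; the exact sizeof(kd_buf) depends on the
 * architecture's field layout in <sys/kdebug.h>): if sane_size is 512 MB and
 * a kd_buf entry is 32 bytes, then max_entries = (512 MB / 4) / 32 =
 * 4,194,304 entries, and any larger request is silently clamped to that.
 */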


/*
 * This function is provided for the CHUD toolkit only.
 * int val:
 *	zero disables kdebug_chudhook function call
 *	non-zero enables kdebug_chudhook function call
 * char *fn:
 *	address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdebug_enable |= KDEBUG_ENABLE_CHUD;
	}
	else {
		/* disable chudhook */
		kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
		kdebug_chudhook = 0;
	}
}


int
kdbg_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = name[1];
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	struct proc *p, *curproc;


	kdbg_lock_init();

	if ( !(kdebug_flags & KDBG_LOCKINIT))
		return(ENOMEM);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	if (name[0] == KERN_KDGETBUF) {
		/*
		 * Does not alter the global_state_pid
		 * This is a passive request.
		 */
		if (size < sizeof(kd_bufinfo.nkdbufs)) {
			/*
			 * There is not enough room to return even
			 * the first element of the info structure.
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EINVAL);
		}
		kd_bufinfo.nkdbufs = nkdbufs;
		kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);

		if ( (kdebug_slowcheck & SLOW_NOLOG) )
			kd_bufinfo.nolog = 1;
		else
			kd_bufinfo.nolog = 0;
		kd_bufinfo.flags = kdebug_flags;
		kd_bufinfo.bufid = global_state_pid;

		if (size >= sizeof(kd_bufinfo)) {
			/*
			 * Provide all the info we have
			 */
			if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		else {
			/*
			 * For backwards compatibility, only provide
			 * as much info as there is room for.
			 */
			if (copyout(&kd_bufinfo, where, size)) {
				lck_mtx_unlock(kd_trace_mtx_sysctl);

				return(EINVAL);
			}
		}
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return(0);
	} else if (name[0] == KERN_KDGETENTROPY) {
		if (kd_entropy_buffer)
			ret = EBUSY;
		else
			ret = kdbg_getentropy(where, sizep, value);
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ret);
	}

	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		lck_mtx_unlock(kd_trace_mtx_sysctl);

		return (ESRCH);
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			return(EBUSY);
		}
	}

	switch(name[0]) {
	case KERN_KDEFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags |= value;
		break;
	case KERN_KDDFLAGS:
		value &= KDBG_USERFLAGS;
		kdebug_flags &= ~value;
		break;
	case KERN_KDENABLE:	/* used to enable or disable */
		if (value)
		{
			/* enable only if buffer is initialized */
			if (!(kdebug_flags & KDBG_BUFINIT))
			{
				ret = EINVAL;
				break;
			}
			kdbg_mapinit();

			kdebug_enable |= KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck &= ~SLOW_NOLOG;
		}
		else
		{
			kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
			kdebug_slowcheck |= SLOW_NOLOG;
		}
		break;
	case KERN_KDSETBUF:
		kdbg_set_nkdbufs(value);
		break;
	case KERN_KDSETUP:
		ret = kdbg_reinit();
		break;
	case KERN_KDREMOVE:
		kdbg_clear();
		break;
	case KERN_KDSETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setreg(&kd_Reg);
		break;
	case KERN_KDGETREG:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_getreg(&kd_Reg);
		if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
			ret = EINVAL;
		}
		break;
	case KERN_KDREADTR:
		ret = kdbg_read(where, sizep);
		break;
	case KERN_KDPIDTR:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpid(&kd_Reg);
		break;
	case KERN_KDPIDEX:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setpidex(&kd_Reg);
		break;
	case KERN_KDTHRMAP:
		ret = kdbg_readmap(where, sizep);
		break;
	case KERN_KDSETRTCDEC:
		if (size < sizeof(kd_regtype)) {
			ret = EINVAL;
			break;
		}
		if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
			ret = EINVAL;
			break;
		}
		ret = kdbg_setrtcdec(&kd_Reg);
		break;

	default:
		ret = EINVAL;
	}
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
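
/*
 * Userland sketch (illustrative only; based on how tracing tools such as
 * fs_usage typically drive this interface, not on anything defined in this
 * file): the facility is reached through the CTL_KERN / KERN_KDEBUG sysctl,
 * with the operation and its value passed as extra name elements, e.g.
 *
 *	int mib[6] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF, 0, 0, 0 };
 *	kbufinfo_t info;
 *	size_t len = sizeof(info);
 *
 *	if (sysctl(mib, 3, &info, &len, NULL, 0) < 0)
 *		perror("sysctl KERN_KDGETBUF");
 *
 * The same pattern with KERN_KDSETBUF / KERN_KDSETUP / KERN_KDENABLE sets up
 * and starts tracing, and KERN_KDREADTR drains kd_buf entries into a user
 * buffer.
 */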


/*
 * This code can run concurrently with kernel_debug_internal()
 * without the need of any locks, because all reads of kd_bufptr[i],
 * which get modified by kernel_debug_internal(), are safe.
 */
int
kdbg_read(user_addr_t buffer, size_t *number)
{
	unsigned int count;
	unsigned int cpu;
	int mincpu;
	uint64_t mintime, t, last_wrap_time;
	int last_wrap_cpu;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	unsigned int old_kdebug_flags, new_kdebug_flags;
	unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
	boolean_t first_event = TRUE;

	count = *number / sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	/*
	 * Because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags. The code that cuts new events could
	 * be running, but it only reads kdebug_flags, it doesn't write it.
	 * Use an OSCompareAndSwap to make sure the other processors see the
	 * change of state immediately, not to protect against 2 threads racing to update it.
	 */
	old_kdebug_slowcheck = kdebug_slowcheck;
	do {
		old_kdebug_flags = kdebug_flags;
		new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
		new_kdebug_flags |= KDBG_NOWRAP;
	} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

	last_wrap_time = 0;
	last_wrap_cpu = -1;

	for (cpu = 0; cpu < kd_cpus; cpu++) {
		kd_buf *cur_bufptr;

		if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
			cur_bufptr = kdbip[cpu].kd_buffer;

		if (kdbip[cpu].kd_wrapped) {
			kdbip[cpu].kd_wrapped = 0;
			kdbip[cpu].kd_readlast = cur_bufptr;
			kdbip[cpu].kd_stop = cur_bufptr;

			if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
				last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
				last_wrap_cpu = cpu;
			}
		} else {
			if (kdbip[cpu].kd_readlast == cur_bufptr)
				kdbip[cpu].kd_stop = 0;
			else
				kdbip[cpu].kd_stop = cur_bufptr;
		}
	}
	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;

	if (last_wrap_cpu == -1)
		first_event = FALSE;

	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;	/* all actual timestamps are below */
			mincpu = -1;

			for (cpu = 0; cpu < kd_cpus; cpu++) {
				if (kdbip[cpu].kd_stop == 0)	/* empty buffer */
					continue;
				t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;

				if (t < mintime) {
					mintime = t;
					mincpu = cpu;
				}
			}
			if (mincpu < 0)
				/*
				 * all buffers ran empty early
				 */
				break;

			if (first_event == TRUE) {
				/*
				 * make sure we leave room for the
				 * LAST_WRAPPER event we inject
				 * by throwing away the first event
				 * it's better to lose that one
				 * than the last one
				 */
				first_event = FALSE;

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;

				continue;
			}
			if (last_wrap_cpu == mincpu) {
				tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
				tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
				tempbuf->arg2 = kd_cpus;
				tempbuf->arg3 = 0;
				tempbuf->arg4 = 0;
				tempbuf->arg5 = (int)current_thread();

				tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);

				tempbuf++;

				last_wrap_cpu = -1;

			} else {
				*(tempbuf++) = kdbip[mincpu].kd_readlast[0];

				kdbip[mincpu].kd_readlast++;

				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
					kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
				if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
					kdbip[mincpu].kd_stop = 0;
			}
			tempbuf_count--;
			tempbuf_number++;
		}
		if (tempbuf_number) {
			if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count -= tempbuf_number;
			*number += tempbuf_number;
			buffer += (tempbuf_number * sizeof(kd_buf));
		}
		if (tempbuf_count)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		do {
			old_kdebug_flags = kdebug_flags;
			new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
		} while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));

		if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
			do {
				old_kdebug_slowcheck = kdebug_slowcheck;
				new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
			} while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
		}
	}
	return (error);
}


unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc)
{
	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */
}

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#ifdef __i386__
#define TRAP_DEBUGGER __asm__ volatile("int3");
#endif
#ifdef __ppc__
#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
#endif

#define SANE_TRACEBUF_SIZE 2*1024*1024

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}

/*
 * stack_snapshot:	Obtains a coherent set of stack traces for all threads
 *			on the system, tracing both kernel and user stacks
 *			where available. Uses machine specific trace routines
 *			for ppc, ppc64 and x86.
 * Inputs:		uap->pid - process id of process to be traced, or -1
 *			for the entire system
 *			uap->tracebuf - address of the user space destination
 *			buffer
 *			uap->tracebuf_size - size of the user space trace buffer
 *			uap->options - various options, including the maximum
 *			number of frames to trace.
 * Outputs:		EPERM if the caller is not privileged
 *			EINVAL if the supplied trace buffer isn't sanely sized
 *			ENOMEM if we don't have enough memory to satisfy the
 *			request
 *			ENOENT if the target pid isn't found
 *			ENOSPC if the supplied buffer is insufficient
 *			*retval contains the number of bytes traced, if successful
 *			and -1 otherwise. If the request failed due to
 *			tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval)
{
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
			       uap->options, retval);
}
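
/*
 * Userland sketch (illustrative only; the SYS_stack_snapshot stub name is an
 * assumption, not something defined in this file). A privileged process can
 * request a snapshot of every thread's stack roughly like this, passing -1
 * as the pid to cover the whole system:
 *
 *	char buf[2 * 1024 * 1024];	// must stay within SANE_TRACEBUF_SIZE
 *	int traced = syscall(SYS_stack_snapshot, -1, buf, sizeof(buf), 0);
 *
 * On success, traced holds the number of bytes copied out; ENOSPC indicates
 * the supplied buffer was too small for the full snapshot.
 */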

int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
{
	int error = 0;
	unsigned bytesTraced = 0;

	*retval = -1;
	/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
		error = EINVAL;
		goto error_exit;
	}

	MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);

	if (stackshot_snapbuf == NULL) {
		error = ENOMEM;
		goto error_exit;
	}
	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	TRAP_DEBUGGER;

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
				     ((bytesTraced < tracebuf_size) ?
				      bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		FREE(stackshot_snapbuf, M_TEMP);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}

void
start_kern_tracing(unsigned int new_nkdbufs)
{
	if (!new_nkdbufs)
		return;
	kdbg_set_nkdbufs(new_nkdbufs);
	kdbg_lock_init();
	kdbg_reinit();
	kdebug_enable |= KDEBUG_ENABLE_TRACE;
	kdebug_slowcheck &= ~SLOW_NOLOG;
	kdbg_mapinit();
	printf("kernel tracing started\n");
}