[apple/xnu.git] / bsd / kern / kdebug.c (xnu-1228.15.4)
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23 #include <machine/spl.h>
24
25 #include <sys/errno.h>
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/proc_internal.h>
29 #include <sys/vm.h>
30 #include <sys/sysctl.h>
31 #include <sys/kdebug.h>
32 #include <sys/sysproto.h>
33
34 #define HZ 100
35 #include <mach/clock_types.h>
36 #include <mach/mach_types.h>
37 #include <mach/mach_time.h>
38 #include <machine/machine_routines.h>
39
40 #include <kern/thread.h>
41 #include <kern/task.h>
42 #include <kern/debug.h>
43 #include <vm/vm_kern.h>
44 #include <sys/lock.h>
45
46 #include <sys/malloc.h>
47 #include <sys/kauth.h>
48
49 #include <mach/mach_host.h> /* for host_info() */
50 #include <libkern/OSAtomic.h>
51
52 /* XXX should have prototypes, but Mach does not provide one */
53 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
54 int cpu_number(void); /* XXX <machine/...> include path broken */
55
56 /* XXX should probably be static, but it's debugging code... */
57 int kdbg_read(user_addr_t, size_t *);
58 void kdbg_control_chud(int, void *);
59 int kdbg_control(int *, u_int, user_addr_t, size_t *);
60 int kdbg_getentropy (user_addr_t, size_t *, int);
61 int kdbg_readmap(user_addr_t, size_t *);
62 int kdbg_getreg(kd_regtype *);
63 int kdbg_setreg(kd_regtype *);
64 int kdbg_setrtcdec(kd_regtype *);
65 int kdbg_setpidex(kd_regtype *);
66 int kdbg_setpid(kd_regtype *);
67 void kdbg_mapinit(void);
68 int kdbg_reinit(void);
69 int kdbg_bootstrap(void);
70
71 static int create_buffers(void);
72 static void delete_buffers(void);
73
74 extern void IOSleep(int);
75
76 #ifdef ppc
77 extern uint32_t maxDec;
78 #endif
79
80 /* trace enable status */
81 unsigned int kdebug_enable = 0;
82
83 /* track timestamps for security server's entropy needs */
84 uint64_t * kd_entropy_buffer = 0;
85 unsigned int kd_entropy_bufsize = 0;
86 unsigned int kd_entropy_count = 0;
87 unsigned int kd_entropy_indx = 0;
88 unsigned int kd_entropy_buftomem = 0;
89
90
91 #define SLOW_NOLOG 0x01
92 #define SLOW_CHECKS 0x02
93 #define SLOW_ENTROPY 0x04
94
95 unsigned int kdebug_slowcheck=SLOW_NOLOG;
96
97 unsigned int kd_cpus;
98
99 struct kd_bufinfo {
100 kd_buf * kd_stop;
101 kd_buf * kd_bufptr;
102 kd_buf * kd_buffer;
103 kd_buf * kd_buflast;
104 kd_buf * kd_readlast;
105 int kd_wrapped; /* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
106 uint64_t kd_prev_timebase;
107 int kd_pad[24]; /* pad out to 128 bytes so that no cache line is shared between CPUs */
108
109 };
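/*
 * Sizing sketch (assuming the 32-bit kernels of this era, i.e. 4-byte
 * pointers): 5 pointers (20) + kd_wrapped (4) + kd_prev_timebase (8)
 * + kd_pad (96) = 128 bytes, so each CPU's bufinfo occupies its own
 * 128-byte block as the pad comment above intends.
 */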
110
111 struct kd_bufinfo *kdbip = NULL;
112
113 #define KDCOPYBUF_COUNT 1024
114 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
115 kd_buf *kdcopybuf = NULL;
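/*
 * kdcopybuf is the staging area used by kdbg_read(): up to KDCOPYBUF_COUNT
 * events are gathered here per pass before a single copyout() to user
 * space.  With the 32-bit kd_buf layout (roughly an 8-byte timestamp plus
 * six 32-bit fields, 32 bytes per event) that is about a 32KB buffer.
 */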
116
117
118 unsigned int nkdbufs = 8192;
119 unsigned int kd_bufsize = 0;
120 unsigned int kdebug_flags = 0;
121 unsigned int kdlog_beg=0;
122 unsigned int kdlog_end=0;
123 unsigned int kdlog_value1=0;
124 unsigned int kdlog_value2=0;
125 unsigned int kdlog_value3=0;
126 unsigned int kdlog_value4=0;
127
128 static lck_mtx_t * kd_trace_mtx_sysctl;
129 static lck_grp_t * kd_trace_mtx_sysctl_grp;
130 static lck_attr_t * kd_trace_mtx_sysctl_attr;
131 static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
132
133 static lck_grp_t *stackshot_subsys_lck_grp;
134 static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
135 static lck_attr_t *stackshot_subsys_lck_attr;
136 static lck_mtx_t stackshot_subsys_mutex;
137
138 void *stackshot_snapbuf = NULL;
139
140 int
141 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);
142
143 extern void
144 kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);
145
146 extern int
147 kdp_stack_snapshot_geterror(void);
148 extern unsigned int
149 kdp_stack_snapshot_bytes_traced(void);
150
151 kd_threadmap *kd_mapptr = 0;
152 unsigned int kd_mapsize = 0;
153 unsigned int kd_mapcount = 0;
154 unsigned int kd_maptomem = 0;
155
156 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
157
158 #define DBG_FUNC_MASK 0xfffffffc
159
160 /* task-to-string structure */
161 struct tts
162 {
163 task_t task; /* from proc's task */
164 pid_t pid; /* from proc's p_pid */
165 char task_comm[20]; /* from proc's p_comm */
166 };
167
168 typedef struct tts tts_t;
169
170 struct krt
171 {
172 kd_threadmap *map; /* pointer to the map buffer */
173 int count;
174 int maxcount;
175 struct tts *atts;
176 };
177
178 typedef struct krt krt_t;
179
180 /* This is for the CHUD toolkit call */
181 typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
182 unsigned int arg2, unsigned int arg3,
183 unsigned int arg4, unsigned int arg5);
184
185 kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
186
187 __private_extern__ void stackshot_lock_init( void ) __attribute__((section("__TEXT, initcode")));
188
189 /* Support syscall SYS_kdebug_trace */
190 int
191 kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
192 {
193 if ( (kdebug_enable == 0) )
194 return(EINVAL);
195
196 kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
197 return(0);
198 }
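/*
 * A hypothetical sketch of how user space reaches this handler, assuming a
 * debugid built with the KDBG_CODE()/DBG_FUNC_* macros from <sys/kdebug.h>
 * and the raw syscall() interface (values shown are illustrative only):
 *
 *	int dbgid = KDBG_CODE(DBG_MISC, 0, 1);	/- class, subclass, code -/
 *	syscall(SYS_kdebug_trace, dbgid | DBG_FUNC_START, a1, a2, a3, a4);
 *	... the work being measured ...
 *	syscall(SYS_kdebug_trace, dbgid | DBG_FUNC_END, a1, a2, a3, a4);
 *
 * Nothing is recorded unless kdebug_enable has been turned on, e.g. via
 * the KERN_KDENABLE case in kdbg_control() below.
 */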
199
200 static int
201 create_buffers(void)
202 {
203 unsigned int cpu, i;
204 int nentries;
205
206 nentries = nkdbufs / kd_cpus;
207 nkdbufs = nentries * kd_cpus;
208
209 kd_bufsize = nentries * sizeof(kd_buf);
210
211 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
212
213 if (kdcopybuf == 0) {
214 if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
215 return(ENOMEM);
216 }
217 for (cpu = 0; cpu < kd_cpus; cpu++) {
218 if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
219 break;
220 }
221 if (cpu < kd_cpus) {
222 for (i = 0; i < cpu; i++)
223 kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
224 kd_bufsize = 0;
225
226 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
227 kdcopybuf = NULL;
228
229 return(ENOMEM);
230 }
231 for (cpu = 0; cpu < kd_cpus; cpu++) {
232 kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
233 kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
234 kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
235 }
236 kdebug_flags |= KDBG_BUFINIT;
237
238 return(0);
239 }
240
241
242 static void
243 delete_buffers(void)
244 {
245 unsigned int cpu;
246
247 if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
248 for (cpu = 0; cpu < kd_cpus; cpu++)
249 kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
250 kd_bufsize = 0;
251 }
252 if (kdcopybuf) {
253 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
254 kdcopybuf = NULL;
255 }
256 kdebug_flags &= ~KDBG_BUFINIT;
257 }
258
259
260 static void
261 kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
262 unsigned int arg4, unsigned int arg5, int entropy_flag)
263 {
264 int s;
265 kd_buf * kd;
266 struct proc *curproc;
267 unsigned long long now;
268 int cpu;
269
270 s = ml_set_interrupts_enabled(FALSE);
271
272 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
273 cpu = cpu_number();
274
275 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
276 if (kdebug_chudhook)
277 kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
278
279 if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
280 goto out;
281 }
282
283 if (kdebug_slowcheck == 0)
284 goto record_trace;
285
286 if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY))
287 {
288 if (kd_entropy_indx < kd_entropy_count)
289 {
290 kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time();
291 kd_entropy_indx++;
292 }
293
294 if (kd_entropy_indx == kd_entropy_count)
295 {
296 /* Disable entropy collection */
297 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
298 kdebug_slowcheck &= ~SLOW_ENTROPY;
299 }
300 }
301
302 if ( (kdebug_slowcheck & SLOW_NOLOG) )
303 goto out;
304
305 if (kdebug_flags & KDBG_PIDCHECK)
306 {
307 /* If kdebug flag is not set for current proc, return */
308 curproc = current_proc();
309 if ((curproc && !(curproc->p_kdebug)) &&
310 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
311 goto out;
312 }
313 else if (kdebug_flags & KDBG_PIDEXCLUDE)
314 {
315 /* If kdebug flag is set for current proc, return */
316 curproc = current_proc();
317 if ((curproc && curproc->p_kdebug) &&
318 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
319 goto out;
320 }
321
322 if (kdebug_flags & KDBG_RANGECHECK)
323 {
324 if ((debugid < kdlog_beg)
325 || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
326 goto out;
327 }
328 else if (kdebug_flags & KDBG_VALCHECK)
329 {
330 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
331 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
332 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
333 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
334 (debugid >> 24 != DBG_TRACE))
335 goto out;
336 }
337
338 record_trace:
339 kd = kdbip[cpu].kd_bufptr;
340 kd->debugid = debugid;
341 kd->arg1 = arg1;
342 kd->arg2 = arg2;
343 kd->arg3 = arg3;
344 kd->arg4 = arg4;
345 kd->arg5 = arg5;
346
347 /*
348 * Watch for out of order timestamps
349 */
350 if (now < kdbip[cpu].kd_prev_timebase)
351 {
352 /*
353 * if so, just store the previous timestamp + a cycle
354 */
355 now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
356 }
357 else
358 {
359 kdbip[cpu].kd_prev_timebase = now;
360 }
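	/*
	 * The cpu number is packed into the high-order bits of the 64-bit
	 * timestamp; readers split the two fields back apart with
	 * KDBG_TIMESTAMP_MASK and (timestamp >> KDBG_CPU_SHIFT).
	 */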
361 kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);
362
363 kdbip[cpu].kd_bufptr++;
364
365 if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
366 kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
367
368 if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
369 if (kdebug_flags & KDBG_NOWRAP)
370 kdebug_slowcheck |= SLOW_NOLOG;
371 kdbip[cpu].kd_wrapped = 1;
372 kdebug_flags |= KDBG_WRAPPED;
373 }
374
375 out:
376 ml_set_interrupts_enabled(s);
377 }
378
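/*
 * kernel_debug() is the common in-kernel tracepoint entry point: it
 * overrides arg5 with the current thread and lets the event feed the
 * entropy collector.  kernel_debug1() passes the caller's arg5 through
 * unchanged and skips entropy collection.
 */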
379 void
380 kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
381 unsigned int arg4, __unused unsigned int arg5)
382 {
383 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
384 }
385
386 void
387 kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
388 unsigned int arg4, unsigned int arg5)
389 {
390 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
391 }
392
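/*
 * One-time setup: ask host_info() for the number of logical CPUs, allocate
 * the per-cpu kd_bufinfo array, and create the mutex that serializes
 * kdbg_control()/kdbg_read() against other control requests.
 */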
393 static void
394 kdbg_lock_init(void)
395 {
396 host_basic_info_data_t hinfo;
397 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
398
399 if (kdebug_flags & KDBG_LOCKINIT)
400 return;
401
402 /* get the number of cpus and cache it */
403 #define BSD_HOST 1
404 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
405 kd_cpus = hinfo.logical_cpu_max;
406
407 if (kmem_alloc(kernel_map, (unsigned int *)&kdbip,
408 sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
409 return;
410
411 /*
412 * allocate lock group attribute and group
413 */
414 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
415 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
416
417 /*
418 * allocate the lock attribute
419 */
420 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
421
422
423 /*
424 * allocate and initialize the kdebug trace mutex
425 */
426 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
427
428 kdebug_flags |= KDBG_LOCKINIT;
429 }
430
431
432 int
433 kdbg_bootstrap(void)
434 {
435 kdebug_flags &= ~KDBG_WRAPPED;
436
437 return (create_buffers());
438 }
439
440 int
441 kdbg_reinit(void)
442 {
443 int ret=0;
444
445 /*
446 * Disable trace collecting
447 * First make sure we're not in
448 * the middle of cutting a trace
449 */
450
451 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
452 kdebug_slowcheck |= SLOW_NOLOG;
453
454 /*
455 * make sure the SLOW_NOLOG is seen
456 * by everyone that might be trying
457 * to cut a trace..
458 */
459 IOSleep(100);
460
461 delete_buffers();
462
463 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
464 {
465 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
466 kdebug_flags &= ~KDBG_MAPINIT;
467 kd_mapsize = 0;
468 kd_mapptr = (kd_threadmap *) 0;
469 kd_mapcount = 0;
470 }
471
472 ret = kdbg_bootstrap();
473
474 return(ret);
475 }
476
477 void
478 kdbg_trace_data(struct proc *proc, long *arg_pid)
479 {
480 if (!proc)
481 *arg_pid = 0;
482 else
483 *arg_pid = proc->p_pid;
484
485 return;
486 }
487
488
489 void
490 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
491 {
492 char *dbg_nameptr;
493 int dbg_namelen;
494 long dbg_parms[4];
495 if (!proc)
496 {
497 *arg1 = 0;
498 *arg2 = 0;
499 *arg3 = 0;
500 *arg4 = 0;
501 return;
502 }
503
504 /* Collect the process name (p_comm) for tracing */
505 dbg_nameptr = proc->p_comm;
506 dbg_namelen = strlen(proc->p_comm);
507 dbg_parms[0]=0L;
508 dbg_parms[1]=0L;
509 dbg_parms[2]=0L;
510 dbg_parms[3]=0L;
511
512 if(dbg_namelen > (int)sizeof(dbg_parms))
513 dbg_namelen = sizeof(dbg_parms);
514
515 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
516
517 *arg1=dbg_parms[0];
518 *arg2=dbg_parms[1];
519 *arg3=dbg_parms[2];
520 *arg4=dbg_parms[3];
521 }
522
523 static void
524 kdbg_resolve_map(thread_t th_act, void *opaque)
525 {
526 kd_threadmap *mapptr;
527 krt_t *t = (krt_t *)opaque;
528
529 if(t->count < t->maxcount)
530 {
531 mapptr=&t->map[t->count];
532 mapptr->thread = (unsigned int)th_act;
533 (void) strlcpy (mapptr->command, t->atts->task_comm,
534 sizeof(t->atts->task_comm));
535
536 /*
537 * Some kernel threads have no associated pid.
538 * We still need to mark the entry as valid.
539 */
540 if (t->atts->pid)
541 mapptr->valid = t->atts->pid;
542 else
543 mapptr->valid = 1;
544
545 t->count++;
546 }
547 }
548
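/*
 * Build the thread-to-command-name map: size the buffers from a pass over
 * allproc, take a reference on each live task, then walk each task's
 * threads with kdbg_resolve_map() to fill in kd_mapptr.
 */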
549 void
550 kdbg_mapinit(void)
551 {
552 struct proc *p;
553 struct krt akrt;
554 int tts_count; /* number of task-to-string structures */
555 struct tts *tts_mapptr;
556 unsigned int tts_mapsize = 0;
557 unsigned int tts_maptomem=0;
558 int i;
559
560
561 if (kdebug_flags & KDBG_MAPINIT)
562 return;
563
564 /* need to use PROC_SCANPROCLIST with proc_iterate */
565 proc_list_lock();
566
567 /* Calculate the sizes of map buffers */
568 for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
569 p = p->p_list.le_next)
570 {
571 kd_mapcount += get_task_numacts((task_t)p->task);
572 tts_count++;
573 }
574
575 proc_list_unlock();
576
577 /*
578 * The proc count could change during buffer allocation,
579 * so introduce a small fudge factor to bump up the
580 * buffer sizes. This gives new tasks some chance of
581 * making it into the tables. Bump up by 10%.
582 */
583 kd_mapcount += kd_mapcount/10;
584 tts_count += tts_count/10;
585
586 kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
587 if((kmem_alloc(kernel_map, & kd_maptomem,
588 (vm_size_t)kd_mapsize) == KERN_SUCCESS))
589 {
590 kd_mapptr = (kd_threadmap *) kd_maptomem;
591 bzero(kd_mapptr, kd_mapsize);
592 }
593 else
594 kd_mapptr = (kd_threadmap *) 0;
595
596 tts_mapsize = tts_count * sizeof(struct tts);
597 if((kmem_alloc(kernel_map, & tts_maptomem,
598 (vm_size_t)tts_mapsize) == KERN_SUCCESS))
599 {
600 tts_mapptr = (struct tts *) tts_maptomem;
601 bzero(tts_mapptr, tts_mapsize);
602 }
603 else
604 tts_mapptr = (struct tts *) 0;
605
606
607 /*
608 * We need to save the proc's command string
609 * and take a reference for each task associated
610 * with a valid process
611 */
612
613 if (tts_mapptr) {
614 /* should use proc_iterate */
615 proc_list_lock();
616
617 for (p = allproc.lh_first, i=0; p && i < tts_count;
618 p = p->p_list.le_next) {
619 if (p->p_lflag & P_LEXIT)
620 continue;
621
622 if (p->task) {
623 task_reference(p->task);
624 tts_mapptr[i].task = p->task;
625 tts_mapptr[i].pid = p->p_pid;
626 (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
627 i++;
628 }
629 }
630 tts_count = i;
631
632 proc_list_unlock();
633
634 }
635
636
637 if (kd_mapptr && tts_mapptr)
638 {
639 kdebug_flags |= KDBG_MAPINIT;
640 /* Initialize thread map data */
641 akrt.map = kd_mapptr;
642 akrt.count = 0;
643 akrt.maxcount = kd_mapcount;
644
645 for (i=0; i < tts_count; i++)
646 {
647 akrt.atts = &tts_mapptr[i];
648 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
649 task_deallocate((task_t) tts_mapptr[i].task);
650 }
651 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
652 }
653 }
654
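/*
 * Tear everything down: stop tracing, drop the pid/range/value filters,
 * release ownership (global_state_pid), and free both the trace buffers
 * and the thread map.
 */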
655 static void
656 kdbg_clear(void)
657 {
658 /*
659 * Clean up the trace buffer
660 * First make sure we're not in
661 * the middle of cutting a trace
662 */
663
664 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
665 kdebug_slowcheck = SLOW_NOLOG;
666
667 /*
668 * make sure the SLOW_NOLOG is seen
669 * by everyone that might be trying
670 * to cut a trace..
671 */
672 IOSleep(100);
673
674 if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
675 kdebug_slowcheck |= SLOW_ENTROPY;
676
677 global_state_pid = -1;
678 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
679 kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
680 kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
681
682 delete_buffers();
683
684 /* Clean up the thread map buffer */
685 kdebug_flags &= ~KDBG_MAPINIT;
686 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
687 kd_mapptr = (kd_threadmap *) 0;
688 kd_mapsize = 0;
689 kd_mapcount = 0;
690 }
691
692 int
693 kdbg_setpid(kd_regtype *kdr)
694 {
695 pid_t pid;
696 int flag, ret=0;
697 struct proc *p;
698
699 pid = (pid_t)kdr->value1;
700 flag = (int)kdr->value2;
701
702 if (pid > 0)
703 {
704 if ((p = proc_find(pid)) == NULL)
705 ret = ESRCH;
706 else
707 {
708 if (flag == 1) /* turn on pid check for this and all pids */
709 {
710 kdebug_flags |= KDBG_PIDCHECK;
711 kdebug_flags &= ~KDBG_PIDEXCLUDE;
712 kdebug_slowcheck |= SLOW_CHECKS;
713
714 p->p_kdebug = 1;
715 }
716 else /* turn off pid check for this pid value */
717 {
718 /* Don't turn off all pid checking though */
719 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
720 p->p_kdebug = 0;
721 }
722 proc_rele(p);
723 }
724 }
725 else
726 ret = EINVAL;
727 return(ret);
728 }
729
730 /* This is for pid exclusion in the trace buffer */
731 int
732 kdbg_setpidex(kd_regtype *kdr)
733 {
734 pid_t pid;
735 int flag, ret=0;
736 struct proc *p;
737
738 pid = (pid_t)kdr->value1;
739 flag = (int)kdr->value2;
740
741 if (pid > 0)
742 {
743 if ((p = proc_find(pid)) == NULL)
744 ret = ESRCH;
745 else
746 {
747 if (flag == 1) /* turn on pid exclusion */
748 {
749 kdebug_flags |= KDBG_PIDEXCLUDE;
750 kdebug_flags &= ~KDBG_PIDCHECK;
751 kdebug_slowcheck |= SLOW_CHECKS;
752
753 p->p_kdebug = 1;
754 }
755 else /* turn off pid exclusion for this pid value */
756 {
757 /* Don't turn off all pid exclusion though */
758 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
759 p->p_kdebug = 0;
760 }
761 proc_rele(p);
762 }
763 }
764 else
765 ret = EINVAL;
766 return(ret);
767 }
768
769 /* This is for setting a maximum decrementer value */
770 int
771 kdbg_setrtcdec(kd_regtype *kdr)
772 {
773 int ret=0;
774 natural_t decval;
775
776 decval = (natural_t)kdr->value1;
777
778 if (decval && decval < KDBG_MINRTCDEC)
779 ret = EINVAL;
780 #ifdef ppc
781 else {
782 maxDec = decval ? decval : 0x7FFFFFFF; /* Set or reset the max decrementer */
783 }
784 #else
785 else
786 ret = ENOTSUP;
787 #endif /* ppc */
788
789 return(ret);
790 }
791
792 int
793 kdbg_setreg(kd_regtype * kdr)
794 {
795 int ret=0;
796 unsigned int val_1, val_2, val;
797 switch (kdr->type) {
798
799 case KDBG_CLASSTYPE :
800 val_1 = (kdr->value1 & 0xff);
801 val_2 = (kdr->value2 & 0xff);
802 kdlog_beg = (val_1<<24);
803 kdlog_end = (val_2<<24);
804 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
805 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
806 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
807 kdebug_slowcheck |= SLOW_CHECKS;
808 break;
809 case KDBG_SUBCLSTYPE :
810 val_1 = (kdr->value1 & 0xff);
811 val_2 = (kdr->value2 & 0xff);
812 val = val_2 + 1;
813 kdlog_beg = ((val_1<<24) | (val_2 << 16));
814 kdlog_end = ((val_1<<24) | (val << 16));
815 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
816 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
817 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
818 kdebug_slowcheck |= SLOW_CHECKS;
819 break;
820 case KDBG_RANGETYPE :
821 kdlog_beg = (kdr->value1);
822 kdlog_end = (kdr->value2);
823 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
824 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
825 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
826 kdebug_slowcheck |= SLOW_CHECKS;
827 break;
828 case KDBG_VALCHECK:
829 kdlog_value1 = (kdr->value1);
830 kdlog_value2 = (kdr->value2);
831 kdlog_value3 = (kdr->value3);
832 kdlog_value4 = (kdr->value4);
833 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
834 kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
835 kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
836 kdebug_slowcheck |= SLOW_CHECKS;
837 break;
838 case KDBG_TYPENONE :
839 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
840
841 if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
842 kdebug_slowcheck |= SLOW_CHECKS;
843 else
844 kdebug_slowcheck &= ~SLOW_CHECKS;
845
846 kdlog_beg = 0;
847 kdlog_end = 0;
848 break;
849 default :
850 ret = EINVAL;
851 break;
852 }
853 return(ret);
854 }
855
856 int
857 kdbg_getreg(__unused kd_regtype * kdr)
858 {
859 #if 0
860 int i,j, ret=0;
861 unsigned int val_1, val_2, val;
862
863 switch (kdr->type) {
864 case KDBG_CLASSTYPE :
865 val_1 = (kdr->value1 & 0xff);
866 val_2 = val_1 + 1;
867 kdlog_beg = (val_1<<24);
868 kdlog_end = (val_2<<24);
869 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
870 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
871 break;
872 case KDBG_SUBCLSTYPE :
873 val_1 = (kdr->value1 & 0xff);
874 val_2 = (kdr->value2 & 0xff);
875 val = val_2 + 1;
876 kdlog_beg = ((val_1<<24) | (val_2 << 16));
877 kdlog_end = ((val_1<<24) | (val << 16));
878 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
879 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
880 break;
881 case KDBG_RANGETYPE :
882 kdlog_beg = (kdr->value1);
883 kdlog_end = (kdr->value2);
884 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
885 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
886 break;
887 case KDBG_TYPENONE :
888 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
889 kdlog_beg = 0;
890 kdlog_end = 0;
891 break;
892 default :
893 ret = EINVAL;
894 break;
895 }
896 #endif /* 0 */
897 return(EINVAL);
898 }
899
900
901 int
902 kdbg_readmap(user_addr_t buffer, size_t *number)
903 {
904 int avail = *number;
905 int ret = 0;
906 unsigned int count = 0;
907
908 count = avail/sizeof (kd_threadmap);
909
910 if (count && (count <= kd_mapcount))
911 {
912 if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
913 {
914 if (*number < kd_mapsize)
915 ret=EINVAL;
916 else
917 {
918 if (copyout(kd_mapptr, buffer, kd_mapsize))
919 ret=EINVAL;
920 }
921 }
922 else
923 ret=EINVAL;
924 }
925 else
926 ret=EINVAL;
927
928 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
929 {
930 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
931 kdebug_flags &= ~KDBG_MAPINIT;
932 kd_mapsize = 0;
933 kd_mapptr = (kd_threadmap *) 0;
934 kd_mapcount = 0;
935 }
936
937 return(ret);
938 }
939
940 int
941 kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
942 {
943 int avail = *number;
944 int ret = 0;
945
946 if (kd_entropy_buffer)
947 return(EBUSY);
948
949 kd_entropy_count = avail/sizeof(mach_timespec_t);
950 kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
951 kd_entropy_indx = 0;
952
953 /* Enforce maximum entropy entries here if needed */
954
955 /* allocate entropy buffer */
956 if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
957 (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
958 {
959 kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
960 }
961 else
962 {
963 kd_entropy_buffer = (uint64_t *) 0;
964 kd_entropy_count = 0;
965 kd_entropy_indx = 0;
966 return (EINVAL);
967 }
968
969 if (ms_timeout < 10)
970 ms_timeout = 10;
971
972 /* Enable entropy sampling */
973 kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
974 kdebug_slowcheck |= SLOW_ENTROPY;
975
976 ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
977
978 /* Disable entropy sampling */
979 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
980 kdebug_slowcheck &= ~SLOW_ENTROPY;
981
982 *number = 0;
983 ret = 0;
984
985 if (kd_entropy_indx > 0)
986 {
987 /* copyout the buffer */
988 if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
989 ret = EINVAL;
990 else
991 *number = kd_entropy_indx;
992 }
993
994 /* Always cleanup */
995 kd_entropy_count = 0;
996 kd_entropy_indx = 0;
997 kd_entropy_buftomem = 0;
998 kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
999 kd_entropy_buffer = (uint64_t *) 0;
1000 return(ret);
1001 }
1002
1003
1004 static void
1005 kdbg_set_nkdbufs(unsigned int value)
1006 {
1007 /*
1008 * We allow a maximum buffer size of 25% of either RAM or the maximum mapped address, whichever is smaller
1009 * 'value' is the desired number of trace entries
1010 */
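	/*
	 * Worked example with hypothetical numbers: for sane_size = 512MB and
	 * a 32-byte kd_buf, max_entries = (512MB / 4) / 32 = 4M entries; any
	 * larger request is silently clamped to that.
	 */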
1011 unsigned int max_entries = (sane_size/4) / sizeof(kd_buf);
1012
1013 if (value <= max_entries)
1014 nkdbufs = value;
1015 else
1016 nkdbufs = max_entries;
1017 }
1018
1019
1020 /*
1021 * This function is provided for the CHUD toolkit only.
1022 * int val:
1023 * zero disables kdebug_chudhook function call
1024 * non-zero enables kdebug_chudhook function call
1025 * void *fn:
1026 * address of the enabled kdebug_chudhook function
1027 */
1028
1029 void
1030 kdbg_control_chud(int val, void *fn)
1031 {
1032 if (val) {
1033 /* enable chudhook */
1034 kdebug_chudhook = fn;
1035 kdebug_enable |= KDEBUG_ENABLE_CHUD;
1036 }
1037 else {
1038 /* disable chudhook */
1039 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
1040 kdebug_chudhook = 0;
1041 }
1042 }
1043
1044
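/*
 * kdbg_control() is the backend for the KERN_KDEBUG sysctl: name[0] selects
 * the operation (KERN_KDENABLE, KERN_KDSETBUF, KERN_KDREADTR, ...).  Most
 * operations are owned by a single controlling process recorded in
 * global_state_pid; any other process gets EBUSY until the owner goes away
 * or the state is cleared with KERN_KDREMOVE.
 */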
1045 int
1046 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
1047 {
1048 int ret=0;
1049 size_t size=*sizep;
1050 unsigned int value = 0;
1051 kd_regtype kd_Reg;
1052 kbufinfo_t kd_bufinfo;
1053 pid_t curpid;
1054 struct proc *p, *curproc;
1055
1056 if (name[0] == KERN_KDGETENTROPY ||
1057 name[0] == KERN_KDEFLAGS ||
1058 name[0] == KERN_KDDFLAGS ||
1059 name[0] == KERN_KDENABLE ||
1060 name[0] == KERN_KDSETBUF) {
1061
1062 if ( namelen < 2 )
1063 return(EINVAL);
1064 value = name[1];
1065 }
1066
1067 kdbg_lock_init();
1068
1069 if ( !(kdebug_flags & KDBG_LOCKINIT))
1070 return(ENOMEM);
1071
1072 lck_mtx_lock(kd_trace_mtx_sysctl);
1073
1074 if (name[0] == KERN_KDGETBUF) {
1075 /*
1076 * Does not alter the global_state_pid
1077 * This is a passive request.
1078 */
1079 if (size < sizeof(kd_bufinfo.nkdbufs)) {
1080 /*
1081 * There is not enough room to return even
1082 * the first element of the info structure.
1083 */
1084 lck_mtx_unlock(kd_trace_mtx_sysctl);
1085
1086 return(EINVAL);
1087 }
1088 kd_bufinfo.nkdbufs = nkdbufs;
1089 kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
1090
1091 if ( (kdebug_slowcheck & SLOW_NOLOG) )
1092 kd_bufinfo.nolog = 1;
1093 else
1094 kd_bufinfo.nolog = 0;
1095 kd_bufinfo.flags = kdebug_flags;
1096 kd_bufinfo.bufid = global_state_pid;
1097
1098 if (size >= sizeof(kd_bufinfo)) {
1099 /*
1100 * Provide all the info we have
1101 */
1102 if (copyout (&kd_bufinfo, where, sizeof(kd_bufinfo))) {
1103 lck_mtx_unlock(kd_trace_mtx_sysctl);
1104
1105 return(EINVAL);
1106 }
1107 }
1108 else {
1109 /*
1110 * For backwards compatibility, only provide
1111 * as much info as there is room for.
1112 */
1113 if (copyout (&kd_bufinfo, where, size)) {
1114 lck_mtx_unlock(kd_trace_mtx_sysctl);
1115
1116 return(EINVAL);
1117 }
1118 }
1119 lck_mtx_unlock(kd_trace_mtx_sysctl);
1120
1121 return(0);
1122 } else if (name[0] == KERN_KDGETENTROPY) {
1123 if (kd_entropy_buffer)
1124 ret = EBUSY;
1125 else
1126 ret = kdbg_getentropy(where, sizep, value);
1127 lck_mtx_unlock(kd_trace_mtx_sysctl);
1128
1129 return (ret);
1130 }
1131
1132 if ((curproc = current_proc()) != NULL)
1133 curpid = curproc->p_pid;
1134 else {
1135 lck_mtx_unlock(kd_trace_mtx_sysctl);
1136
1137 return (ESRCH);
1138 }
1139 if (global_state_pid == -1)
1140 global_state_pid = curpid;
1141 else if (global_state_pid != curpid) {
1142 if ((p = proc_find(global_state_pid)) == NULL) {
1143 /*
1144 * The global pid no longer exists
1145 */
1146 global_state_pid = curpid;
1147 } else {
1148 /*
1149 * The global pid exists, deny this request
1150 */
1151 proc_rele(p);
1152 lck_mtx_unlock(kd_trace_mtx_sysctl);
1153
1154 return(EBUSY);
1155 }
1156 }
1157
1158 switch(name[0]) {
1159 case KERN_KDEFLAGS:
1160 value &= KDBG_USERFLAGS;
1161 kdebug_flags |= value;
1162 break;
1163 case KERN_KDDFLAGS:
1164 value &= KDBG_USERFLAGS;
1165 kdebug_flags &= ~value;
1166 break;
1167 case KERN_KDENABLE: /* used to enable or disable */
1168 if (value)
1169 {
1170 /* enable only if buffer is initialized */
1171 if (!(kdebug_flags & KDBG_BUFINIT))
1172 {
1173 ret=EINVAL;
1174 break;
1175 }
1176 kdbg_mapinit();
1177
1178 kdebug_enable |= KDEBUG_ENABLE_TRACE;
1179 kdebug_slowcheck &= ~SLOW_NOLOG;
1180 }
1181 else
1182 {
1183 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
1184 kdebug_slowcheck |= SLOW_NOLOG;
1185 }
1186 break;
1187 case KERN_KDSETBUF:
1188 kdbg_set_nkdbufs(value);
1189 break;
1190 case KERN_KDSETUP:
1191 ret=kdbg_reinit();
1192 break;
1193 case KERN_KDREMOVE:
1194 kdbg_clear();
1195 break;
1196 case KERN_KDSETREG:
1197 if(size < sizeof(kd_regtype)) {
1198 ret=EINVAL;
1199 break;
1200 }
1201 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1202 ret= EINVAL;
1203 break;
1204 }
1205 ret = kdbg_setreg(&kd_Reg);
1206 break;
1207 case KERN_KDGETREG:
1208 if(size < sizeof(kd_regtype)) {
1209 ret = EINVAL;
1210 break;
1211 }
1212 ret = kdbg_getreg(&kd_Reg);
1213 if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
1214 ret=EINVAL;
1215 }
1216 break;
1217 case KERN_KDREADTR:
1218 ret = kdbg_read(where, sizep);
1219 break;
1220 case KERN_KDPIDTR:
1221 if (size < sizeof(kd_regtype)) {
1222 ret = EINVAL;
1223 break;
1224 }
1225 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1226 ret= EINVAL;
1227 break;
1228 }
1229 ret = kdbg_setpid(&kd_Reg);
1230 break;
1231 case KERN_KDPIDEX:
1232 if (size < sizeof(kd_regtype)) {
1233 ret = EINVAL;
1234 break;
1235 }
1236 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1237 ret= EINVAL;
1238 break;
1239 }
1240 ret = kdbg_setpidex(&kd_Reg);
1241 break;
1242 case KERN_KDTHRMAP:
1243 ret = kdbg_readmap(where, sizep);
1244 break;
1245 case KERN_KDSETRTCDEC:
1246 if (size < sizeof(kd_regtype)) {
1247 ret = EINVAL;
1248 break;
1249 }
1250 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1251 ret= EINVAL;
1252 break;
1253 }
1254 ret = kdbg_setrtcdec(&kd_Reg);
1255 break;
1256
1257 default:
1258 ret= EINVAL;
1259 }
1260 lck_mtx_unlock(kd_trace_mtx_sysctl);
1261
1262 return(ret);
1263 }
1264
1265
1266 /*
1267 * This code can run concurrently with kernel_debug_internal()
1268 * without the need for any locks, because all reads of kd_bufptr[i],
1269 * which get modified by kernel_debug_internal(), are safe.
1270 */
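/*
 * Outline of the read path (descriptive only): snapshot each CPU's write
 * position, temporarily set KDBG_NOWRAP so writers stop rather than lap the
 * reader, then repeatedly pick the CPU whose oldest unread event has the
 * smallest timestamp, staging up to KDCOPYBUF_COUNT events in kdcopybuf per
 * copyout().  If a buffer wrapped, a MISCDBG_CODE(DBG_BUFFER, 0) marker
 * event is injected so post-processing can tell that older events were
 * lost.
 */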
1271 int
1272 kdbg_read(user_addr_t buffer, size_t *number)
1273 {
1274 unsigned int count;
1275 unsigned int cpu;
1276 int mincpu;
1277 uint64_t mintime, t, last_wrap_time;
1278 int last_wrap_cpu;
1279 int error = 0;
1280 kd_buf *tempbuf;
1281 uint32_t tempbuf_count;
1282 uint32_t tempbuf_number;
1283 unsigned int old_kdebug_flags, new_kdebug_flags;
1284 unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
1285 boolean_t first_event = TRUE;
1286
1287 count = *number/sizeof(kd_buf);
1288 *number = 0;
1289
1290 if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
1291 return EINVAL;
1292
1293 /*
1294 * because we hold kd_trace_mtx_sysctl, no other control threads can
1295 * be playing with kdebug_flags... the code that cuts new events could
1296 * be running, but it only reads kdebug_flags, it doesn't write it..
1297 * use an OSCompareAndSwap to make sure the other processors see the
1298 * change of state immediately, not to protect against 2 threads racing to update it
1299 */
1300 old_kdebug_slowcheck = kdebug_slowcheck;
1301 do {
1302 old_kdebug_flags = kdebug_flags;
1303 new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
1304 new_kdebug_flags |= KDBG_NOWRAP;
1305 } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));
1306
1307 last_wrap_time = 0;
1308 last_wrap_cpu = -1;
1309
1310 for (cpu = 0; cpu < kd_cpus; cpu++) {
1311 kd_buf *cur_bufptr;
1312
1313 if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
1314 cur_bufptr = kdbip[cpu].kd_buffer;
1315
1316 if (kdbip[cpu].kd_wrapped) {
1317 kdbip[cpu].kd_wrapped = 0;
1318 kdbip[cpu].kd_readlast = cur_bufptr;
1319 kdbip[cpu].kd_stop = cur_bufptr;
1320
1321 if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
1322 last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
1323 last_wrap_cpu = cpu;
1324 }
1325 } else {
1326 if (kdbip[cpu].kd_readlast == cur_bufptr)
1327 kdbip[cpu].kd_stop = 0;
1328 else
1329 kdbip[cpu].kd_stop = cur_bufptr;
1330 }
1331 }
1332 if (count > nkdbufs)
1333 count = nkdbufs;
1334
1335 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
1336 tempbuf_count = KDCOPYBUF_COUNT;
1337
1338 if (last_wrap_cpu == -1)
1339 first_event = FALSE;
1340
1341 while (count) {
1342 tempbuf = kdcopybuf;
1343 tempbuf_number = 0;
1344
1345 while (tempbuf_count) {
1346 mintime = 0xffffffffffffffffULL; /* all actual timestamps are below */
1347 mincpu = -1;
1348
1349 for (cpu = 0; cpu < kd_cpus; cpu++) {
1350 if (kdbip[cpu].kd_stop == 0) /* empty buffer */
1351 continue;
1352 t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;
1353
1354 if (t < mintime) {
1355 mintime = t;
1356 mincpu = cpu;
1357 }
1358 }
1359 if (mincpu < 0)
1360 /*
1361 * all buffers ran empty early
1362 */
1363 break;
1364
1365 if (first_event == TRUE) {
1366 /*
1367 * make sure we leave room for the
1368 * LAST_WRAPPER event we inject
1369 * by throwing away the first event
1370 * it's better to lose that one
1371 * than the last one
1372 */
1373 first_event = FALSE;
1374
1375 kdbip[mincpu].kd_readlast++;
1376
1377 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
1378 kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
1379 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
1380 kdbip[mincpu].kd_stop = 0;
1381
1382 continue;
1383 }
1384 if (last_wrap_cpu == mincpu) {
1385 tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
1386 tempbuf->arg1 = kd_bufsize / sizeof(kd_buf);
1387 tempbuf->arg2 = kd_cpus;
1388 tempbuf->arg3 = 0;
1389 tempbuf->arg4 = 0;
1390 tempbuf->arg5 = (int)current_thread();
1391
1392 tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);
1393
1394 tempbuf++;
1395
1396 last_wrap_cpu = -1;
1397
1398 } else {
1399 *(tempbuf++) = kdbip[mincpu].kd_readlast[0];
1400
1401 kdbip[mincpu].kd_readlast++;
1402
1403 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
1404 kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
1405 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
1406 kdbip[mincpu].kd_stop = 0;
1407 }
1408 tempbuf_count--;
1409 tempbuf_number++;
1410 }
1411 if (tempbuf_number) {
1412 if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
1413 *number = 0;
1414 error = EINVAL;
1415 break;
1416 }
1417 count -= tempbuf_number;
1418 *number += tempbuf_number;
1419 buffer += (tempbuf_number * sizeof(kd_buf));
1420 }
1421 if (tempbuf_count)
1422 /*
1423 * all trace buffers are empty
1424 */
1425 break;
1426
1427 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
1428 tempbuf_count = KDCOPYBUF_COUNT;
1429 }
1430 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
1431 do {
1432 old_kdebug_flags = kdebug_flags;
1433 new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
1434 } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));
1435
1436 if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
1437 do {
1438 old_kdebug_slowcheck = kdebug_slowcheck;
1439 new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
1440 } while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
1441 }
1442 }
1443 return (error);
1444 }
1445
1446
1447 unsigned char *getProcName(struct proc *proc);
1448 unsigned char *getProcName(struct proc *proc) {
1449
1450 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
1451
1452 }
1453
1454 #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
1455 #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
1456 #ifdef __i386__
1457 #define TRAP_DEBUGGER __asm__ volatile("int3");
1458 #endif
1459 #ifdef __ppc__
1460 #define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
1461 #endif
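/*
 * A sketch of the mechanism: TRAP_DEBUGGER executes a breakpoint/trap
 * instruction to drop into the kernel debugger (KDP); the snapshot itself
 * is taken there, into the buffer registered by kdp_snapshot_preflight()
 * (see stack_snapshot2() below).
 */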
1462
1463 #define SANE_TRACEBUF_SIZE (2 * 1024 * 1024)
1464
1465 /* Initialize the mutex governing access to the stack snapshot subsystem */
1466 __private_extern__ void
1467 stackshot_lock_init( void )
1468 {
1469 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
1470
1471 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
1472
1473 stackshot_subsys_lck_attr = lck_attr_alloc_init();
1474
1475 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
1476 }
1477
1478 /*
1479 * stack_snapshot: Obtains a coherent set of stack traces for all threads
1480 * on the system, tracing both kernel and user stacks
1481 * where available. Uses machine-specific trace routines
1482 * for ppc, ppc64 and x86.
1483 * Inputs: uap->pid - process id of process to be traced, or -1
1484 * for the entire system
1485 * uap->tracebuf - address of the user space destination
1486 * buffer
1487 * uap->tracebuf_size - size of the user space trace buffer
1488 * uap->options - various options, including the maximum
1489 * number of frames to trace.
1490 * Outputs: EPERM if the caller is not privileged
1491 * EINVAL if the supplied trace buffer isn't sanely sized
1492 * ENOMEM if we don't have enough memory to satisfy the
1493 * request
1494 * ENOENT if the target pid isn't found
1495 * ENOSPC if the supplied buffer is insufficient
1496 * *retval contains the number of bytes traced, if successful
1497 * and -1 otherwise. If the request failed due to
1498 * tracebuffer exhaustion, we copyout as much as possible.
1499 */
1500 int
1501 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {
1502 int error = 0;
1503
1504 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
1505 return(error);
1506
1507 return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
1508 uap->options, retval);
1509 }
1510
1511 int
1512 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
1513 {
1514 int error = 0;
1515 unsigned bytesTraced = 0;
1516
1517 *retval = -1;
1518 /* Serialize tracing */
1519 STACKSHOT_SUBSYS_LOCK();
1520
1521 if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
1522 error = EINVAL;
1523 goto error_exit;
1524 }
1525
1526 MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);
1527
1528 if (stackshot_snapbuf == NULL) {
1529 error = ENOMEM;
1530 goto error_exit;
1531 }
1532 /* Preload trace parameters */
1533 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);
1534
1535 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
1536 * the trace buffer
1537 */
1538 if (panic_active()) {
1539 error = ENOMEM;
1540 goto error_exit;
1541 }
1542
1543 TRAP_DEBUGGER;
1544
1545 bytesTraced = kdp_stack_snapshot_bytes_traced();
1546
1547 if (bytesTraced > 0) {
1548 if ((error = copyout(stackshot_snapbuf, tracebuf,
1549 ((bytesTraced < tracebuf_size) ?
1550 bytesTraced : tracebuf_size))))
1551 goto error_exit;
1552 *retval = bytesTraced;
1553 }
1554 else {
1555 error = ENOENT;
1556 goto error_exit;
1557 }
1558
1559 error = kdp_stack_snapshot_geterror();
1560 if (error == -1) {
1561 error = ENOSPC;
1562 *retval = -1;
1563 goto error_exit;
1564 }
1565
1566 error_exit:
1567 if (stackshot_snapbuf != NULL)
1568 FREE(stackshot_snapbuf, M_TEMP);
1569 stackshot_snapbuf = NULL;
1570 STACKSHOT_SUBSYS_UNLOCK();
1571 return error;
1572 }
1573
1574 void
1575 start_kern_tracing(unsigned int new_nkdbufs) {
1576 if (!new_nkdbufs)
1577 return;
1578 kdbg_set_nkdbufs(new_nkdbufs);
1579 kdbg_lock_init();
1580 kdbg_reinit();
1581 kdebug_enable |= KDEBUG_ENABLE_TRACE;
1582 kdebug_slowcheck &= ~SLOW_NOLOG;
1583 kdbg_mapinit();
1584 printf("kernel tracing started\n");
1585 }