1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <machine/spl.h>
24
25 #include <sys/errno.h>
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/proc_internal.h>
29 #include <sys/vm.h>
30 #include <sys/sysctl.h>
31 #include <sys/kdebug.h>
32 #include <sys/sysproto.h>
33
34 #define HZ 100
35 #include <mach/clock_types.h>
36 #include <mach/mach_types.h>
37 #include <mach/mach_time.h>
38 #include <machine/machine_routines.h>
39
40 #include <kern/thread.h>
41 #include <kern/task.h>
42 #include <vm/vm_kern.h>
43 #include <sys/lock.h>
44
45 #include <sys/malloc.h>
46 #include <sys/kauth.h>
47
48 #include <mach/mach_host.h> /* for host_info() */
49 #include <libkern/OSAtomic.h>
50
51 /* XXX should have prototypes, but Mach does not provide one */
52 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
53 int cpu_number(void); /* XXX <machine/...> include path broken */
54
55 /* XXX should probably be static, but it's debugging code... */
56 int kdbg_read(user_addr_t, size_t *);
57 void kdbg_control_chud(int, void *);
58 int kdbg_control(int *, u_int, user_addr_t, size_t *);
59 int kdbg_getentropy (user_addr_t, size_t *, int);
60 int kdbg_readmap(user_addr_t, size_t *);
61 int kdbg_getreg(kd_regtype *);
62 int kdbg_setreg(kd_regtype *);
63 int kdbg_setrtcdec(kd_regtype *);
64 int kdbg_setpidex(kd_regtype *);
65 int kdbg_setpid(kd_regtype *);
66 void kdbg_mapinit(void);
67 int kdbg_reinit(void);
68 int kdbg_bootstrap(void);
69
70 static int create_buffers(void);
71 static void delete_buffers(void);
72
73 #ifdef ppc
74 extern uint32_t maxDec;
75 #endif
76
77 /* trace enable status */
78 unsigned int kdebug_enable = 0;
79
80 /* track timestamps for security server's entropy needs */
81 uint64_t * kd_entropy_buffer = 0;
82 unsigned int kd_entropy_bufsize = 0;
83 unsigned int kd_entropy_count = 0;
84 unsigned int kd_entropy_indx = 0;
85 unsigned int kd_entropy_buftomem = 0;
86
87
88 #define SLOW_NOLOG 0x01
89 #define SLOW_CHECKS 0x02
90 #define SLOW_ENTROPY 0x04
91
92 unsigned int kdebug_slowcheck=SLOW_NOLOG;
93
94 unsigned int kd_cpus;
95
96 struct kd_bufinfo {
97 kd_buf * kd_stop;
98 kd_buf * kd_bufptr;
99 kd_buf * kd_buffer;
100 kd_buf * kd_buflast;
101 kd_buf * kd_readlast;
102 int kd_wrapped; /* plus, the global flag KDBG_WRAPPED is set if one of the buffers has wrapped */
103 uint64_t kd_prev_timebase;
104 int kd_pad[24]; /* pad out to 128 bytes so that no cache line is shared between CPUs */
105
106 };
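/*
 * Layout sketch (assuming the 32-bit kernel ABI this release targets):
 * five 4-byte pointers + kd_wrapped (4) + kd_prev_timebase (8) +
 * kd_pad[24] (96) = 128 bytes, so each cpu's kd_bufinfo occupies its own
 * 128-byte region and the per-cpu writers never share a cache line.
 */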
107
108 struct kd_bufinfo *kdbip = NULL;
109
110 #define KDCOPYBUF_COUNT 1024
111 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
112 kd_buf *kdcopybuf = NULL;
113
114
115 unsigned int nkdbufs = 8192;
116 unsigned int kd_bufsize = 0;
117 unsigned int kdebug_flags = 0;
118 unsigned int kdlog_beg=0;
119 unsigned int kdlog_end=0;
120 unsigned int kdlog_value1=0;
121 unsigned int kdlog_value2=0;
122 unsigned int kdlog_value3=0;
123 unsigned int kdlog_value4=0;
124
125 static lck_mtx_t * kd_trace_mtx_sysctl;
126 static lck_grp_t * kd_trace_mtx_sysctl_grp;
127 static lck_attr_t * kd_trace_mtx_sysctl_attr;
128 static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
129
130 static lck_grp_t *stackshot_subsys_lck_grp;
131 static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
132 static lck_attr_t *stackshot_subsys_lck_attr;
133 static lck_mtx_t stackshot_subsys_mutex;
134
135 void *stackshot_snapbuf = NULL;
136
137 int
138 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval);
139
140 extern void
141 kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t options);
142
143 extern int
144 kdp_stack_snapshot_geterror(void);
145 extern unsigned int
146 kdp_stack_snapshot_bytes_traced(void);
147
148 kd_threadmap *kd_mapptr = 0;
149 unsigned int kd_mapsize = 0;
150 unsigned int kd_mapcount = 0;
151 unsigned int kd_maptomem = 0;
152
153 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
154
155 #define DBG_FUNC_MASK 0xfffffffc
156
157 /* task to string structure */
158 struct tts
159 {
160 task_t task; /* from the proc's task */
161 pid_t pid; /* from the proc's p_pid */
162 char task_comm[20]; /* from the proc's p_comm */
163 };
164
165 typedef struct tts tts_t;
166
167 struct krt
168 {
169 kd_threadmap *map; /* pointer to the map buffer */
170 int count;
171 int maxcount;
172 struct tts *atts;
173 };
174
175 typedef struct krt krt_t;
176
177 /* This is for the CHUD toolkit call */
178 typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
179 unsigned int arg2, unsigned int arg3,
180 unsigned int arg4, unsigned int arg5);
181
182 kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
183
184 __private_extern__ void stackshot_lock_init( void );
185
186 /* Support syscall SYS_kdebug_trace */
187 int
188 kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused register_t *retval)
189 {
190 if ( (kdebug_enable == 0) )
191 return(EINVAL);
192
193 kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
194 return(0);
195 }
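/*
 * Illustrative caller (a sketch, not part of this file): once tracing has
 * been enabled through the kdebug sysctl, a user process can cut its own
 * events with the SYS_kdebug_trace syscall handled above.  The debugid
 * below is a made-up example of the class(8)/subclass(8)/code(14)/
 * function(2) encoding, not a reserved value.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/kdebug.h>
 *	#include <unistd.h>
 *
 *	#define MYAPP_DBG_CODE	((DBG_APPS << 24) | (1 << 16) | (42 << 2))
 *
 *	syscall(SYS_kdebug_trace, MYAPP_DBG_CODE | DBG_FUNC_START, a, b, 0, 0);
 *	... the work being measured ...
 *	syscall(SYS_kdebug_trace, MYAPP_DBG_CODE | DBG_FUNC_END, result, 0, 0, 0);
 *
 * Note that arg5 of the recorded event is always the calling thread, filled
 * in by kernel_debug() below, never a user-supplied value.
 */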
196
197
198
199 static int
200 create_buffers(void)
201 {
202 unsigned int cpu, i;
203 int nentries;
204
205 nentries = nkdbufs / kd_cpus;
206 kd_bufsize = nentries * sizeof(kd_buf);
207
208 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_cpus);
209
210 if (kdcopybuf == 0) {
211 if (kmem_alloc(kernel_map, (unsigned int *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS)
212 return ENOMEM;
213 }
214 for (cpu = 0; cpu < kd_cpus; cpu++) {
215 if (kmem_alloc(kernel_map, (unsigned int *)&kdbip[cpu].kd_buffer, kd_bufsize) != KERN_SUCCESS)
216 break;
217 }
218 if (cpu < kd_cpus) {
219 for (i = 0; i < cpu; i++)
220 kmem_free(kernel_map, (vm_offset_t)kdbip[i].kd_buffer, kd_bufsize);
221 kd_bufsize = 0;
222
223 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
224 kdcopybuf = NULL;
225
226 return(ENOMEM);
227 }
228 for (cpu = 0; cpu < kd_cpus; cpu++) {
229 kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
230 kdbip[cpu].kd_buflast = &kdbip[cpu].kd_bufptr[nentries];
231 kdbip[cpu].kd_readlast = kdbip[cpu].kd_bufptr;
232 }
233 kdebug_flags |= KDBG_BUFINIT;
234
235 return(0);
236 }
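/*
 * Sizing note (a sketch of the arithmetic above, not new behavior): the
 * global request of nkdbufs entries is split evenly across the cpus, so
 * with the default nkdbufs of 8192 on a 2-cpu machine each per-cpu ring
 * gets nentries = 4096 slots and kd_bufsize = 4096 * sizeof(kd_buf) bytes,
 * plus the single shared kdcopybuf of KDCOPYBUF_COUNT entries used only by
 * the reader.
 */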
237
238
239 static void
240 delete_buffers(void)
241 {
242 unsigned int cpu;
243
244 if (kd_bufsize && (kdebug_flags & KDBG_BUFINIT)) {
245 for (cpu = 0; cpu < kd_cpus; cpu++)
246 kmem_free(kernel_map, (vm_offset_t)kdbip[cpu].kd_buffer, kd_bufsize);
247 kd_bufsize = 0;
248 }
249 if (kdcopybuf) {
250 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
251 kdcopybuf = NULL;
252 }
253 kdebug_flags &= ~KDBG_BUFINIT;
254 }
255
256
257 static void
258 kernel_debug_internal(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
259 unsigned int arg4, unsigned int arg5, int entropy_flag)
260 {
261 int s;
262 kd_buf * kd;
263 struct proc *curproc;
264 unsigned long long now;
265 int cpu;
266
267 s = ml_set_interrupts_enabled(FALSE);
268
269 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
270 cpu = cpu_number();
271
272 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
273 if (kdebug_chudhook)
274 kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
275
276 if ( !(kdebug_enable & (KDEBUG_ENABLE_ENTROPY | KDEBUG_ENABLE_TRACE)))
277 goto out;
278 }
279
280 if (kdebug_slowcheck == 0)
281 goto record_trace;
282
283 if (entropy_flag && (kdebug_enable & KDEBUG_ENABLE_ENTROPY))
284 {
285 if (kd_entropy_indx < kd_entropy_count)
286 {
287 kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time();
288 kd_entropy_indx++;
289 }
290
291 if (kd_entropy_indx == kd_entropy_count)
292 {
293 /* Disable entropy collection */
294 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
295 kdebug_slowcheck &= ~SLOW_ENTROPY;
296 }
297 }
298
299 if ( (kdebug_slowcheck & SLOW_NOLOG) )
300 goto out;
301
302 if (kdebug_flags & KDBG_PIDCHECK)
303 {
304 /* If kdebug flag is not set for current proc, return */
305 curproc = current_proc();
306 if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
307 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
308 goto out;
309 }
310 else if (kdebug_flags & KDBG_PIDEXCLUDE)
311 {
312 /* If kdebug flag is set for current proc, return */
313 curproc = current_proc();
314 if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
315 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
316 goto out;
317 }
318
319 if (kdebug_flags & KDBG_RANGECHECK)
320 {
321 if ((debugid < kdlog_beg)
322 || ((debugid >= kdlog_end) && (debugid >> 24 != DBG_TRACE)))
323 goto out;
324 }
325 else if (kdebug_flags & KDBG_VALCHECK)
326 {
327 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
328 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
329 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
330 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
331 (debugid >> 24 != DBG_TRACE))
332 goto out;
333 }
334
335 record_trace:
336 kd = kdbip[cpu].kd_bufptr;
337 kd->debugid = debugid;
338 kd->arg1 = arg1;
339 kd->arg2 = arg2;
340 kd->arg3 = arg3;
341 kd->arg4 = arg4;
342 kd->arg5 = arg5;
343
344 /*
345 * Watch for out of order timestamps
346 */
347 if (now < kdbip[cpu].kd_prev_timebase)
348 {
349 /*
350 * if so, just store the previous timestamp + a cycle
351 */
352 now = ++kdbip[cpu].kd_prev_timebase & KDBG_TIMESTAMP_MASK;
353 }
354 else
355 {
356 kdbip[cpu].kd_prev_timebase = now;
357 }
358 kd->timestamp = now | (((uint64_t)cpu) << KDBG_CPU_SHIFT);
359
360 kdbip[cpu].kd_bufptr++;
361
362 if (kdbip[cpu].kd_bufptr >= kdbip[cpu].kd_buflast)
363 kdbip[cpu].kd_bufptr = kdbip[cpu].kd_buffer;
364
365 if (kdbip[cpu].kd_bufptr == kdbip[cpu].kd_readlast) {
366 if (kdebug_flags & KDBG_NOWRAP)
367 kdebug_slowcheck |= SLOW_NOLOG;
368 kdbip[cpu].kd_wrapped = 1;
369 kdebug_flags |= KDBG_WRAPPED;
370 }
371
372 out:
373 ml_set_interrupts_enabled(s);
374 }
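/*
 * The timestamp word written above packs two fields: the low bits hold
 * mach_absolute_time() masked with KDBG_TIMESTAMP_MASK, and the bits at and
 * above KDBG_CPU_SHIFT hold the cpu number.  A consumer splits them back
 * apart with something like (illustrative sketch only):
 *
 *	uint64_t raw  = kd->timestamp;
 *	uint64_t when = raw & KDBG_TIMESTAMP_MASK;
 *	unsigned cpu  = (unsigned)(raw >> KDBG_CPU_SHIFT);
 */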
375
376 void
377 kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
378 unsigned int arg4, __unused unsigned int arg5)
379 {
380 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (int)current_thread(), 1);
381 }
382
383 void
384 kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3,
385 unsigned int arg4, unsigned int arg5)
386 {
387 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0);
388 }
389
390 static void
391 kdbg_lock_init(void)
392 {
393 host_basic_info_data_t hinfo;
394 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
395
396
397 if (kdebug_flags & KDBG_LOCKINIT)
398 return;
399
400 /* get the number of cpus and cache it */
401 #define BSD_HOST 1
402 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
403 kd_cpus = hinfo.physical_cpu_max;
404
405 if (kmem_alloc(kernel_map, (unsigned int *)&kdbip, sizeof(struct kd_bufinfo) * kd_cpus) != KERN_SUCCESS)
406 return;
407
408 /*
409 * allocate lock group attribute and group
410 */
411 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
412 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
413
414 /*
415 * allocate the lock attribute
416 */
417 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
418
419
420 /*
421 * allocate and initialize the kdebug trace mutex
422 */
423 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
424
425 kdebug_flags |= KDBG_LOCKINIT;
426 }
427
428
429 int
430 kdbg_bootstrap(void)
431 {
432 kdebug_flags &= ~KDBG_WRAPPED;
433
434 return (create_buffers());
435 }
436
437 int
438 kdbg_reinit(void)
439 {
440 int ret=0;
441
442 /*
443 * Disable trace collecting
444 * First make sure we're not in
445 * the middle of cutting a trace
446 */
447
448 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
449 kdebug_slowcheck |= SLOW_NOLOG;
450
451 /*
452 * make sure the SLOW_NOLOG is seen
453 * by everyone that might be trying
454 * to cut a trace..
455 */
456 IOSleep(100);
457
458 delete_buffers();
459
460 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
461 {
462 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
463 kdebug_flags &= ~KDBG_MAPINIT;
464 kd_mapsize = 0;
465 kd_mapptr = (kd_threadmap *) 0;
466 kd_mapcount = 0;
467 }
468
469 ret = kdbg_bootstrap();
470
471 return(ret);
472 }
473
474 void
475 kdbg_trace_data(struct proc *proc, long *arg_pid)
476 {
477 if (!proc)
478 *arg_pid = 0;
479 else
480 *arg_pid = proc->p_pid;
481
482 return;
483 }
484
485
486 void
487 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
488 {
489 int i;
490 char *dbg_nameptr;
491 int dbg_namelen;
492 long dbg_parms[4];
493
494 if (!proc)
495 {
496 *arg1 = 0;
497 *arg2 = 0;
498 *arg3 = 0;
499 *arg4 = 0;
500 return;
501 }
502
503 /* Collect the process name (p_comm) for tracing */
504 dbg_nameptr = proc->p_comm;
505 dbg_namelen = strlen(proc->p_comm);
506 dbg_parms[0]=0L;
507 dbg_parms[1]=0L;
508 dbg_parms[2]=0L;
509 dbg_parms[3]=0L;
510
511 if(dbg_namelen > (int)sizeof(dbg_parms))
512 dbg_namelen = sizeof(dbg_parms);
513
514 for(i=0;dbg_namelen > 0; i++)
515 {
516 dbg_parms[i]=*(long*)dbg_nameptr;
517 dbg_nameptr += sizeof(long);
518 dbg_namelen -= sizeof(long);
519 }
520
521 *arg1=dbg_parms[0];
522 *arg2=dbg_parms[1];
523 *arg3=dbg_parms[2];
524 *arg4=dbg_parms[3];
525 }
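/*
 * The four longs returned above are just the first sizeof(dbg_parms) bytes
 * (16 on a 32-bit kernel) of p_comm copied verbatim, so a trace
 * post-processor can rebuild the command name roughly like this
 * (hypothetical helper, not part of the kernel):
 *
 *	char name[sizeof(long) * 4 + 1];
 *	memcpy(&name[0],                &arg1, sizeof(long));
 *	memcpy(&name[sizeof(long)],     &arg2, sizeof(long));
 *	memcpy(&name[sizeof(long) * 2], &arg3, sizeof(long));
 *	memcpy(&name[sizeof(long) * 3], &arg4, sizeof(long));
 *	name[sizeof(long) * 4] = '\0';
 */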
526
527 static void
528 kdbg_resolve_map(thread_t th_act, void *opaque)
529 {
530 kd_threadmap *mapptr;
531 krt_t *t = (krt_t *)opaque;
532
533 if(t->count < t->maxcount)
534 {
535 mapptr=&t->map[t->count];
536 mapptr->thread = (unsigned int)th_act;
537 (void) strncpy (mapptr->command, t->atts->task_comm,
538 sizeof(t->atts->task_comm)-1);
539 mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
540
541 /*
542 Some kernel threads have no associated pid.
543 We still need to mark the entry as valid.
544 */
545 if (t->atts->pid)
546 mapptr->valid = t->atts->pid;
547 else
548 mapptr->valid = 1;
549
550 t->count++;
551 }
552 }
553
554 void
555 kdbg_mapinit(void)
556 {
557 struct proc *p;
558 struct krt akrt;
559 int tts_count; /* number of task-to-string structures */
560 struct tts *tts_mapptr;
561 unsigned int tts_mapsize = 0;
562 unsigned int tts_maptomem=0;
563 int i;
564
565
566 if (kdebug_flags & KDBG_MAPINIT)
567 return;
568
569 /* Calculate the sizes of the map buffers */
570 for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p;
571 p = p->p_list.le_next)
572 {
573 kd_mapcount += get_task_numacts((task_t)p->task);
574 tts_count++;
575 }
576
577 /*
578 * The proc count could change during buffer allocation,
579 * so introduce a small fudge factor to bump up the
580 * buffer sizes. This gives new tasks some chance of
581 * making it into the tables. Bump up by 10%.
582 */
583 kd_mapcount += kd_mapcount/10;
584 tts_count += tts_count/10;
585
586 kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
587 if((kmem_alloc(kernel_map, & kd_maptomem,
588 (vm_size_t)kd_mapsize) == KERN_SUCCESS))
589 {
590 kd_mapptr = (kd_threadmap *) kd_maptomem;
591 bzero(kd_mapptr, kd_mapsize);
592 }
593 else
594 kd_mapptr = (kd_threadmap *) 0;
595
596 tts_mapsize = tts_count * sizeof(struct tts);
597 if((kmem_alloc(kernel_map, & tts_maptomem,
598 (vm_size_t)tts_mapsize) == KERN_SUCCESS))
599 {
600 tts_mapptr = (struct tts *) tts_maptomem;
601 bzero(tts_mapptr, tts_mapsize);
602 }
603 else
604 tts_mapptr = (struct tts *) 0;
605
606
607 /*
608 * We need to save the proc's command string
609 * and take a reference for each task associated
610 * with a valid process
611 */
612
613 if (tts_mapptr) {
614 for (p = allproc.lh_first, i=0; p && i < tts_count;
615 p = p->p_list.le_next) {
616 if (p->p_flag & P_WEXIT)
617 continue;
618
619 if (p->task) {
620 task_reference(p->task);
621 tts_mapptr[i].task = p->task;
622 tts_mapptr[i].pid = p->p_pid;
623 (void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
624 i++;
625 }
626 }
627 tts_count = i;
628 }
629
630
631 if (kd_mapptr && tts_mapptr)
632 {
633 kdebug_flags |= KDBG_MAPINIT;
634 /* Initialize thread map data */
635 akrt.map = kd_mapptr;
636 akrt.count = 0;
637 akrt.maxcount = kd_mapcount;
638
639 for (i=0; i < tts_count; i++)
640 {
641 akrt.atts = &tts_mapptr[i];
642 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
643 task_deallocate((task_t) tts_mapptr[i].task);
644 }
645 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
646 }
647 }
648
649 static void
650 kdbg_clear(void)
651 {
652 /*
653 * Clean up the trace buffer
654 * First make sure we're not in
655 * the middle of cutting a trace
656 */
657
658 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
659 kdebug_slowcheck = SLOW_NOLOG;
660
661 /*
662 * make sure the SLOW_NOLOG is seen
663 * by everyone that might be trying
664 * to cut a trace..
665 */
666 IOSleep(100);
667
668 if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
669 kdebug_slowcheck |= SLOW_ENTROPY;
670
671 global_state_pid = -1;
672 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
673 kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
674 kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
675
676 delete_buffers();
677
678 /* Clean up the thread map buffer */
679 kdebug_flags &= ~KDBG_MAPINIT;
680 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
681 kd_mapptr = (kd_threadmap *) 0;
682 kd_mapsize = 0;
683 kd_mapcount = 0;
684 }
685
686 int
687 kdbg_setpid(kd_regtype *kdr)
688 {
689 pid_t pid;
690 int flag, ret=0;
691 struct proc *p;
692
693 pid = (pid_t)kdr->value1;
694 flag = (int)kdr->value2;
695
696 if (pid > 0)
697 {
698 if ((p = pfind(pid)) == NULL)
699 ret = ESRCH;
700 else
701 {
702 if (flag == 1) /* turn on pid check for this and all pids */
703 {
704 kdebug_flags |= KDBG_PIDCHECK;
705 kdebug_flags &= ~KDBG_PIDEXCLUDE;
706 kdebug_slowcheck |= SLOW_CHECKS;
707
708 p->p_flag |= P_KDEBUG;
709 }
710 else /* turn off pid check for this pid value */
711 {
712 /* Don't turn off all pid checking though */
713 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
714 p->p_flag &= ~P_KDEBUG;
715 }
716 }
717 }
718 else
719 ret = EINVAL;
720 return(ret);
721 }
722
723 /* This is for pid exclusion in the trace buffer */
724 int
725 kdbg_setpidex(kd_regtype *kdr)
726 {
727 pid_t pid;
728 int flag, ret=0;
729 struct proc *p;
730
731 pid = (pid_t)kdr->value1;
732 flag = (int)kdr->value2;
733
734 if (pid > 0)
735 {
736 if ((p = pfind(pid)) == NULL)
737 ret = ESRCH;
738 else
739 {
740 if (flag == 1) /* turn on pid exclusion */
741 {
742 kdebug_flags |= KDBG_PIDEXCLUDE;
743 kdebug_flags &= ~KDBG_PIDCHECK;
744 kdebug_slowcheck |= SLOW_CHECKS;
745
746 p->p_flag |= P_KDEBUG;
747 }
748 else /* turn off pid exclusion for this pid value */
749 {
750 /* Don't turn off all pid exclusion though */
751 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
752 p->p_flag &= ~P_KDEBUG;
753 }
754 }
755 }
756 else
757 ret = EINVAL;
758 return(ret);
759 }
760
761 /* This is for setting a maximum decrementer value */
762 int
763 kdbg_setrtcdec(kd_regtype *kdr)
764 {
765 int ret=0;
766 natural_t decval;
767
768 decval = (natural_t)kdr->value1;
769
770 if (decval && decval < KDBG_MINRTCDEC)
771 ret = EINVAL;
772 #ifdef ppc
773 else {
774 maxDec = decval ? decval : 0x7FFFFFFF; /* Set or reset the max decrementer */
775 }
776 #else
777 else
778 ret = ENOTSUP;
779 #endif /* ppc */
780
781 return(ret);
782 }
783
784 int
785 kdbg_setreg(kd_regtype * kdr)
786 {
787 int ret=0;
788 unsigned int val_1, val_2, val;
789 switch (kdr->type) {
790
791 case KDBG_CLASSTYPE :
792 val_1 = (kdr->value1 & 0xff);
793 val_2 = (kdr->value2 & 0xff);
794 kdlog_beg = (val_1<<24);
795 kdlog_end = (val_2<<24);
796 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
797 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
798 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
799 kdebug_slowcheck |= SLOW_CHECKS;
800 break;
801 case KDBG_SUBCLSTYPE :
802 val_1 = (kdr->value1 & 0xff);
803 val_2 = (kdr->value2 & 0xff);
804 val = val_2 + 1;
805 kdlog_beg = ((val_1<<24) | (val_2 << 16));
806 kdlog_end = ((val_1<<24) | (val << 16));
807 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
808 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
809 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
810 kdebug_slowcheck |= SLOW_CHECKS;
811 break;
812 case KDBG_RANGETYPE :
813 kdlog_beg = (kdr->value1);
814 kdlog_end = (kdr->value2);
815 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
816 kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
817 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
818 kdebug_slowcheck |= SLOW_CHECKS;
819 break;
820 case KDBG_VALCHECK:
821 kdlog_value1 = (kdr->value1);
822 kdlog_value2 = (kdr->value2);
823 kdlog_value3 = (kdr->value3);
824 kdlog_value4 = (kdr->value4);
825 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
826 kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
827 kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
828 kdebug_slowcheck |= SLOW_CHECKS;
829 break;
830 case KDBG_TYPENONE :
831 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
832
833 if ( (kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
834 kdebug_slowcheck |= SLOW_CHECKS;
835 else
836 kdebug_slowcheck &= ~SLOW_CHECKS;
837
838 kdlog_beg = 0;
839 kdlog_end = 0;
840 break;
841 default :
842 ret = EINVAL;
843 break;
844 }
845 return(ret);
846 }
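/*
 * Worked example for KDBG_SUBCLSTYPE, using hypothetical register values:
 * value1 = DBG_MACH (1) and value2 = DBG_MACH_SCHED (0x40) give
 * kdlog_beg = 0x01400000 and kdlog_end = 0x01410000, so only debugids in
 * that class/subclass window survive the KDBG_RANGECHECK test in
 * kernel_debug_internal() (DBG_TRACE events are always let through).
 */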
847
848 int
849 kdbg_getreg(__unused kd_regtype * kdr)
850 {
851 #if 0
852 int i,j, ret=0;
853 unsigned int val_1, val_2, val;
854
855 switch (kdr->type) {
856 case KDBG_CLASSTYPE :
857 val_1 = (kdr->value1 & 0xff);
858 val_2 = val_1 + 1;
859 kdlog_beg = (val_1<<24);
860 kdlog_end = (val_2<<24);
861 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
862 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
863 break;
864 case KDBG_SUBCLSTYPE :
865 val_1 = (kdr->value1 & 0xff);
866 val_2 = (kdr->value2 & 0xff);
867 val = val_2 + 1;
868 kdlog_beg = ((val_1<<24) | (val_2 << 16));
869 kdlog_end = ((val_1<<24) | (val << 16));
870 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
871 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
872 break;
873 case KDBG_RANGETYPE :
874 kdlog_beg = (kdr->value1);
875 kdlog_end = (kdr->value2);
876 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
877 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
878 break;
879 case KDBG_TYPENONE :
880 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
881 kdlog_beg = 0;
882 kdlog_end = 0;
883 break;
884 default :
885 ret = EINVAL;
886 break;
887 }
888 #endif /* 0 */
889 return(EINVAL);
890 }
891
892
893 int
894 kdbg_readmap(user_addr_t buffer, size_t *number)
895 {
896 int avail = *number;
897 int ret = 0;
898 unsigned int count = 0;
899
900 count = avail/sizeof (kd_threadmap);
901
902 if (count && (count <= kd_mapcount))
903 {
904 if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
905 {
906 if (*number < kd_mapsize)
907 ret=EINVAL;
908 else
909 {
910 if (copyout(kd_mapptr, buffer, kd_mapsize))
911 ret=EINVAL;
912 }
913 }
914 else
915 ret=EINVAL;
916 }
917 else
918 ret=EINVAL;
919
920 if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
921 {
922 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
923 kdebug_flags &= ~KDBG_MAPINIT;
924 kd_mapsize = 0;
925 kd_mapptr = (kd_threadmap *) 0;
926 kd_mapcount = 0;
927 }
928
929 return(ret);
930 }
931
932 int
933 kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
934 {
935 int avail = *number;
936 int ret = 0;
937
938 if (kd_entropy_buffer)
939 return(EBUSY);
940
941 kd_entropy_count = avail/sizeof(mach_timespec_t);
942 kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
943 kd_entropy_indx = 0;
944
945 /* Enforce maximum entropy entries here if needed */
946
947 /* allocate entropy buffer */
948 if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
949 (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
950 {
951 kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
952 }
953 else
954 {
955 kd_entropy_buffer = (uint64_t *) 0;
956 kd_entropy_count = 0;
957 kd_entropy_indx = 0;
958 return (EINVAL);
959 }
960
961 if (ms_timeout < 10)
962 ms_timeout = 10;
963
964 /* Enable entropy sampling */
965 kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
966 kdebug_slowcheck |= SLOW_ENTROPY;
967
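/*
 * Convert the millisecond timeout to scheduler ticks: HZ is pinned to 100
 * at the top of this file, so 1000/HZ is 10 and e.g. a 1000 ms request
 * sleeps for 100 ticks below.
 */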
968 ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
969
970 /* Disable entropy sampling */
971 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
972 kdebug_slowcheck &= ~SLOW_ENTROPY;
973
974 *number = 0;
975 ret = 0;
976
977 if (kd_entropy_indx > 0)
978 {
979 /* copyout the buffer */
980 if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
981 ret = EINVAL;
982 else
983 *number = kd_entropy_indx;
984 }
985
986 /* Always cleanup */
987 kd_entropy_count = 0;
988 kd_entropy_indx = 0;
989 kd_entropy_buftomem = 0;
990 kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
991 kd_entropy_buffer = (uint64_t *) 0;
992 return(ret);
993 }
994
995
996 /*
997 * This function is provided for the CHUD toolkit only.
998 * int val:
999 * zero disables kdebug_chudhook function call
1000 * non-zero enables kdebug_chudhook function call
1001 * void *fn:
1002 * address of the enabled kdebug_chudhook function
1003 */
1004
1005 void
1006 kdbg_control_chud(int val, void *fn)
1007 {
1008 if (val) {
1009 /* enable chudhook */
1010 kdebug_chudhook = fn;
1011 kdebug_enable |= KDEBUG_ENABLE_CHUD;
1012 }
1013 else {
1014 /* disable chudhook */
1015 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
1016 kdebug_chudhook = 0;
1017 }
1018 }
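/*
 * Illustrative registration (a sketch; the real CHUD kext supplies its own
 * callback and calls this from kernel context):
 *
 *	static void my_hook(unsigned int debugid, unsigned int a1,
 *			    unsigned int a2, unsigned int a3,
 *			    unsigned int a4, unsigned int a5)
 *	{
 *		... inspect or count the event ...
 *	}
 *
 *	kdbg_control_chud(1, (void *) my_hook);		-- install the hook
 *	kdbg_control_chud(0, NULL);			-- remove it again
 */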
1019
1020
1021 int
1022 kdbg_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep)
1023 {
1024 int ret=0;
1025 size_t size=*sizep;
1026 unsigned int max_entries;
1027 unsigned int value = name[1];
1028 kd_regtype kd_Reg;
1029 kbufinfo_t kd_bufinfo;
1030 pid_t curpid;
1031 struct proc *p, *curproc;
1032
1033
1034 kdbg_lock_init();
1035
1036 if ( !(kdebug_flags & KDBG_LOCKINIT))
1037 return(ENOMEM);
1038
1039 lck_mtx_lock(kd_trace_mtx_sysctl);
1040
1041 if (name[0] == KERN_KDGETBUF) {
1042 /*
1043 * Does not alter the global_state_pid
1044 * This is a passive request.
1045 */
1046 if (size < sizeof(kd_bufinfo.nkdbufs)) {
1047 /*
1048 * There is not enough room to return even
1049 * the first element of the info structure.
1050 */
1051 lck_mtx_unlock(kd_trace_mtx_sysctl);
1052
1053 return(EINVAL);
1054 }
1055 kd_bufinfo.nkdbufs = nkdbufs;
1056 kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
1057
1058 if ( (kdebug_slowcheck & SLOW_NOLOG) )
1059 kd_bufinfo.nolog = 1;
1060 else
1061 kd_bufinfo.nolog = 0;
1062 kd_bufinfo.flags = kdebug_flags;
1063 kd_bufinfo.bufid = global_state_pid;
1064
1065 if (size >= sizeof(kd_bufinfo)) {
1066 /*
1067 * Provide all the info we have
1068 */
1069 if (copyout (&kd_bufinfo, where, sizeof(kd_bufinfo))) {
1070 lck_mtx_unlock(kd_trace_mtx_sysctl);
1071
1072 return(EINVAL);
1073 }
1074 }
1075 else {
1076 /*
1077 * For backwards compatibility, only provide
1078 * as much info as there is room for.
1079 */
1080 if (copyout (&kd_bufinfo, where, size)) {
1081 lck_mtx_unlock(kd_trace_mtx_sysctl);
1082
1083 return(EINVAL);
1084 }
1085 }
1086 lck_mtx_unlock(kd_trace_mtx_sysctl);
1087
1088 return(0);
1089 } else if (name[0] == KERN_KDGETENTROPY) {
1090 if (kd_entropy_buffer)
1091 ret = EBUSY;
1092 else
1093 ret = kdbg_getentropy(where, sizep, value);
1094 lck_mtx_unlock(kd_trace_mtx_sysctl);
1095
1096 return (ret);
1097 }
1098
1099 if ((curproc = current_proc()) != NULL)
1100 curpid = curproc->p_pid;
1101 else {
1102 lck_mtx_unlock(kd_trace_mtx_sysctl);
1103
1104 return (ESRCH);
1105 }
1106 if (global_state_pid == -1)
1107 global_state_pid = curpid;
1108 else if (global_state_pid != curpid) {
1109 if ((p = pfind(global_state_pid)) == NULL) {
1110 /*
1111 * The global pid no longer exists
1112 */
1113 global_state_pid = curpid;
1114 } else {
1115 /*
1116 * The global pid exists, deny this request
1117 */
1118 lck_mtx_unlock(kd_trace_mtx_sysctl);
1119
1120 return(EBUSY);
1121 }
1122 }
1123
1124 switch(name[0]) {
1125 case KERN_KDEFLAGS:
1126 value &= KDBG_USERFLAGS;
1127 kdebug_flags |= value;
1128 break;
1129 case KERN_KDDFLAGS:
1130 value &= KDBG_USERFLAGS;
1131 kdebug_flags &= ~value;
1132 break;
1133 case KERN_KDENABLE: /* used to enable or disable */
1134 if (value)
1135 {
1136 /* enable only if buffer is initialized */
1137 if (!(kdebug_flags & KDBG_BUFINIT))
1138 {
1139 ret=EINVAL;
1140 break;
1141 }
1142 kdbg_mapinit();
1143
1144 kdebug_enable |= KDEBUG_ENABLE_TRACE;
1145 kdebug_slowcheck &= ~SLOW_NOLOG;
1146 }
1147 else
1148 {
1149 kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
1150 kdebug_slowcheck |= SLOW_NOLOG;
1151 }
1152 break;
1153 case KERN_KDSETBUF:
1154 /* We allow a maximum buffer size of 25% of either RAM or the max mapped address, whichever is smaller */
1155 /* 'value' is the desired number of trace entries */
1156 max_entries = (sane_size/4) / sizeof(kd_buf);
1157 if (value <= max_entries)
1158 nkdbufs = value;
1159 else
1160 nkdbufs = max_entries;
1161 break;
1162 case KERN_KDSETUP:
1163 ret=kdbg_reinit();
1164 break;
1165 case KERN_KDREMOVE:
1166 kdbg_clear();
1167 break;
1168 case KERN_KDSETREG:
1169 if(size < sizeof(kd_regtype)) {
1170 ret=EINVAL;
1171 break;
1172 }
1173 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1174 ret= EINVAL;
1175 break;
1176 }
1177 ret = kdbg_setreg(&kd_Reg);
1178 break;
1179 case KERN_KDGETREG:
1180 if(size < sizeof(kd_regtype)) {
1181 ret = EINVAL;
1182 break;
1183 }
1184 ret = kdbg_getreg(&kd_Reg);
1185 if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
1186 ret=EINVAL;
1187 }
1188 break;
1189 case KERN_KDREADTR:
1190 ret = kdbg_read(where, sizep);
1191 break;
1192 case KERN_KDPIDTR:
1193 if (size < sizeof(kd_regtype)) {
1194 ret = EINVAL;
1195 break;
1196 }
1197 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1198 ret= EINVAL;
1199 break;
1200 }
1201 ret = kdbg_setpid(&kd_Reg);
1202 break;
1203 case KERN_KDPIDEX:
1204 if (size < sizeof(kd_regtype)) {
1205 ret = EINVAL;
1206 break;
1207 }
1208 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1209 ret= EINVAL;
1210 break;
1211 }
1212 ret = kdbg_setpidex(&kd_Reg);
1213 break;
1214 case KERN_KDTHRMAP:
1215 ret = kdbg_readmap(where, sizep);
1216 break;
1217 case KERN_KDSETRTCDEC:
1218 if (size < sizeof(kd_regtype)) {
1219 ret = EINVAL;
1220 break;
1221 }
1222 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
1223 ret= EINVAL;
1224 break;
1225 }
1226 ret = kdbg_setrtcdec(&kd_Reg);
1227 break;
1228
1229 default:
1230 ret= EINVAL;
1231 }
1232 lck_mtx_unlock(kd_trace_mtx_sysctl);
1233
1234 return(ret);
1235 }
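/*
 * Typical user-space driver of this interface (a sketch mirroring what the
 * trace tools do; error handling omitted, and the order matters: size the
 * buffers, set up, enable, then read):
 *
 *	int mib[6] = { CTL_KERN, KERN_KDEBUG, 0, 0, 0, 0 };
 *	size_t needed = 0;
 *
 *	mib[2] = KERN_KDSETBUF; mib[3] = 8192;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);		set buffer size
 *	mib[2] = KERN_KDSETUP;
 *	sysctl(mib, 3, NULL, &needed, NULL, 0);		allocate the buffers
 *	mib[2] = KERN_KDENABLE; mib[3] = 1;
 *	sysctl(mib, 4, NULL, &needed, NULL, 0);		start tracing
 *	...
 *	mib[2] = KERN_KDREADTR;
 *	needed = bufsize;
 *	sysctl(mib, 3, buffer, &needed, NULL, 0);	drain events into buffer
 */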
1236
1237
1238 /*
1239 * This code can run concurrently with kernel_debug_internal()
1240 * without the need for any locks, because all reads of kd_bufptr[i],
1241 * which kernel_debug_internal() modifies concurrently, are safe.
1242 */
1243 int
1244 kdbg_read(user_addr_t buffer, size_t *number)
1245 {
1246 unsigned int count;
1247 unsigned int cpu;
1248 int mincpu;
1249 uint64_t mintime, t, last_wrap_time;
1250 int last_wrap_cpu;
1251 int error = 0;
1252 kd_buf *tempbuf;
1253 uint32_t tempbuf_count;
1254 uint32_t tempbuf_number;
1255 unsigned int old_kdebug_flags, new_kdebug_flags;
1256 unsigned int old_kdebug_slowcheck, new_kdebug_slowcheck;
1257 count = *number/sizeof(kd_buf);
1258 *number = 0;
1259
1260 if (count == 0 || !(kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
1261 return EINVAL;
1262
1263 /*
1264 * because we hold kd_trace_mtx_sysctl, no other control threads can
1265 * be playing with kdebug_flags... the code that cuts new events could
1266 * be running, but it only reads kdebug_flags, it doesn't write it..
1267 * use an OSCompareAndSwap to make sure the other processors see the
1268 * change of state immediately, not to protect against 2 threads racing to update it
1269 */
1270 old_kdebug_slowcheck = kdebug_slowcheck;
1271 do {
1272 old_kdebug_flags = kdebug_flags;
1273 new_kdebug_flags = old_kdebug_flags & ~KDBG_WRAPPED;
1274 new_kdebug_flags |= KDBG_NOWRAP;
1275 } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));
1276
1277 last_wrap_time = 0;
1278 last_wrap_cpu = -1;
1279
1280 for (cpu = 0; cpu < kd_cpus; cpu++) {
1281 kd_buf *cur_bufptr;
1282
1283 if ((cur_bufptr = kdbip[cpu].kd_bufptr) >= kdbip[cpu].kd_buflast)
1284 cur_bufptr = kdbip[cpu].kd_buffer;
1285
1286 if (kdbip[cpu].kd_wrapped) {
1287 kdbip[cpu].kd_wrapped = 0;
1288 kdbip[cpu].kd_readlast = cur_bufptr;
1289 kdbip[cpu].kd_stop = cur_bufptr;
1290
1291 if (kd_cpus > 1 && ((cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK) > last_wrap_time)) {
1292 last_wrap_time = cur_bufptr->timestamp & KDBG_TIMESTAMP_MASK;
1293 last_wrap_cpu = cpu;
1294 }
1295 } else {
1296 if (kdbip[cpu].kd_readlast == cur_bufptr)
1297 kdbip[cpu].kd_stop = 0;
1298 else
1299 kdbip[cpu].kd_stop = cur_bufptr;
1300 }
1301 }
1302 if (count > nkdbufs)
1303 count = nkdbufs;
1304
1305 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
1306 tempbuf_count = KDCOPYBUF_COUNT;
1307
1308 while (count) {
1309 tempbuf = kdcopybuf;
1310 tempbuf_number = 0;
1311
1312 while (tempbuf_count) {
1313 mintime = 0xffffffffffffffffULL; /* all actual timestamps are below */
1314 mincpu = -1;
1315
1316 for (cpu = 0; cpu < kd_cpus; cpu++) {
1317 if (kdbip[cpu].kd_stop == 0) /* empty buffer */
1318 continue;
1319 t = kdbip[cpu].kd_readlast[0].timestamp & KDBG_TIMESTAMP_MASK;
1320
1321 if (t < mintime) {
1322 mintime = t;
1323 mincpu = cpu;
1324 }
1325 }
1326 if (mincpu < 0)
1327 /*
1328 * all buffers ran empty early
1329 */
1330 break;
1331
1332 if (last_wrap_cpu == mincpu) {
1333 tempbuf->debugid = MISCDBG_CODE(DBG_BUFFER, 0) | DBG_FUNC_NONE;
1334 tempbuf->arg1 = 0;
1335 tempbuf->arg2 = 0;
1336 tempbuf->arg3 = 0;
1337 tempbuf->arg4 = 0;
1338 tempbuf->arg5 = (int)current_thread();
1339
1340 tempbuf->timestamp = last_wrap_time | (((uint64_t)last_wrap_cpu) << KDBG_CPU_SHIFT);
1341
1342 tempbuf++;
1343
1344 last_wrap_cpu = -1;
1345
1346 } else {
1347 *(tempbuf++) = kdbip[mincpu].kd_readlast[0];
1348
1349 kdbip[mincpu].kd_readlast++;
1350
1351 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_buflast)
1352 kdbip[mincpu].kd_readlast = kdbip[mincpu].kd_buffer;
1353 if (kdbip[mincpu].kd_readlast == kdbip[mincpu].kd_stop)
1354 kdbip[mincpu].kd_stop = 0;
1355 }
1356 tempbuf_count--;
1357 tempbuf_number++;
1358 }
1359 if (tempbuf_number) {
1360 if ((error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf)))) {
1361 *number = 0;
1362 error = EINVAL;
1363 break;
1364 }
1365 count -= tempbuf_number;
1366 *number += tempbuf_number;
1367 buffer += (tempbuf_number * sizeof(kd_buf));
1368 }
1369 if (tempbuf_count)
1370 /*
1371 * all trace buffers are empty
1372 */
1373 break;
1374
1375 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
1376 tempbuf_count = KDCOPYBUF_COUNT;
1377 }
1378 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
1379 do {
1380 old_kdebug_flags = kdebug_flags;
1381 new_kdebug_flags = old_kdebug_flags & ~KDBG_NOWRAP;
1382 } while ( !OSCompareAndSwap((UInt32)old_kdebug_flags, (UInt32)new_kdebug_flags, (UInt32 *)&kdebug_flags));
1383
1384 if ( !(old_kdebug_slowcheck & SLOW_NOLOG)) {
1385 do {
1386 old_kdebug_slowcheck = kdebug_slowcheck;
1387 new_kdebug_slowcheck = old_kdebug_slowcheck & ~SLOW_NOLOG;
1388 } while ( !OSCompareAndSwap((UInt32)old_kdebug_slowcheck, (UInt32)new_kdebug_slowcheck, (UInt32 *)&kdebug_slowcheck));
1389 }
1390 }
1391 return (error);
1392 }
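/*
 * Reader-visible detail: when one of the per-cpu rings wrapped since the
 * previous read (and more than one cpu is present), the loop above inserts
 * a single synthetic event with debugid MISCDBG_CODE(DBG_BUFFER, 0) at the
 * wrap point, so post-processing tools can tell that older records were
 * overwritten rather than silently absent.
 */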
1393
1394
1395 unsigned char *getProcName(struct proc *proc);
1396 unsigned char *getProcName(struct proc *proc) {
1397
1398 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
1399
1400 }
1401
1402 #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
1403 #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
1404 #ifdef __i386__
1405 #define TRAP_DEBUGGER __asm__ volatile("int3");
1406 #endif
1407 #ifdef __ppc__
1408 #define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3");
1409 #endif
1410
1411 #define SANE_TRACEBUF_SIZE 2*1024*1024
1412
1413 /* Initialize the mutex governing access to the stack snapshot subsystem */
1414 __private_extern__ void
1415 stackshot_lock_init( void )
1416 {
1417 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
1418
1419 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
1420
1421 stackshot_subsys_lck_attr = lck_attr_alloc_init();
1422
1423 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
1424 }
1425
1426 /*
1427 * stack_snapshot: Obtains a coherent set of stack traces for all threads
1428 * on the system, tracing both kernel and user stacks
1429 * where available. Uses machine specific trace routines
1430 * for ppc, ppc64 and x86.
1431 * Inputs: uap->pid - process id of process to be traced, or -1
1432 * for the entire system
1433 * uap->tracebuf - address of the user space destination
1434 * buffer
1435 * uap->tracebuf_size - size of the user space trace buffer
1436 * uap->options - various options, including the maximum
1437 * number of frames to trace.
1438 * Outputs: EPERM if the caller is not privileged
1439 * EINVAL if the supplied trace buffer isn't sanely sized
1440 * ENOMEM if we don't have enough memory to satisfy the
1441 * request
1442 * ENOENT if the target pid isn't found
1443 * ENOSPC if the supplied buffer is insufficient
1444 * *retval contains the number of bytes traced, if successful
1445 * and -1 otherwise. If the request failed due to
1446 * tracebuffer exhaustion, we copyout as much as possible.
1447 */
1448 int
1449 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, register_t *retval) {
1450 int error = 0;
1451
1452 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
1453 return(error);
1454
1455 return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
1456 uap->options, retval);
1457 }
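/*
 * Illustrative caller (a sketch; assumes a user-space syscall stub for
 * SYS_stack_snapshot is available and that the caller passes the suser()
 * check above):
 *
 *	char buf[64 * 1024];
 *	int bytes;
 *
 *	bytes = syscall(SYS_stack_snapshot, -1, buf, sizeof(buf), 0);
 *	if (bytes > 0)
 *		... buf now holds 'bytes' bytes of stack snapshot records ...
 *
 * A pid of -1 requests the whole system, per the comment above; the buffer
 * size must stay within SANE_TRACEBUF_SIZE or stack_snapshot2() returns
 * EINVAL.
 */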
1458
1459 int
1460 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t options, register_t *retval)
1461 {
1462 int error = 0;
1463 unsigned bytesTraced = 0;
1464
1465 *retval = -1;
1466 /* Serialize tracing */
1467 STACKSHOT_SUBSYS_LOCK();
1468
1469 if ((tracebuf_size <= 0) || (tracebuf_size > SANE_TRACEBUF_SIZE)) {
1470 error = EINVAL;
1471 goto error_exit;
1472 }
1473
1474 MALLOC(stackshot_snapbuf, void *, tracebuf_size, M_TEMP, M_WAITOK);
1475
1476 if (stackshot_snapbuf == NULL) {
1477 error = ENOMEM;
1478 goto error_exit;
1479 }
1480 /* Preload trace parameters*/
1481 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, options);
1482
1483 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
1484 * the trace buffer
1485 */
1486 TRAP_DEBUGGER;
1487
1488 bytesTraced = kdp_stack_snapshot_bytes_traced();
1489
1490 if (bytesTraced > 0) {
1491 if ((error = copyout(stackshot_snapbuf, tracebuf,
1492 ((bytesTraced < tracebuf_size) ?
1493 bytesTraced : tracebuf_size))))
1494 goto error_exit;
1495 *retval = bytesTraced;
1496 }
1497 else {
1498 error = ENOENT;
1499 goto error_exit;
1500 }
1501
1502 error = kdp_stack_snapshot_geterror();
1503 if (error == -1) {
1504 error = ENOSPC;
1505 *retval = -1;
1506 goto error_exit;
1507 }
1508
1509 error_exit:
1510 if (stackshot_snapbuf != NULL)
1511 FREE(stackshot_snapbuf, M_TEMP);
1512 stackshot_snapbuf = NULL;
1513 STACKSHOT_SUBSYS_UNLOCK();
1514 return error;
1515 }