/*
 * Source: osfmk/kern/debug.c from apple/xnu
 * (git blob b89774897c23216fa7baadc80df1900a0227a8ab, via git.saurik.com mirror)
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59
60 #include <kern/cpu_number.h>
61 #include <kern/kalloc.h>
62 #include <kern/lock.h>
63 #include <kern/spl.h>
64 #include <kern/thread.h>
65 #include <kern/assert.h>
66 #include <kern/sched_prim.h>
67 #include <kern/misc_protos.h>
68 #include <kern/clock.h>
69 #include <vm/vm_kern.h>
70 #include <vm/pmap.h>
71 #include <stdarg.h>
72 #if !MACH_KDP
73 #include <kdp/kdp_udp.h>
74 #endif
75
76 #if defined(__i386__) || defined(__x86_64__)
77 #include <i386/cpu_threads.h>
78 #include <i386/pmCPU.h>
79 #endif
80
81 #include <IOKit/IOPlatformExpert.h>
82 #include <machine/pal_routines.h>
83
84 #include <sys/kdebug.h>
85 #include <libkern/OSKextLibPrivate.h>
86 #include <libkern/OSAtomic.h>
87 #include <libkern/kernel_mach_header.h>
88 #include <uuid/uuid.h>
89
90 unsigned int halt_in_debugger = 0;
91 unsigned int switch_debugger = 0;
92 unsigned int current_debugger = 0;
93 unsigned int active_debugger = 0;
94 unsigned int debug_mode=0;
95 unsigned int disable_debug_output = TRUE;
96 unsigned int systemLogDiags = FALSE;
97 unsigned int panicDebugging = FALSE;
98 unsigned int logPanicDataToScreen = FALSE;
99
100 int mach_assert = 1;
101
102 const char *panicstr = (char *) 0;
103 decl_simple_lock_data(,panic_lock)
104 int paniccpu;
105 volatile int panicwait;
106 volatile unsigned int nestedpanic= 0;
107 unsigned int panic_is_inited = 0;
108 unsigned int return_on_panic = 0;
109 unsigned long panic_caller;
110
111 #if CONFIG_EMBEDDED
112 #define DEBUG_BUF_SIZE (PAGE_SIZE)
113 #else
114 #define DEBUG_BUF_SIZE (3 * PAGE_SIZE)
115 #endif
116
117 char debug_buf[DEBUG_BUF_SIZE];
118 char *debug_buf_ptr = debug_buf;
119 unsigned int debug_buf_size = sizeof(debug_buf);
120
121 static char model_name[64];
122 /* uuid_string_t */ char kernel_uuid[37];
123
124 static spl_t panic_prologue(const char *str);
125 static void panic_epilogue(spl_t s);
126
/*
 * Eight 7-bit fields packed into 7 bytes (56 bits total, no padding
 * thanks to __attribute__((packed))).  Used by packA()/unpackA() below
 * to squeeze 8 ASCII characters into 7 bytes of the debug buffer.
 */
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;
139
140 /* Prevent CPP from breaking the definition below */
141 #if CONFIG_NO_PANIC_STRINGS
142 #undef Assert
143 #endif
144
145 void
146 Assert(
147 const char *file,
148 int line,
149 const char *expression
150 )
151 {
152 int saved_return_on_panic;
153
154 if (!mach_assert) {
155 return;
156 }
157
158 saved_return_on_panic = return_on_panic;
159 return_on_panic = 1;
160
161 panic_plain("%s:%d Assertion failed: %s", file, line, expression);
162
163 return_on_panic = saved_return_on_panic;
164 }
165
166 /*
167 * Carefully use the panic_lock. There's always a chance that
168 * somehow we'll call panic before getting to initialize the
169 * panic_lock -- in this case, we'll assume that the world is
170 * in uniprocessor mode and just avoid using the panic lock.
171 */
172 #define PANIC_LOCK() \
173 MACRO_BEGIN \
174 if (panic_is_inited) \
175 simple_lock(&panic_lock); \
176 MACRO_END
177
178 #define PANIC_UNLOCK() \
179 MACRO_BEGIN \
180 if (panic_is_inited) \
181 simple_unlock(&panic_lock); \
182 MACRO_END
183
184
185 void
186 panic_init(void)
187 {
188 unsigned long uuidlen = 0;
189 void *uuid;
190
191 uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
192 if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
193 uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid);
194 }
195
196 simple_lock_init(&panic_lock, 0);
197 panic_is_inited = 1;
198 panic_caller = 0;
199 }
200
201 void
202 debug_log_init(void)
203 {
204 if (debug_buf_size != 0)
205 return;
206 debug_buf_ptr = debug_buf;
207 debug_buf_size = sizeof(debug_buf);
208 }
209
210 #if defined(__i386__) || defined(__x86_64__)
211 #define panic_stop() pmCPUHalt(PM_HALT_PANIC)
212 #define panic_safe() pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE)
213 #define panic_normal() pmSafeMode(x86_lcpu(), PM_SAFE_FL_NORMAL)
214 #else
215 #define panic_stop() { while (1) ; }
216 #define panic_safe()
217 #define panic_normal()
218 #endif
219
220 /*
221 * Prevent CPP from breaking the definition below,
222 * since all clients get a #define to prepend line numbers
223 */
224 #undef panic
225
/*
 * Sink for panic() calls when CONFIG_NO_PANIC_STRINGS builds compile
 * away the caller's format string: the original arguments are consumed
 * and discarded, and a generic panic is raised instead.
 */
void _consume_panic_args(int a __unused, ...)
{
	panic("panic");
}
230
/*
 * Common entry work for panic() and panic_context(): flush any pending
 * kdebug trace, raise spl and disable preemption, put the platform in
 * its panic-safe mode, and arbitrate which CPU owns the panic (other
 * CPUs spin until the first panicking CPU has printed its message).
 * Returns the saved spl value for panic_epilogue() to restore.
 */
static spl_t
panic_prologue(const char *str)
{
	spl_t s;

	if (kdebug_enable) {
		/* Interrupts are re-enabled here before dumping the trace file. */
		ml_set_interrupts_enabled(TRUE);
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");
	}

	s = splhigh();
	disable_preemption();

#if defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

	if( logPanicDataToScreen )
		disable_debug_output = FALSE;

	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			/* The panicking CPU panicked again: nested panic. */
			nestedpanic +=1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic: We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;	/* cleared by the caller once its message is out */

	PANIC_UNLOCK();
	return(s);
}
290
291
/*
 * Common exit work for panic() and panic_context(): clear panicstr so
 * a later panic is treated as fresh, then either back out and resume
 * (return_on_panic debug mode) or halt the machine for good.  's' is
 * the spl value saved by panic_prologue().
 */
static void
panic_epilogue(spl_t s)
{
	/*
	 * Release panicstr so that we can handle normally other panics.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();

	if (return_on_panic) {
		/* Debug path: undo the prologue's state changes and continue. */
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}
	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}
312
/*
 * System panic: print the formatted panic message, drop into the
 * debugger, and (normally) halt the machine via panic_epilogue().
 * Returns to the caller only when return_on_panic is set.
 */
void
panic(const char *str, ...)
{
	va_list listp;
	spl_t s;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");
	panic_epilogue(s);
}
339
/*
 * Variant of panic() that forwards a reason code and a saved machine
 * context to the debugger via DebuggerWithContext().  Otherwise
 * identical to panic() (same prologue, message formatting, epilogue).
 */
void
panic_context(unsigned int reason, void *ctx, const char *str, ...)
{
	va_list listp;
	spl_t s;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	DebuggerWithContext(reason, ctx, "panic");
	panic_epilogue(s);
}
366
/*
 * Kernel log(): format the message to the console log with preemption
 * disabled.  The syslog priority 'level' is accepted for compatibility
 * but unused here.  Body compiles away entirely unless MACH_BSD.
 */
void
log(__unused int level, char *fmt, ...)
{
	va_list listp;

#ifdef lint
	level++;	/* reference 'level' so lint does not flag it unused */
#endif /* lint */
#ifdef MACH_BSD
	disable_preemption();
	va_start(listp, fmt);
	_doprnt(fmt, &listp, conslog_putc, 0);
	va_end(listp);
	enable_preemption();
#endif
}
383
384 void
385 debug_putc(char c)
386 {
387 if ((debug_buf_size != 0) &&
388 ((debug_buf_ptr-debug_buf) < (int)debug_buf_size)) {
389 *debug_buf_ptr=c;
390 debug_buf_ptr++;
391 }
392 }
393
394 /* In-place packing routines -- inefficient, but they're called at most once.
395 * Assumes "buflen" is a multiple of 8.
396 */
397
398 int packA(char *inbuf, uint32_t length, uint32_t buflen)
399 {
400 unsigned int i, j = 0;
401 pasc_t pack;
402
403 length = MIN(((length + 7) & ~7), buflen);
404
405 for (i = 0; i < length; i+=8)
406 {
407 pack.a = inbuf[i];
408 pack.b = inbuf[i+1];
409 pack.c = inbuf[i+2];
410 pack.d = inbuf[i+3];
411 pack.e = inbuf[i+4];
412 pack.f = inbuf[i+5];
413 pack.g = inbuf[i+6];
414 pack.h = inbuf[i+7];
415 bcopy ((char *) &pack, inbuf + j, 7);
416 j += 7;
417 }
418 return j;
419 }
420
/*
 * Reverse of packA(): expand 7-byte packed groups back to 8 ASCII
 * bytes, in place.  'length' is the packed size; it is scaled by 8/7
 * to the unpacked size up front.  Each iteration first shifts the
 * not-yet-unpacked tail right by one byte to make room (the regions
 * overlap, which bcopy handles), then rewrites the current group.
 */
void unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8)/7;	/* packed size -> unpacked size */

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
440
441 extern void *proc_name_address(void *p);
442
/*
 * Best-effort print of the BSD process name for the currently running
 * thread at panic time.  Every pointer dereference goes through
 * ml_nofault_copy() so that corrupt task/proc pointers cannot trigger
 * a nested fault during the panic; on any failure the name stays
 * "Unknown".
 */
static void
panic_display_process_name(void) {
	char proc_name[32] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(&ctask->bsd_info)) == sizeof(&ctask->bsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}
455
456 unsigned panic_active(void) {
457 return ((panicstr != (char *) 0));
458 }
459
/* Record the platform model string for later display in panic reports. */
void populate_model_name(char *model_string) {
	strlcpy(model_name, model_string, sizeof(model_name));
}
463
464 static void panic_display_model_name(void) {
465 char tmp_model_name[sizeof(model_name)];
466
467 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name))
468 return;
469
470 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
471
472 if (tmp_model_name[0] != 0)
473 kdb_printf("System model name: %s\n", tmp_model_name);
474 }
475
476 static void panic_display_kernel_uuid(void) {
477 char tmp_kernel_uuid[sizeof(kernel_uuid)];
478
479 if (ml_nofault_copy((vm_offset_t) &kernel_uuid, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid)) != sizeof(kernel_uuid))
480 return;
481
482 if (tmp_kernel_uuid[0] != '\0')
483 kdb_printf("Kernel UUID: %s\n", tmp_kernel_uuid);
484 }
485
486 static void panic_display_kernel_aslr(void) {
487 #if defined(__x86_64__)
488 if (vm_kernel_slide) {
489 kdb_printf("Kernel slide: 0x%016lx\n", vm_kernel_slide);
490 kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext);
491 }
492 #endif
493 }
494
/* Print the current system uptime (mach_absolute_time in nanoseconds). */
static void panic_display_uptime(void) {
	uint64_t now_ns;

	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
	kdb_printf("\nSystem uptime in nanoseconds: %llu\n", now_ns);
}
501
502 extern const char version[];
503 extern char osversion[];
504
505 static volatile uint32_t config_displayed = 0;
506
/*
 * Dump the system-configuration section of a panic report: process
 * name, boot-args, OS/kernel versions, UUID, ASLR, platform info,
 * model, uptime, and zone data.  The process name is printed on every
 * call; the remaining sections print at most once, guarded by an
 * atomic compare-and-swap on config_displayed (protects against
 * re-entry from a nested panic).
 */
__private_extern__ void panic_display_system_configuration(void) {

	panic_display_process_name();
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
		char buf[256];
		/* strlcpy returns the source length; non-zero means non-empty args. */
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
			(osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n",version);
		panic_display_kernel_uuid();
		panic_display_kernel_aslr();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}
529
530 extern zone_t first_zone;
531 extern unsigned int num_zones, stack_total;
532 extern unsigned long long stack_allocs;
533
534 #if defined(__i386__) || defined (__x86_64__)
535 extern unsigned int inuse_ptepages_count;
536 extern long long alloc_ptepages_count;
537 #endif
538
539 extern boolean_t panic_include_zprint;
540
/*
 * When panic_include_zprint is set (panic came from the zone
 * allocator), print the size of every zone over 1MB plus kernel
 * stack / page-table / kalloc-large totals.  The zone list is walked
 * by copying each struct zone into a local with ml_nofault_copy() so
 * a corrupted zone list cannot fault mid-panic; the walk stops early
 * on a NULL next pointer or a failed copy.
 */
__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int i;
		struct zone zone_copy;

		if(first_zone!=NULL) {
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					/* Only report zones consuming more than 1MB. */
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%.20s:%lu\n",zone_copy.zone_name,(uintptr_t)zone_copy.cur_size);
					}

					if(zone_copy.next_zone == NULL) {
						break;
					}

					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n",(uintptr_t)kalloc_large_total);
	}
}
575
576 #if CONFIG_ZLEAKS
577 extern boolean_t panic_include_ztrace;
578 extern struct ztrace* top_ztrace;
579 /*
580 * Prints the backtrace most suspected of being a leaker, if we paniced in the zone allocator.
581 * top_ztrace and panic_include_ztrace comes from osfmk/kern/zalloc.c
582 */
/*
 * When panic_include_ztrace is set, print the allocation backtrace
 * most suspected of leaking (top_ztrace from osfmk/kern/zalloc.c),
 * followed by the kexts appearing in that backtrace.  top_ztrace is
 * snapshotted via ml_nofault_copy() so a bad pointer cannot fault
 * during the panic.
 */
__private_extern__ void panic_display_ztrace(void)
{
	if(panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		struct ztrace top_ztrace_copy;

		/* Make sure not to trip another panic if there's something wrong with memory */
		if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
				kdb_printf("%p\n", top_ztrace_copy.zt_stack[i]);
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
605 #endif /* CONFIG_ZLEAKS */
606
607 #if !MACH_KDP
608 static struct ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};
609
610 /* XXX ugly forward declares to stop warnings */
611 void *kdp_get_interface(void);
612 void kdp_set_ip_and_mac_addresses(struct in_addr *, struct ether_addr *);
613 void kdp_set_gateway_mac(void *);
614 void kdp_set_interface(void *);
615 void kdp_register_send_receive(void *, void *);
616 void kdp_unregister_send_receive(void *, void *);
617 void kdp_snapshot_preflight(int, void *, uint32_t, uint32_t);
618 int kdp_stack_snapshot_geterror(void);
619 int kdp_stack_snapshot_bytes_traced(void);
620
/* Stub for !MACH_KDP builds: no debug interface exists. */
void *
kdp_get_interface(void)
{
	return NULL;
}
626
/* Stub for !MACH_KDP builds: no debugger IP address is configured. */
unsigned int
kdp_get_ip_address(void)
{
	return 0;
}
630
/* Stub for !MACH_KDP builds: returns the all-zero MAC address. */
struct ether_addr
kdp_get_mac_addr(void)
{
	return kdp_current_mac_address;
}
636
/* Stub for !MACH_KDP builds: ignores the supplied addresses. */
void
kdp_set_ip_and_mac_addresses(
	__unused struct in_addr *ipaddr,
	__unused struct ether_addr *macaddr)
{}
642
/* Stub for !MACH_KDP builds: no-op. */
void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}
646
/* Stub for !MACH_KDP builds: no-op. */
void
kdp_set_interface(__unused void *ifp)
{}
650
/* Stub for !MACH_KDP builds: no-op. */
void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}
654
/* Stub for !MACH_KDP builds: no-op. */
void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}
658
/* Stub for !MACH_KDP builds: stack snapshots are unsupported; no-op. */
void
kdp_snapshot_preflight(__unused int pid, __unused void * tracebuf,
		__unused uint32_t tracebuf_size, __unused uint32_t options)
{}
663
/* Stub for !MACH_KDP builds: stack snapshots always report failure. */
int
kdp_stack_snapshot_geterror(void)
{
	return (-1);
}
669
/* Stub for !MACH_KDP builds: no snapshot data is ever produced. */
int
kdp_stack_snapshot_bytes_traced(void)
{
	return (0);
}
675
676 #endif